diff --git a/LICENSE b/LICENSE index 4032d95..0bb898d 100644 --- a/LICENSE +++ b/LICENSE @@ -1,21 +1,188 @@ -MIT License - -Copyright (c) 2020 Noah's Ark Lab / Huawei - -Permission is hereby granted, free of charge, to any person obtaining a copy -of this software and associated documentation files (the "Software"), to deal -in the Software without restriction, including without limitation the rights -to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -copies of the Software, and to permit persons to whom the Software is -furnished to do so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in all -copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE -SOFTWARE. + Copyright (C) 2020. Huawei Technologies Co., Ltd. All rights reserved. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. + +Apache License, Version 2.0 + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). 
+ + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. 
You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. 
In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS diff --git a/MANIFEST.in b/MANIFEST.in index 5094c3b..901d2a0 100644 --- a/MANIFEST.in +++ b/MANIFEST.in @@ -1,6 +1,5 @@ #dispatch files to site-packages recursive-include docs * -recursive-include evaluate_service * recursive-include examples * include LICENSE include MANIFEST.in diff --git a/README.cn.md b/README.cn.md index b193cc3..5ae3cbc 100644 --- a/README.cn.md +++ b/README.cn.md @@ -9,13 +9,13 @@ --- -**Vega ver1.7.1 released** +**Vega ver1.8.0 发布** -- Bug修复: +- 特性增强 - - 增加评估服务最大尝试次数限制. - - 使用SafeLoader加载YAML文件. - - 增加评估服务输入参数异常处理. + - 安全增强,组件间通信支持安全协议。 + - 提供独立的评估服务安装。 + - 更新Auto-lane模型,提供auto-lane推理代码。 --- @@ -30,16 +30,6 @@ Vega是诺亚方舟实验室自研的AutoML算法工具链,有主要特点: 5. 多Backend支持:支持PyTorch(GPU, Ascend 910), TensorFlow(GPU, Ascend 910), MindSpore(Ascend 910).。 6. 
支持昇腾平台:支持在Ascend 910搜索和训练,支持在Ascend 310上模型评估。 -## AutoML工具特性 - -| | 平台 | HPO算法 | NAS算法 | 端侧评估 | 模型过滤 | 统一网络 | -| :--: | :-- | :-- | :-- | :-- | :-- | :-- | -| **AutoGluon** | mxnet, PyTorch | Random Search, Bayesian, Hyper-Band | Random Search, RL | × | × | × | -| **AutoKeras** | Keras | No Restrictions | Network Morphism | × | × | × | -| **Model Search** | TensorFlow | No Restrictions | Random Search, Beam Search | × | × | × | -| **NNI** | No Restrictions | Random Search and Grid Search, Bayesian, Annealing, Hyper-Band, Evolution, RL | Random Search, Gradient-Based, One-Shot | × | × | × | -| **Vega** | PyTorch, TensorFlow, MindSpore | Random Search, Grid Search, Bayesian, Hyper-Band, Evolution | Random Search, Gradient-Based, Evalution, One-Shot | Ascend 310, Kirin 980/990 | Quota (在NAS搜索中根据parameters, flops, latency过滤模型) | 提供同时用于PyTorch、TensorFlow和MindSpore的网络 | - ## 算法列表 | 分类 | 算法 | 说明 | 参考 | @@ -67,14 +57,12 @@ Vega是诺亚方舟实验室自研的AutoML算法工具链,有主要特点: ## 安装 -执行如下命令安装Vega和相关开源软件: +执行如下命令安装Vega: ```bash pip3 install --user --upgrade noah-vega ``` -若需要在Ascend 910训练环境上安装,请联系我们。 - ## 使用 使用`vega`命令来运行Vega应用,比如可执行如下命令运行`CARS`算法: @@ -86,12 +74,18 @@ vega ./examples/nas/cars/cars.yml 其中`cars.yml`中包含了pipeline、搜索算法、搜索空间、训练参数等定义。 Vega提供了40+示例供参考:[示例](https://github.com/huawei-noah/vega/tree/master/examples)、[示例参考](./docs/cn/user/examples.md)、[配置参考](./docs/cn/user/config_reference.md)。 +安全模式适用于通信安全要求高的场景,在运行前请执行[安全配置](./docs/cn/security.md): + +```bash +vega ./examples/nas/cars/cars.yml -s +``` + ## 参考 | 对象 | 参考 | | :--: | :-- | -| [**用户**
(用户指南)](./docs/cn/user/README.md) | [安装指导](./docs/cn/user/install.md)、[部署指导](./docs/cn/user/deployment.md)、[配置指导](./docs/cn/user/config_reference.md)、[示例参考](./docs/cn/user/examples.md)、[评估服务](./docs/cn/user/evaluate_service.md)、任务参考([分类](./docs/cn/tasks/classification.md)、[检测](./docs/cn/tasks/detection.md)、[分割](./docs/cn/tasks/segmentation.md)、[超分](./docs/cn/tasks/segmentation.md)) | -| [**开发者**
(开发者指南)](./docs/cn/developer/README.md) | [开发者指导](./docs/cn/developer/developer_guide.md)、[快速入门指导](./docs/cn/developer/quick_start.md)、[数据集指导](./docs/cn/developer/datasets.md)、[算法开发指导](./docs/cn/developer/new_algorithm.md)、[细粒度搜索空间指导](./docs/cn/developer/fine_grained_space.md) | +| **用户** | [安装指导](./docs/cn/user/install.md)、[部署指导](./docs/cn/user/deployment.md)、[安全配置](./docs/cn/security.md)、[配置指导](./docs/cn/user/config_reference.md)、[示例参考](./docs/cn/user/examples.md)、[评估服务](./evaluate_service/docs/cn/evaluate_service.md) | +| **开发者** | [开发者指导](./docs/cn/developer/developer_guide.md)、[快速入门指导](./docs/cn/developer/quick_start.md)、[数据集指导](./docs/cn/developer/datasets.md)、[算法开发指导](./docs/cn/developer/new_algorithm.md) | ## FAQ @@ -113,4 +107,3 @@ Vega提供了40+示例供参考:[示例](https://github.com/huawei-noah/vega/t ## 合作和贡献 欢迎大家使用Vega,有任何疑问、求助、修改bug、贡献算法、完善文档,请在社区提交issue,我们会及时回复沟通交流。 -欢迎大家加入我们的QQ群: **833345709** 。 diff --git a/README.md b/README.md index eb0f7eb..f376176 100644 --- a/README.md +++ b/README.md @@ -8,13 +8,13 @@ --- -**Vega ver1.7.1 released** +**Vega ver1.8.0 released** -- Bug fixes: +- Feature enhancement: - - Maximum number of evaluation service attempts. - - Use SafeLoader to load the YAML file. - - Catch evaluation service input parameter exceptions. + - Security enhancement: Security protocols communication. + - Provide evaluation service release package. + - Update the auto-lane model and provide auto-lane inference sample code. --- @@ -29,16 +29,6 @@ Vega is an AutoML algorithm tool chain developed by Noah's Ark Laboratory, the m 5. Multi-Backend: PyTorch (GPU and Ascend 910), TensorFlow (GPU and Ascend 910), MindSpore (Ascend 910). 6. Ascend platform: Search and training on the Ascend 910 and model evaluation on the Ascend 310. -## AutoML Tools Features - -| | Supported Frameworks | HPO Algorithms | NAS Algorithms | Device-Side Evaluation | Model Filter | Universal Network | -| :--: | :-- | :-- | :-- | :-- | :-- | :-- | -| **AutoGluon** | mxnet, PyTorch | Random Search, Bayesian, Hyper-Band | Random Search, RL | × | × | × | -| **AutoKeras** | Keras | No Restrictions | Network Morphism | × | × | × | -| **Model Search** | TensorFlow | No Restrictions | Random Search, Beam Search | × | × | × | -| **NNI** | No Restrictions | Random Search and Grid Search, Bayesian, Annealing, Hyper-Band, Evolution, RL | Random Search, Gradient-Based, One-Shot | × | × | × | -| **Vega** | PyTorch, TensorFlow, MindSpore | Random Search, Grid Search, Bayesian, Hyper-Band, Evolution | Random Search, Gradient-Based, Evalution, One-Shot | Ascend 310, Kirin 980/990 | Quota (filter model based on parameters, flops, latency) | provides networks compatibility with PyTorch, TensorFlow, and MindSpore | - ## Algorithm List | Category | Algorithm | Description | reference | @@ -68,14 +58,12 @@ Vega is an AutoML algorithm tool chain developed by Noah's Ark Laboratory, the m ## Installation -Run the following commands to install Vega and related open-source software: +Run the following commands to install Vega: ```bash pip3 install --user --upgrade noah-vega ``` -If you need to install the Ascend 910 training environment, please contact us. - ## Usage Run the `vega` command to run the Vega application. For example, run the following command to run the `CARS` algorithm: @@ -87,12 +75,18 @@ vega ./examples/nas/cars/cars.yml The `cars.yml` file contains definitions such as pipeline, search algorithm, search space, and training parameters. 
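To make that concrete, the following minimal Python sketch writes and reads back a configuration using the same top-level keys that appear in this repository's example configs (`pipeline`, `pipe_step`, `search_algorithm`, `search_space`, `dataset`, `trainer`); the hyperparameter key `backbone.depth`, its range, and the file name `my_pipeline.yml` are hypothetical placeholders rather than the contents of the real `cars.yml`.

```python
# Minimal sketch of the kind of pipeline configuration a Vega YAML file holds.
# The keys mirror the example configs shown elsewhere in this repository;
# "backbone.depth" and "my_pipeline.yml" are hypothetical placeholders.
import yaml

config = {
    "pipeline": ["nas"],
    "nas": {
        "pipe_step": {"type": "SearchPipeStep"},
        "search_algorithm": {"type": "RandomSearch"},
        "search_space": {
            "hyperparameters": [
                {"key": "backbone.depth", "type": "CATEGORY", "range": [18, 34, 50]}
            ]
        },
        "dataset": {"type": "Cifar10", "common": {"data_path": "/cache/datasets/cifar10/"}},
        "trainer": {"type": "Trainer", "epochs": 1},
    },
}

# Write the sketch to disk, then read it back with the safe loader.
with open("my_pipeline.yml", "w") as f:
    yaml.safe_dump(config, f)

with open("my_pipeline.yml") as f:
    print(yaml.safe_load(f)["pipeline"])
```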
Vega provides more than 40 examples for reference: [Examples](https://github.com/huawei-noah/vega/tree/master/examples), [Example Guide](./docs/en/user/examples.md), and [Configuration Guide](./docs/en/user/config_reference.md). +The security mode applies to scenarios with high communication security requirements. Before running the command below, complete the [security configuration](./docs/en/security.md): + +```bash +vega ./examples/nas/cars/cars.yml -s +``` + ## Reference -| object | refrence | +| Reader | Reference | | :--: | :-- | -| [**User**
(User Guide)](./docs/en/user/README.md) | [Install Guide](./docs/en/user/install.md), [Deployment Guide](./docs/en/user/deployment.md), [Configuration Guide](./docs/en/user/config_reference.md), [Examples](./docs/en/user/examples.md), [Evaluate Service](./docs/en/user/evaluate_service.md) | -| [**Developer**
(Developer Guide)](./docs/en/developer/README.md) | [Development Reference](./docs/en/developer/developer_guide.md), [Quick Start Guide](./docs/en/developer/quick_start.md), [Dataset Guide](./docs/en/developer/datasets.md), [Algorithm Development Guide](./docs/en/developer/new_algorithm.md), [Fine-Grained Search Space Guide](./docs/en/developer/fine_grained_space.md) | +| **User** | [Install Guide](./docs/en/user/install.md), [Deployment Guide](./docs/en/user/deployment.md), [Configuration Guide](./docs/en/user/config_reference.md), [Security Configuration](./docs/en/security.md), [Examples](./docs/en/user/examples.md), [Evaluate Service](./evaluate_service/docs/en/evaluate_service.md) | +| **Developer** | [Development Reference](./docs/en/developer/developer_guide.md), [Quick Start Guide](./docs/en/developer/quick_start.md), [Dataset Guide](./docs/en/developer/datasets.md), [Algorithm Development Guide](./docs/en/developer/new_algorithm.md) | ## FAQ @@ -114,4 +108,3 @@ For common problems and exception handling, please refer to [FAQ](./docs/en/user ## Cooperation and Contribution Welcome to use Vega. If you have any questions or suggestions, need help, fix bugs, contribute new algorithms, or improve the documentation, submit an issue in the community. We will reply to and communicate with you in a timely manner. -Welcome to join our QQ chatroom (Chinese): **833345709**. diff --git a/RELEASE.md b/RELEASE.md index ba30fc8..9564fff 100644 --- a/RELEASE.md +++ b/RELEASE.md @@ -1,4 +1,4 @@ -**Vega ver1.7.1 released:** +**Vega ver1.8.0 released:** **Introduction** @@ -19,4 +19,3 @@ Install Vega and the open source softwares that Vega depends on: **Cooperation and Contribution** Welcome to use Vega. If you have any questions or suggestions, need help, fix bugs, contribute new algorithms, or improve the documentation, submit an issue in the community. We will reply to and communicate with you in a timely manner. -Welcome to join our QQ chatroom (Chinese): **833345709**. diff --git a/docs/cn/algorithms/adelaide_ea.md b/docs/cn/algorithms/adelaide_ea.md index ad3b424..6cb60f6 100644 --- a/docs/cn/algorithms/adelaide_ea.md +++ b/docs/cn/algorithms/adelaide_ea.md @@ -98,16 +98,4 @@ mutate: ### 4. 算法输出 -输出结果包括一系列.pth文件(训练到配置文件中```num_iter```迭代次数的模型)、```result.csv```文件以及```pareto_front.csv```文件。```result.csv```文件记录了所有搜索模型,```pareto_front.csv```文件记录了所有```pareto_front```模型。.csv文件中包含了```encoding```、```flops```、```parameters```以及```mIOU```: - -1. ```encoding```:19位字符串表示了模型的结构,19位字符串以“_”为结尾(避免以“0”开头的```encoding```造成记录错误)。 - -2. ```flops```:记录的是模型的Macc值,如:1371603728表示的就是1.277G。 - -3. ```parameters```:记录的是模型的parameters值,如:3162900表示的就是3.016M。 - -4. ```mIOU```:记录的是训练到配置文件中num_iter迭代次数后的模型mIOU。 - -## 5. Benchmark - -请参考 [adelaide_ea.yml](https://github.com/huawei-noah/vega/blob/master/examples/nas/adelaide_ea/adelaide_ea.yml)。 +输出结果包括预训练模型、架构描述文件、和性能结果文件,其中架构描述文件中,`encoding`使用19位字符串表示了模型的结构,19位字符串以“_”为结尾(避免以“0”开头的`encoding`造成记录错误)。 diff --git a/docs/cn/algorithms/modnas.md b/docs/cn/algorithms/modnas.md index afa0aeb..3ef1443 100644 --- a/docs/cn/algorithms/modnas.md +++ b/docs/cn/algorithms/modnas.md @@ -302,10 +302,6 @@ search_space: 现在,我们在基础模型之上定义了一个超网,其中原来的卷积算子被指定的混合算子和原语替换。然后,可以通过将搜索空间与选定的优化器和估计器匹配来设置搜索例程。 -## 已知问题 - -- 目前, ModularNAS例程在单独的线程中运行,并监听Vega中的条件变量,这可能导致死锁。 - ## 参考文献 [^fn1]: Liu, H., Simonyan, K., and Yang, Y. Darts: Differentiable architecture search. ArXiv, abs/1806.09055, 2019b. 
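As a side note on the Adelaide-EA output described above: the architecture description records raw `flops` (MAcc) and `parameters` counts, and the human-readable figures quoted in these docs (1371603728 ≈ 1.277G, 3162900 ≈ 3.016M) follow from 1024-based scaling. The small Python sketch below only reproduces that conversion; it assumes nothing about the on-disk format of the description file.

```python
# Reproduce the human-readable figures quoted in these docs from raw counts:
# values are scaled by powers of 1024, so 1371603728 MAcc -> 1.277G and
# 3162900 parameters -> 3.016M.
def humanize(count: int) -> str:
    for unit, scale in (("G", 1024 ** 3), ("M", 1024 ** 2), ("K", 1024)):
        if count >= scale:
            return "{:.3f}{}".format(count / scale, unit)
    return str(count)

print(humanize(1371603728))  # 1.277G  (flops/MAcc example)
print(humanize(3162900))     # 3.016M  (parameters example)
```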
diff --git a/docs/cn/algorithms/nago.md b/docs/cn/algorithms/nago.md index 9ca0d42..a8b0c8d 100644 --- a/docs/cn/algorithms/nago.md +++ b/docs/cn/algorithms/nago.md @@ -88,7 +88,4 @@ search_algorithm: ### 5. 算法输出 -以下两个输出文件会在指定的输出目录中生成(默认输出目录是 `./example/tasks//output/nas/` ): - -- `output.csv` 文件包含了BOHB推荐的最优网络结构生成器的超参数值。 -- `reports.csv` 文件包含了BOHB搜索过程中评估过的所有超参组合的数据。 +包含最优超参的文件 `desc_nn.json` 在目录`./tasks//output/nas/`中。 diff --git a/docs/cn/algorithms/pba.md b/docs/cn/algorithms/pba.md index 917934e..5df6a6e 100644 --- a/docs/cn/algorithms/pba.md +++ b/docs/cn/algorithms/pba.md @@ -105,18 +105,3 @@ PBA算法在vega pipeline上使用参数配置文件中的默认参数(搜索阶 |:--:|:--:|:--:|:--:|:--:| |Ho et at.,2019|96.13%|96.92%|97.32%|97.42%| |Vega Pipeline|96.26%|97.18%| \ |97.57%| - -最终输出文件和目录如下: - -```text -output: - best_hps.json: 其中为pba算法搜索得到的最佳数据增广策略表及其搜索阶段的ID与得分 - hps.csv: 其中为pba算法搜索阶段得到的16组数据增广策略表的ID与得分 - score_board.csv: 其中为pba算法搜索阶段得到的16组数据增广操作每轮迭代过程中的具体得分与状态 -workers: - hpo: 其中16个文件夹分别为16组模型的最终结果,包括得分与模型等 - 0: - 1: - ... - 16: -``` diff --git a/docs/cn/algorithms/quant_ea.md b/docs/cn/algorithms/quant_ea.md index 26b01d0..5241d24 100644 --- a/docs/cn/algorithms/quant_ea.md +++ b/docs/cn/algorithms/quant_ea.md @@ -73,8 +73,4 @@ nas和fully_train两个过程会依次进行,搜索过程会搜出Pareto前沿 ### 5. 算法输出 -输出文件: - -- 搜索到的帕雷托前沿的模型经充分训练后得到的模型及结果 -- `reports.csv`包含了搜索过程中所有模型的encoding/flops/parameters/accuracy; -- `output.csv`包含了搜索出来的pareto front的信息。 +输出未搜索到的帕雷托前沿的模型经充分训练后得到的模型及结果,在目录`./tasks//output/nas/`中。 diff --git a/docs/cn/algorithms/sp_nas.md b/docs/cn/algorithms/sp_nas.md index 4bdfd19..da90d62 100644 --- a/docs/cn/algorithms/sp_nas.md +++ b/docs/cn/algorithms/sp_nas.md @@ -137,4 +137,4 @@ fine_tune: ## Benchmark -Benchmark配置信息请参考: [spnas.yml](https://github.com/huawei-noah/vega/tree/master/examples/nas/sp_nas/spnas.yml) +Benchmark配置信息请参考: [spnas.yml](https://github.com/huawei-noah/vega/blob/master/examples/nas/sp_nas/spnas.yml) diff --git a/docs/cn/algorithms/sr_ea.md b/docs/cn/algorithms/sr_ea.md index 74bc01f..52e5f70 100644 --- a/docs/cn/algorithms/sr_ea.md +++ b/docs/cn/algorithms/sr_ea.md @@ -81,7 +81,4 @@ mutate: ### 算法输出 -算法的输出有 - -- 搜索到的帕雷托前沿的模型经充分训练后得到的模型及结果。 -- 随机搜索及进化搜索过程中所有模型的结果reports.csv,以及帕雷托前沿的结果output.csv。 +算法的输出为搜索到的帕雷托前沿的模型经充分训练后得到的模型及结果。 diff --git a/docs/cn/developer/developer_guide.md b/docs/cn/developer/developer_guide.md index 4d5e5db..c428446 100644 --- a/docs/cn/developer/developer_guide.md +++ b/docs/cn/developer/developer_guide.md @@ -1,17 +1,11 @@ # 开发参考 +**已过时,待刷新。** + ## 1. Vega简介 Vega的重点特性是网络架构搜索和超参优化,在网络架构搜索流程中,搜索空间`Search Space`、搜索算法`Search Algorithm`是核心部分,并通过`Generator`来控制搜索的采样、更新和结束等流程步骤。 -搜索空间和搜索算法的类图如下所示: - -![Search Space类图](../../images/search_space_classes.png) - -搜索空间和搜索算法的流程图如下所示: - -![Search Space流程图](../../images/search_space_flow.png) - 以下就分别介绍下面几个部分: * 搜索空间 diff --git a/docs/cn/developer/fine_grained_search_space.md b/docs/cn/developer/fine_grained_search_space.md deleted file mode 100644 index fb2fe43..0000000 --- a/docs/cn/developer/fine_grained_search_space.md +++ /dev/null @@ -1,268 +0,0 @@ -# 搜索空间和细粒度网络指导 - -## 1. 细粒度简介 - -在Automl的大多数算法中搜索空间和网络是强相关的,每种搜索算法都会定义一系列与之识别的搜索空间和网络类型,这些网络类型大都在基础网络上做一些较少的改动,导致网络不能复用。另外,搜索空间和搜索算法也是强耦合的,每个算法都有自己的搜索空间的定义,这种搜索空间只能用于特定的场景,缺乏通用性和扩展能力。 - -我们对这些问题进行了分析,提出了通用的SearchSpace细粒度网络的方案: - -- 能够统一搜索空间的定义方式,同一种搜索空间能够适配不同的搜索算法 -- 能够对基础网络进行复用,提供细粒度的网络,通过组合的模式构建出不同形式的网络。 -- 搜索空间能够根据定义出来的网络自由扩展。 -- 支持多个backend - -## 2. 细粒度演示 - -### 2.1. 
使用细粒度构建一个网络 - -- 继承Module基类,并调用`@ClassFactory.register(ClassType.NETWORK)`注册网络 -- 沿用了pytorch的风格,我们会将`self.xx`的变量放入到模块中,默认按照顺序执行。 -- 如果需要自定义moduels的执行顺序,可以重写`call`方法 - -```python -from vega.common import ClassFactory, ClassType -from vega.modules.module import Module -from vega.modules.operators import ops - -@ClassFactory.register(ClassType.NETWORK) -class SimpleCnn(Module): - def __init__(self, block_nums=3, filters=32, kernel_size=3): - super(SimpleCnn, self).__init__() - in_channels = 3 - out_channels = filters - output_size = 32 - for i in range(block_nums): - block = ConvBlock(in_channels, out_channels, kernel_size) - self.add_module("block{}".format(i), block) - in_channels = out_channels - output_size = (output_size - kernel_size + 1) // 2 - self.fc1 = ops.Linear(in_channels * output_size * output_size, 120) - self.relu = ops.Relu() - self.fc2 = ops.Linear(120, 10) - -@ClassFactory.register(ClassType.NETWORK) -class ConvBlock(Module): - def __init__(self, in_channels, out_channels, kernel_size=3): - super(ConvBlock, self).__init__() - self.conv = ops.Conv2d(in_channels, out_channels, kernel_size) - self.bn = ops.BatchNorm2d(out_channels) - self.relu = ops.Relu() - self.pool = ops.MaxPool2d((2, 2)) - - def call(x): - x = self.conv(x) - x = self.bn(x) - x = self.relu(x) - return self.pool(x) - -model = SimpleCnn() -print(model) -``` - -### 2.2. 定义Search Space并使用随机搜索对网络进行搜索 - -- 利用Vega的pipeline能力 - - ```yaml - pipeline: [hpo] - - hpo: - pipe_step: - type: SearchPipeStep - - search_algorithm: - type: RandomSearch - - search_space: - type: SearchSpace - hyperparameters: - - key: backbone.block1.conv.in_channels - type: CATEGORY - range: [8, 16, 32, 64, 128, 256] - model: - model_desc: - modules: ["backbone"] - backbone: - type: SimpleCnn - dataset: - type: Cifar10 - common: - data_path: /cache/datasets/cifar10/ - batch_size: 256 - trainer: - type: Trainer - epochs: 1 - ``` - -- 编写代码单独使用 - -```python -from vega.algorithms.hpo.random_hpo import RandomSearch -from vega.core.search_space import SearchSpace -from vega.core.search_space.param_types import ParamTypes -from vega.core.search_space.params_factory import ParamsFactory -from vega.networks.network_desc import NetworkDesc - -# SearchSpace的定义 -params = ParamsFactory.create_search_space( - param_name='backbone.block1.conv.in_channels', - param_type=ParamTypes.CATEGORY, - param_range=[8, 16, 32, 64, 128, 256]) -search_space = SearchSpace().add_hp(params) -# 搜索算法 -id, desc = RandomSearch(search_space).search() -# 解析成模型 -model = NetworkDesc(desc).to_model() -print(model) -``` - -## 3. 网络模块化分组 - -为了方便网络模块的重用,我们将细粒度的模块按照其功能的不同,进行了分组,每个分组都有其相应的特性。 - -- **Networks**:定义一个常用的网络,属于粗粒度的网络,如ResNet 和FasterRcnn。网络是其他分组中的子模块。 -- **Backbone**:骨干网络。通常采用backbone+ head的模式组成一个网络。在很多场景下我们可以自由的替换不同的backbone已达到处理不同的featureMap。 -- **Head**:一般用于特征融合,例如作为分类或者回归问题。这样可以确保更换不同的头,以适应不同的场景。 -- **Cells:**组合多个blocks,我们定义了多种Cells来定义组合场景. -- **Blocks**:由基本的算子构成,组合成一个特定功能的block。我们提供给了一些常用的block,这些Block可以用于不同的网络中。 -- **Connections**:定义模块之间的连接关系,包括Sequential、Add等,以及一些条件分支的实现语句,如Repeat。 -- **Operators:**定义底层算子,如conv、batch_normal等,我们在此对每个算子做了多个平台的适配,统一了对外的输入输出和接口调用。 - -例如一个ResNet18的组成如下: - -![resnet](../../images/resnet.png) - -## 4. 
Search Space的定义 - -Search Space 分为**hyper_parameters**和**condition**两部分: - -**hyper_parameters** - -用于表示超参的定义,包含key,type和value三个设置:key表示超参的名称,type指定了超参的类型即ParamType,系统根据ParamType选择不同的采样方式。range表示定义的采样的范围。 - -我们当前预置了如下几种ParamType: - -- **INT**: 从一个整数范围上采样一个值,如果range=[0, 10],表示从0到10中随机采样出一个value - -- **INT_EXP:**在整数范围上按照10的指数级采样方式采样一个值,如range=[0, 1000],会通过log函数映射到[0,10,100,1000]这几个值上 - -- **INT_CAT**:表示从多个INT类型的数值中选择一个,如range=[16, 32, 64, 128] - -- **FLOAT:** 从一个Float范围上采样一个值,如range=[0.001, 1],采样一个值 - -- **FLOAT_EXP**:在Float类型范围上按照10的指数级采样方式采样一个值,如range=[0.001, 1],会通过log函数映射到[1,0.01,0.001]这几个值上 - -- **FLOAT_CAT :** 表示从多个FLOAT类型的数值中选择一个,如range=[0.1, 0.01, 0.001, 0.99] - -- **STRING:** 表示从多个字符串中选择一个,如range=[‘block1’, 'block2', 'block3', 'block4'] - - - -**condition** - -用于表示2个节点之间的关系,当parent满足一定条件时,child节点才会生效 - -![img](http://hi3ms-image.huawei.com/hi/staticimages/hi3msh/images/2019/0731/15/5d414a699c009.png)![img](http://image.huawei.com/tiny-lts/v1/images/9ed3126327ed5a8abb80_844x290.png@900-0-90-f.png) - -这里用一个condition_range来传入条件的值或者范围。具体的: - -- **EQUAL**:condition_range只能包含一个parent的数值,表示child被选择,需要满足parent的值**等于**该数值; -- **NOT_EQUAL**:condition_range可以包含一个或多个parent的数值,表示child被选择,需要满足parent的值**不等于**condition_range中的提供的所有数值; -- **IN**:如果parent是range类型的,则condition_range必须包含两个值表示该cond_range的最小值和最大值,child被选中必须满足parent当前值落在该cond_range范围内;如果parent是CAT类型的,则condition_range必须包含一个或者多个parent数值,child被选中必须满足parent当前值落在condition_range中的某个数值上。 - -**forbidden** - -用于表示2节点之间的值的互斥关系,节点1含有某个值时,节点2的某些值不会被选择 - -## 5. 支持多个Backend - -我们对底层架构做了封装,统一上层的接口来适配多个不同的backend。其主要核心功能分为: - -- **Module**:实现自定义模块的需要继承的基类,统一了各个平台的对于模块内部操作的实现。 -- **ops**:上层调用算子的接口,统一了不同平台同一功能算子的命名和输入输出。 -- **Serializable:** 对模块中的超参和层次结构进行提取和解析,并序列化成json格式的字典。 - -![fine_grained_space](../../images/fine_grained_space.png) - -## 6. 
如何进行细粒度网络的开发 - -对于算法开发者来说,我们希望其聚焦于网络结构和超参的搜索算法的开发,而不用关心网络本身构建。当前已预置了一些Modules和Networks能够提供该类型网络的超参定义和架构定义的描述,算法开发者只需要根据其描述通过搜索算法装配成新的网络。 - -### 6.1 定义一个Modules - -为了方便大家的使用,我们继承了pytorch的开发习惯,仅仅需要几行的变化就可以成为细粒度中的一个Module。 - -- 继承Module类,注册到`ClassFactory.register(ClassType.NETWORK)`中 -- 使用ops下的算子替换nn下的算子 -- 对于顺序执行的网络结构,我们默认会按照self的顺序生成网络,无需再实现forward方法 - -```python -@ClassFactory.register(ClassType.NETWORK) -class ConvBlock(Module): - def __init__(self, in_channels, out_channels, kernel_size=3): - super(ConvBlock, self).__init__() - self.conv = ops.conv2d(in_channels, out_channels, kernel_size) - self.bn = ops.batch_norm2d(out_channels) - self.relu = ops.relu() - self.pool = ops.max_pool2d((2, 2)) -``` - -- 如果对于输入需要进行特殊的处理,可以根据自己的需要重写`call`方法 - - ```python - @ClassFactory.register(ClassType.NETWORK) - class MixedOp(Module): - - def __init__(self, C, stride, ops_cands): - """Init MixedOp.""" - super(MixedOp, self).__init__() - self.add_spaces(ops_cands, OPS[ops_cands](C, stride, True)) - - def call(self, x, weights=None, *args, **kwargs): - """Call function of MixedOp.""" - if weights is None: - for model in self.children(): - x = model(x) - return x - return ops.add_n(weights[idx] * op(x) for idx, op in enumerate(self.children()) if weights[idx] != 0) - ``` - -### 6.2 使用Connections组装多个模块 - -我们默认都会采用Sequential的方式组装多个网络,当其他的连接方法时需要手动调用连接的方法。如下面样例采用Add作为两个网络的加和拼接 - -```python -@ClassFactory.register(ClassType.NETWORK) -class BasicBlock(Module): - """Create BasicBlock SearchSpace.""" - - def __init__(self, inchannel, outchannel, groups=1, base_width=64, stride=1): - super(BasicBlock, self).__init__() - base_conv = BasicConv(inchannel,outchannel) - shortcut = ShortCut(inchannel,outchannel) - self.add_block = Add(base_conv, shortcut) - self.relu = ops.relu() -``` - -开发者也可以自己定义Connections: - -- 继承`ConnectionsDecorator`,并注册到`ClassFactory.register(ClassType.NETWORK)` -- init函数接受入参为`*models`,表示接受多个模块,我们会自动调用add_module将这些模块设置到modules中 -- 重写`call`方法,通过`self.children()`获取已经添加的模块,并进行详细的操作 - -```python -@ClassFactory.register(ClassType.NETWORK) -class Sequential(ConnectionsDecorator): - """Sequential Connections.""" - - def __init__(self, *models): - super(Sequential, self).__init__(*models) - - def compile(self, inputs): - """Override compile function, conect models into a seq.""" - output = inputs - models = self.children() - for model in models: - output = model(output) - return output -``` diff --git a/docs/cn/developer/new_algorithm.md b/docs/cn/developer/new_algorithm.md index 73439de..dead967 100644 --- a/docs/cn/developer/new_algorithm.md +++ b/docs/cn/developer/new_algorithm.md @@ -1,5 +1,7 @@ # 算法开发指导 +**已过时,待刷新。** + 向Vega库中新增算法,如新的网络搜索算法、模型压缩算法、超参优化算法、数据增广算法等,需要基于Vega提供的基础类进行扩展。AutoML算法的核心的核心是搜索空间、搜索算法、网络构造和评估,新增算法主要考虑这几个方面。 ## 1. 
新增架构搜索算法 diff --git a/docs/cn/user/ascend_910.md b/docs/cn/user/ascend_910.md new file mode 100644 index 0000000..5ba594d --- /dev/null +++ b/docs/cn/user/ascend_910.md @@ -0,0 +1,146 @@ +# 部署Ascend环境 + +请参考Ascend官方文档部署Ascend环境,如下安装指导是安装过程中的关键步骤,若安装过程中出现问题,请以官方文档为准。 +在进行部署前,请在官方网站下载安装包。 + +## 1 检查已安装的Driver和CANN版本 + +若是全新的Ascend主机,需要检查是否存在`/usr/local/HiAi`目录,若存在,需要使用root账号执行如下命令卸载该目录: + +```bash +/usr/local/HiAi/uninstall.sh +``` + +需要使用非root账号执行如下命令创建`Ascend`目录,并给该目录设置为用户`HwHiAiUser`可访问: + +```bash +mkdir /usr/local/Ascend/ +sudo chown -R :HwHiAiUser /usr/local/Ascend/ +sudo chmod -R 750 /usr/local/Ascend/ +``` + +若`/usr/local/Ascend/`已存在,则需要在安装前需要检查是否已安装了较旧的Driver和CANN包,请使用如下命令查询各个组件的版本号: + +```bash +cat /usr/local/Ascend/driver/version.info +cat /usr/local/Ascend/firmware/version.info +cat /usr/local/Ascend/nnae/latest/ascend_nnae_install.info +cat /usr/local/Ascend/ascend-toolkit/latest/arm64-linux/ascend_toolkit_install.info +cat /usr/local/Ascend/tfplugin/latest/ascend_tfplugin_install.info +``` + +如上`/usr/local/Ascend`目录是较常使用的目录,也可能是`` + +若版本号较低,需要使用root账号执行卸载: + +```bash +/usr/local/Ascend/driver/script/uninstall.sh +/usr/local/Ascend/firmware/script/uninstall.sh +/usr/local/Ascend/nnae/latest/script/uninstall.sh +/usr/local/Ascend/ascend-toolkit/latest/arm64-linux/script/uninstall.sh +/usr/local/Ascend/tfplugin/latest/script/uninstall.sh +``` + +若nnae、ascend-toolkit、tfplugin使用非root安装,请使用该用户卸载。 + +## 2 安装Driver和CANN + +使用root用户执行如下命令安装,如下版本号供参考: + +```bash +chmod +x *.run +./A800-9000-npu-driver_21.0.3.1_linux-aarch64.run --full +./A800-9000-npu-firmware_1.79.22.4.220.run --full +``` + +执行如下命令,确认安装是否成功: + +```bash +npu-smi info +``` + +使用非root用户安装其他包,在安装前,需要将该用户设置为和`HwHiAiUser`同组: + +```bash +usermod -a -G HwHiAiUser +``` + +```bash +./Ascend-cann-nnae_5.0.T306_linux-aarch64.run --install +./Ascend-cann-nnrt_5.0.T306_linux-aarch64.run --install +./Ascend-cann-tfplugin_5.0.T306_linux-aarch64.run --install +./Ascend-cann-toolkit_5.0.T306_linux-aarch64.run --install +``` + +安装完成后,根据提示需要重启主机。 + +## 3 配置rank_table_file + +请参考Ascend的官方文档,执行`hccn_tool`命令,生成`rank_table_file`。 + +## 4 配置环境变量 + +需要配置如下环境变量,建议放入`~/.bashrc`中: + +```bash +export HOME_DIR=/home/ +export HOST_ASCEND_BASE=/usr/local/Ascend +export JOB_ID= +export DEVICE_ID=0 +export RANK_TABLE_FILE= +export RANK_ID=0 +export RANK_SIZE=8 +export NPU_VISIBLE_DEVICES=0,1,2,3,4,5,6,7 +export BATCH_TASK_INDEX=0 +export TF_CPP_MIN_LOG_LEVEL=3 +export LD_PRELOAD=export LD_PRELOAD=/lib64/libgomp.so.1:$HOME_DIR/.local/lib/python3.7/site-packages/sklearn/__check_build/../../scikit_learn.libs/libgomp-d22c30c5.so.1.0.0 +export GLOG_v=3 +export USE_NPU=True +source /usr/local/Ascend/tfplugin/set_env.sh +source /usr/local/Ascend/ascend-toolkit/set_env.sh +source /usr/local/Ascend/nnae/set_env.sh +export PATH=$HOME_DIR/.local/bin:$PATH +export PYTHONPATH=$HOME_DIR/.local/lib/python3.7/site-packages:$PYTHONPATH +export LD_LIBRARY_PATH=$HOME_DIR/.local/lib/python3.7/site-packages/vega/security/kmc/aarch64:$LD_LIBRARY_PATH +``` + +如上``为当前用户名,``请设置一个整数,如`10087`,``请设置为该文件的全路径。 + +## 5 安装Vega及依赖包 + +先升级pip到最新版本: + +```bash +pip3 install --user --upgrade pip +``` + +先安装nnae、topi、hccl等组件包: + +```bash +export fwk_path='/usr/local/Ascend/nnae/latest' +export te_path=${fwk_path}'/fwkacllib/lib64/te-*.whl' +export topi_path=${fwk_path}'/fwkacllib/lib64/topi-*.whl' +export hccl_path=${fwk_path}'/fwkacllib/lib64/hccl-*.whl' +pip3 install --user ${te_path} +pip3 install --user ${topi_path} +pip3 install --user ${hccl_path} +``` + 
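Before installing the remaining packages, it may help to confirm that the wheels installed above are importable from the user site-packages. The following Python check is only a sketch; the module names `te`, `topi`, and `hccl` are assumed to match the wheel names and may differ between CANN releases.

```python
# Quick sanity check that the Ascend wheels installed above are importable.
# The module names (te, topi, hccl) are assumed to match the wheel names;
# adjust them if your CANN release packages them differently.
import importlib

for name in ("te", "topi", "hccl"):
    try:
        module = importlib.import_module(name)
        print(f"{name}: OK ({getattr(module, '__version__', 'version unknown')})")
    except ImportError as err:
        print(f"{name}: NOT importable -> {err}")
```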
+再安装noah-vega,因Ascend环境特殊性,注意不要安装依赖包: + +```bash +pip3 install --user --no-deps noah-vega +``` + +再通过如下的命令查看Vega的依赖包: + +```bash +pip3 show noah-vega +``` + +另外要注意的是,dask和distributed这两个包,需要安装如下版本: + +```bash +pip3 install --user distributed==2021.7.0 +pip3 install --user dask==2021.7.0 +``` diff --git a/docs/cn/user/config_reference.md b/docs/cn/user/config_reference.md index 0ece95f..a75672c 100644 --- a/docs/cn/user/config_reference.md +++ b/docs/cn/user/config_reference.md @@ -159,6 +159,7 @@ fully_train: common: data_path: /cache/datasets/cifar10/ ``` +**注**: HCCL支持多机多卡,Horovod目前只支持单机多卡。 ## 3. NAS和HPO配置项 diff --git a/docs/cn/user/deployment.md b/docs/cn/user/deployment.md index 9008932..6c81f52 100644 --- a/docs/cn/user/deployment.md +++ b/docs/cn/user/deployment.md @@ -2,18 +2,14 @@ ## 1. 本地集群部署 -### 1.1 部署 - 本地集群部署Vega,需满足如下条件: -1. Ubuntu 18.04 or later。 -2. CUDA 10.0 -3. Python 3.7 -4. pip3 - -**注: 若需要在Ascend 910集群上部署,请和我们联系。** +1. Ubuntu 18.04 or EulerOS 2.0 SP8 +2. CUDA 10.0 or CANN 20.1 +3. Python 3.7 or later +4. pytorch, tensorflow(>1.14, <2.0) or mindspore -集群在部署时,需要在每个集群节点中安装vega和一些必备的软件包,可执行如下命令进行安装: +集群在部署时,需要在每个集群节点中安装vega: ```bash pip3 install --user --upgrade noah-vega @@ -25,23 +21,6 @@ pip3 install --user --upgrade noah-vega 以上工作完成后,集群已部署完成。 -### 1.2 校验 - -集群部署完成后,请执行以下命令检查集群是否可用: - -```bash -vega-verify-cluster -m -s ... -n -``` - -例如: - -```bash -vega-verify-cluster -m 192.168.0.2 -s 192.168.0.3 192.168.0.4 -n /home/alan/nfs_folder -``` - -校验结束后,会有显示"All cluster check items have passed."。 -若校验中出现错误,请根据异常信息调整集群。 - ## 参考 ### 安装MPI  @@ -76,6 +55,24 @@ vega-verify-cluster -m 192.168.0.2 -s 192.168.0.3 192.168.0.4 -n /home/alan/nfs_ ### 构建NFS +NFS是集群中用于数据共享的常用系统,若你所使用的集群中已经有NFS系统,请直接使用已有的NFS系统。 + +以下配置NFS的简单指导,可能不适用于所有的NFS系统,请根据实际集群环境调整。 + +在配置NFS服务器前,需要确定当前用户的在集群中的各个主机上的UID是否是同样的数值。若UID不相同,会造成无法访问NFS共享目录,需要调整当前用户的UID为同一个数值,同时要避免和其他用户的UID冲突。 + +查询当前用户的UID: + +```bash +id +``` + +修改当前用的UID,(请慎重修改,请咨询集群系统管理员获取帮助): + +```bash +sudo usermod -u +``` + NFS服务器设置: 1. 安装NFS服务器: @@ -97,13 +94,7 @@ NFS服务器设置: sudo bash -c "echo '/home//nfs_cache *(rw,sync,no_subtree_check,no_root_squash,all_squash)' >> /etc/exports" ``` -4. 将共享目录设置为`nobody`用户 - - ```bash - sudo chown -R nobody: //nfs_cache - ``` - -5. 重启nfs服务器: +4. 重启nfs服务器: ```bash sudo service nfs-kernel-server restart diff --git a/docs/cn/user/evaluate_service.md b/docs/cn/user/evaluate_service.md deleted file mode 100644 index 4d01af4..0000000 --- a/docs/cn/user/evaluate_service.md +++ /dev/null @@ -1,319 +0,0 @@ -# Evaluate Service - -## 1. 简介 - -模型评估服务是用于评估模型在特定硬件设备上的性能,如评估剪枝和量化后的模型在Atlas 200 DK上的准确率、模型大小和时延等。 - -评估服务目前支持的硬件设备为Davinci推理芯片(Atlas200 DK、ATLAS300产品和开发板环境Evb)和手机,后继会扩展支持更多的设备。 - -评估服务为CS架构, 评估服务在服务端部署, 客户端通过`REST`接口向服务端发送评估请求和获取结果。Vega在进行网络架构搜索时,可以利用评估服务进行实时检测模型性能。在搜索阶段产生备选网络后,可以将该网络模型发送给评估服务,评估服务完成模型评估后,返回评估结果给Vega,Vega根据评估结果,进行后继的搜索。这种实时的在实际的设备上的评估,有利于搜索出对实际硬件更加友好的网络结构。 - -## 2. 规格 - -支持的模型和硬件设备 - -| 算法 | 模型 | Atlas 200 DK |Atlas 300 | Bolt | -| :--: | :--: | :--: | :--: | :--: | -| Prune-EA | ResNetGeneral | √ | √ | √| -| ESR-EA | ESRN | | √ | √ | -| SR-EA | MtMSR | | √ | √ | -| Backbone-nas | ResNet | √ | √ | | -| CARS | CARSDartsNetwork | | √ | | -| Quant-EA | ResNetGeneral | √ | √ | √ | -| CycleSR | CycleSRModel | | | | -| Adlaide-EA | AdelaideFastNAS | | √ | | -| Auto-Lane | ResNetVariantDet | | | -| Auto-Lane | ResNeXtVariantDet | | | - -## 3. 评估服务部署 - -### 3.1 环境安装配置(可选) - -根据评估硬件(Atlas200 DK 、Atlas300、或者手机),分别按照如下章节指导配置。 - -### 3.1.1 安装 Atlas200 DK 环境(可选) - -#### 3.1.1.1 准备工作 - -1. 准备好一张8GB以上SD卡及读卡器。 -2. 
已安装 ubuntu 16.04.3 的服务器一台。 -3. 下载系统镜像: [ubuntu-16.04.3-server-arm64.iso](http://old-releases.ubuntu.com/releases/16.04.3/ubuntu-16.04.3-server-arm64.iso) -4. 下载制卡脚本: make_sd_card.py 和 make_ubuntu_sd.sh,下载地址: -5. 下载开发者运行包: mini_developerkit-1.3.T34.B891.rar,下载地址: -6. 解压开发者运行包,并上传到用户目录下。 - -#### 3.1.1.2 安装和配置Atlas200 DK - -1. 将SD卡放入读卡器,并将读卡器与Ubuntu服务器的USB接口连接。 -2. Ubuntu服务器上安装依赖项: - - ```bash - apt-get install qemu-user-static binfmt-support python3-yaml gcc-aarch64-linux-gnu g++-aarch64-linux-gnu - ``` - -3. 执行如下命令查找SD卡所在的USB设备名称。 - - ```bash - fdisk -l - ``` - -4. 运行SD制卡脚本开始制卡,此处“USB设备名称”即为上一步得到的名称。 - - ```bash - python3 make_sd_card.py local USB设备名称 - ``` - -5. 制卡成功后,将SD卡从读卡器取出并插入Atlas 200 DK开发者板卡槽, 上电Atlas 200 DK开发者板。 - -#### 3.1.1.3 安装和配置评估服务器环境 - -1. 下载安装DDK包及同步lib库 - - 下载地址: - 安装步骤可参考官方文档: - -2. 配置交叉编译环境 - 需要在评估服务器上安装Atlas200 DK所需的编译环境,执行如下命令: - - ```bash - sudo apt-get install g++-aarch64-linux-gnu - ``` - -3. 在服务器的 `/etc/profile` 中配置如下环境变量,注意文件中的`/home/`要配置为正确的路径: - - ```bash - export DDK_PATH=/home//huawei/ddk - export PYTHONPATH=$DDK_PATH/site-packages/te-0.4.0.egg:$DDK_PATH/site-packages/topi-0.4.0.egg - export LD_LIBRARY_PATH=$DDK_PATH/uihost/lib:$DDK_PATH/lib/x86_64-linux-gcc5.4 - export PATH=$PATH:$DDK_PATH/toolchains/ccec-linux/bin:$DDK_PATH/uihost/bin - export TVM_AICPU_LIBRARY_PATH=$DDK_PATH/uihost/lib/:$DDK_PATH/uihost/toolchains/ccec-linux/aicpu_lib - export TVM_AICPU_INCLUDE_PATH=$DDK_PATH/include/inc/tensor_engine - export TVM_AICPU_OS_SYSROOT=/home//tools/sysroot/aarch64_Ubuntu16.04.3 - export NPU_HOST_LIB=/home//tools/1.32.0.B080/RC/host-aarch64_Ubuntu16.04.3/lib - export NPU_DEV_LIB=/home//tools/1.32.0.B080/RC/host-aarch64_Ubuntu16.04.3/lib - ``` - -4. 配置ssh互信 - 由于评估服务器和Atlas200 DK 之间需要进行文件传输以及远端命令的执行,因此需要分别在两个环境上配置ssh互信,确保脚本能够自动化运行。 - - a. 安装ssh:`sudo apt-get install ssh` - b. 生成密钥:`ssh-keygen -t rsa` 会在~/.ssh/文件下生成id_rsa, id_rsa.pub两个文件,其中id_rsa.pub是公钥 - c. 确认目录下的authorized_keys文件。若不存在需要创建, 并`chmod 600 ~/.ssh/authorized_keys`改变权限。 - d. 拷贝公钥:分别将公钥id_rsa.pub内容拷贝到其他机器的authorized_keys文件中。 - **注意**: 以上步骤需要在评估服务器和Atlas 200 DK 分别执行一遍, 确保这两台机器之间ssh互信。 - -### 3.1.2 安装配置Atlas300环境(可选) - -参考华为图灵官方教程自行安装配置: Atlas 300I 推理卡 用户指南(型号 3000) - -注意:上述文档可能发生更新, 请及时关注我们发布的更新或自行获取得到相应的指导文档。环境安装后一般需要设置相应的环境变量, 请参考上述指导文档进行相应配置。为了方便您更好地进行环境配置, 我们提供了相关环境变量配置的模板 [env_atlas300.sh](https://github.com/huawei-noah/vega/blob/master/evaluate_service/hardwares/davinci/env/env_atlas300.sh) 供您参考, 请您以实际安装环境为准。 - -由于Atlas300环境安装较为复杂, 为了确保您的环境安装正确, 请您完成安装后运行检查环境脚本[check_atlas300.sh](https://github.com/huawei-noah/vega/blob/master/evaluate_service/hardwares/davinci/env/check_atlas300.sh)。 - -### 3.1.3 安装和配置手机环境(可选) - -#### 3.1.3.1 准备工作 - -1. 准备Kirin 980手机1台,推荐Nova 5。 -2. 已安装 ubuntu 16.04.3 的服务器一台。 - -#### 3.1.3.2 安装和配置评估服务器和手机 - -1. 在linux 系统服务器上安装adb工具。 - - ```bash - apt install adb - ``` - -2. 通过USB端口将手机接入到评估服务器,并打开开发者选项,并在评估服务器上执行如下命令: - - ```bash - adb devices - ``` - - 出现如下信息即为连接成功: - - ```text - List of devices attached - E5B0119506000260 device - ``` - -#### 3.1.3.3 设备连接失败的处理 - -若在服务器上通过 `adb devices` 命令不能获取到设备,则可以通过以下步骤尝试连接: - -1. 在评估服务器上执行`lsusb`命令, 出现设备列表, 找到设备的ID。 - -2. 编辑51-android.rules 文件: - - ```bash - sudo vim /etc/udev/rules.d/51-android.rules - ``` - - 写入如下内容 - - ```text - SUBSYSTEM=="usb", ATTR{idVendor}=="12d1", ATTR{idProduct}=="107e", MODE="0666" - ``` - - 注意: 上面的12d1和107e是上一步查询到的ID。 - -3. 编辑adb_usb.ini 文件: - - ```bash - vim ~/.android/adb_usb.ini - ``` - - 写入如下内容: - - ```text - 0x12d1 - ``` - - 注意: 上面的12d1是步骤5.1查询到的ID。 - -4. 
重启adb服务 - - ```bash - sudo adb kill-server - sudo adb start-server - ``` - -5. 再次执行`adb devices`,确认是否连接成功。 - -### 3.1.4 安装和配置麒麟990手机NPU环境(可选) -3.1.4.1 准备工作 -1. 准备Kirin 990手机1台,推荐mate30 pro。 -2. 已安装 ubuntu 16.04.3 的服务器一台。 - -3.1.4.2 安装和部署 -1 下载HUAWEI HiAI DDK, 下载链接:https://developer.huawei.com/consumer/cn/doc/development/hiai-Library/ddk-download-0000001053590180, -选择下载hwhiai-ddk-100.500.010.010.zip, 下载后解压到"/data/tools/"目录下, 解压后目录结构为"/data/tools/hwhiai-ddk-100.500.010.010/"。 -2 拷贝相关依赖文件到手机 -把tools_sysdbg目录下所有内容拷贝到手机上的/data/local/tmp目录下 -```bash -adb push /data/tools/hwhiai-ddk-100.500.010.010/tools/tools_sysdbg/* /data/local/tmp/ -``` -3 进入到手机上, 设置环境变量, 添加文件执行权限 -```bash -adb shell -export LD_LIBRARY_PATH=/data/local/tmp/ -chmod +x /data/local/tmp/model_run_tool -chmod +x /data/local/tmp/data_proc_tool -``` -4 安装adb调试工具 -参考3.1.3.2节。 - -### 3.2 安装和启动评估服务 - -1 安装:在评估服务器上安装vega, 安装时加上`--no-dependencies`参数, 不安装依赖项; -2 启动:运行命令`vega-evaluate_service-service -i {your_ip_adress} -w {your_work_path}`, 其中`-i`参数指定当前使用的服务器的ip地址, -`-w`参数指定工作路径, 程序运行时的中间文件将存储在该目录下,请使用绝对路径。 -其他可选参数的设置可查看该命令的帮助信息, 一般情况下建议采用默认值。 - -## 4. 使用评估服务 - -使用评估服务时, 只需要在配置文件中进行简单的几行配置即可, 如下面示例所示: - -```yaml -evaluator: - type: Evaluator - device_evaluator: - type: DeviceEvaluator - hardware: "Davinci" - remote_host: "http://192.168.0.2:8888" -``` - -`evaluator`的配置与您的`trainer`配置处于同一层级。其中需要配置的参数有2个, `hardware`为您指定的需要评估的硬件设备,当前支持`Davinci`和`Bolt`两种, -`remote_host`为您部署的评估服务器的ip和端口号。 - -## 5. 自定义评估服务(可选) - -vega评估服务当前已经支持Davinci推理芯片和手机等端侧设备的评估, 但新的硬件设备是层出不穷的, 因此评估服务提供了可自定义的扩展能力。 - -评估服务的流程是: - -1. 获取输入信息 -2. 根据需要评估的硬件实例化一个具体的硬件实例 -3. 模型转换 -4. 推理 -5. 返回推理结果 - -对于不同的硬件, 步骤3和4可能是不同的。 因此当需要添加新的硬件时, 需要根据具体硬件的用法实现这2个步骤。具体来说, 分以下几个步骤: - -在hardwares目录下添加一个硬件类, 并实现`convert_model`和`inference`两个接口 如下: - -```python -from class_factory import ClassFactory -@ClassFactory.register() -class MyHardware(object): - - def __init__(self, optional_params): - pass - - def convert_model(self, backend, model, weight, **kwargs): - pass - - def inference(self, converted_model, input_data, **kwargs): - - return latency, output -``` - -上面的示例中定义了`MyHardware`类, 并通过`@ClassFactory.register()`进行注册。 类中实现了`convert_model`和`inference`两个接口, `backend`表示模型是通过何种训练框架保存的, 如`pytorch`, `tensorflow`等, 为模型解析提供必要的辅助信息,`model`和`weight`分别表示需要转换的模型和权重,`weight`是非必须的,其值可能为空。`converted_model`和`input_data`分别表示转换之后的模型和输入数据。 - -然后在hardware的`__init__.py`中加入自定义的类。 - -```python -from .my_hardware import MyHardware -``` - -## 6. 
FAQ - -### 6.1 Pytorch模型转换caffe模型 - -如果需要将pytorch模型转换为caffe模型,请下载[PytorchToCaffe](https://github.com/xxradon/PytorchToCaffe)获取并放在`./third_party`目录下(third_party目录与vega处于同一目录层级)。 - -注意: 该第三方开源软件不支持pytorch1.1版本, 并且如果您使用原生torchvisoin中的模型, 当torchvision版本高于0.2.0时, 您需要做以下额外修改: -修改`pytorch_to_caffe.py`文件, 增加以下内容: - -```python - -def _flatten(raw , input, * args): - x = raw(input, *args) - if not NET_INITTED: - return x - layer_name=log.add_layer(name='flatten') - top_blobs=log.add_blobs([x],name='flatten_blob') - layer=caffe_net.Layer_param(name=layer_name,type='Reshape', - bottom=[log.blobs(input)],top=top_blobs) - start_dim = args[0] - end_dim = len(x.shape) - if len(args) > 1: - end_dim = args[1] - dims = [] - for i in range(start_dim): - dims.append(x.shape[i]) - cum = 1 - for i in range(start_dim, end_dim): - cum = cum * x.shape[i] - dims.append(cum) - if end_dim != len(x.shape): - cum = 1 - for i in range(end_dim, len(x.shape)): - cum = cum * x.shape[i] - dims.append(cum) - layer.param.reshape_param.shape.CopyFrom(caffe_net.pb.BlobShape(dim=dims)) - log.cnet.add_layer(layer) - return x - - -torch.flatten = Rp(torch.flatten,_flatten) -``` - -### 6.2 Pytorch 1.2版本及以下模型评估 - -如果您使用的`Pytorch`版本在1.2及以下, 在`Pytorch`模型转换为`onnx`模型时可能会遇到算子不支持的情况。 如`upsample_bilinear2d`算子不支持, 您可以选择升级`Pytorch`版本到1.3及以上, 或者您可以从`Pytorch`官方代码库中获取`pytorch/torch/onnx/symbolic_opset10.py`, 拷贝到对应的`Pytorch`安装目录下。 - -### 6.3 找不到`model_convert.sh`等脚本错误 - -评估服务中有很多`shell`脚本, 其文件格式应该为`unix`格式, 如果在windows上打开过相应文件, 或是`git`下载代码时进行了相应转换, 文件格式可能会变成`dos`格式, 需要转换为`unix`格式。 diff --git a/docs/cn/user/faq.md b/docs/cn/user/faq.md index 1a414c4..7d7a701 100644 --- a/docs/cn/user/faq.md +++ b/docs/cn/user/faq.md @@ -2,22 +2,14 @@ ## 1. 常见异常汇总 -### 1.1 异常 `ModuleNotFoundError: No module named 'mmdet'` - -运行SP-NAS等算法时,需要单独安装开源软件mmdetection,具体安装步骤请参考该软件的安装指导。 - -### 1.2 异常 `ModuleNotFoundError: No module named 'nasbench'` - -运行Benchmark时,需要单独安装开源软件NASBench,具体安装步骤请参考该软件的安装指导。 - -### 1.3 异常 `Exception: Failed to create model, model desc={}` +### 1.1 异常 `Exception: Failed to create model, model desc={}` 出现该类问题的原因有两类: 1. 该网络未注册到Vega中,在调用该网络前,需要使用`@ClassFactory.register`注册该网络,可参考示例。 2. 
该网络的模型描述文件有错误,可通过异常信息中的``定位问题的原因。 -### 1.5 异常 `ImportError: libgthread-2.0.so.0: cannot open shared object file: No such file or directory` +### 1.2 异常 `ImportError: libgthread-2.0.so.0: cannot open shared object file: No such file or directory` 该异常可能是因为opencv-python缺少了系统依赖库,可尝试使用如下命令解决: @@ -25,7 +17,7 @@ sudo apt install libglib2.0-0 ``` -### 1.6 安装过程中出现异常 `ModuleNotFoundError: No module named 'skbuild'`,或者在安装过程中卡在`Running setup.py bdist_wheel for opencv-python-headless ...` +### 1.3 安装过程中出现异常 `ModuleNotFoundError: No module named 'skbuild'`,或者在安装过程中卡在`Running setup.py bdist_wheel for opencv-python-headless ...` 该异常可能是pip的版本过低,可尝试使用如下命令解决: @@ -33,17 +25,13 @@ sudo apt install libglib2.0-0 pip3 install --user --upgrade pip ``` -### 1.7 异常 `PermissionError: [Errno 13] Permission denied: 'dask-scheduler'`, `FileNotFoundError: [Errno 2] No such file or directory: 'dask-scheduler': 'dask-scheduler'`, 或者 `vega: command not found` +### 1.4 异常 `PermissionError: [Errno 13] Permission denied: 'dask-scheduler'`, `FileNotFoundError: [Errno 2] No such file or directory: 'dask-scheduler': 'dask-scheduler'`, 或者 `vega: command not found` 这类异常一般是因为在 `PATH` 路径中未找到 `dask-scheduler` ,一般该文件会安装在 `//.local/bin` 路径下。 在安装完 Vega ,会自动添加 `//.local/bin/` 到 `PATH` 环境变量中,但不会即时生效,需要该用户执行`source ~/.profile`,或者再次登录服务器后才会生效。 若问题还未解决,可先检查在 `//.local/bin` 路径下是否存在 `dask-scheduler` 文件。 若该文件已存在,则需要手动添加 `//.local/bin` 到环境变量 `PATH` 中。 -### 1.8 Pytorch模型评估时,出现异常 `FileNotFoundError: [Errno 2] No such file or directory: '/torch2caffe.prototxt'` - -请参考文档 [Evaluate Service](./evaluate_service.md) 6.1 章节。 - ## 2. 常见配置问题汇总 ### 2.1 如何配置多GPU/NPU支持 @@ -112,29 +100,11 @@ general: level: info # debug|info|warn|error| ``` -### 2.5 如何实时查看搜索进展 - -Vega提供了模型搜索过程可视化进展,用户只需在`USER.yml` 中配置`VisualCallBack`, 如下所示 - -```yaml - trainer: - type: Trainer - callbacks: [VisualCallBack, ] -``` - -可视化信息输出目录为: - -```text -./tasks//visual -``` - -在主机上执行`tensorboard --logdir PATH`如下启动服务,在浏览器上查看进展。具体可参考tensorboard的相关命令和指导。 - -### 2.6 如何终止后台运行的vega程序 +### 2.5 如何终止后台运行的vega程序 Vega在多个GPU/NPU场景中,会启动dask scheduler、dask worker及训练器,若仅仅杀死Vega主进程会造成部分进程不会及时的关闭,其占用的资源一直不会被释放。 -可使用如下命令终止Vega应用程序: +在安全模式下,可使用如下命令终止Vega应用程序: ```bash # 查询运行中的Vega主程序的进程ID @@ -146,3 +116,57 @@ vega-kill -a # 若主程序被非常正常关闭,还存在遗留的相关进程,可使用强制清理 vega-kill -f ``` + +在普通模式下,使用如下命令: + +```bash +vega-kill -s -l +vega-kill -s -p +vega-kill -s -a +vega-kill -s -f +``` + +### 2.6 如何查询正在运行的vega程序 + +在安全模式下,可通过如下命令查询正在运行的Vega应用程序: + +```bash +vega-process +``` + +在普通模式下,可通过如下命令查询: + +```bash +vega-process -s +``` + +### 2.7 如何查询vega程序运行进度 + +在安全模式下,可通过如下命令查询正在运行的Vega程序运行进度: + +```bash +vega-progress -t -r +``` + +在普通模式下,可通过如下命令查询: + +```bash +vega-progress -s -t -r +``` + +### 2.8 如何使用vega程序执行模型推理 + +可通过命令`vega-inference`执行分类模型推理,通过执行命令`vega-inference-det`执行检测模型推理。 + +通过如下命令查询命令参数。 + +```bash +vega-inference --help +vega-inference-det --help +``` + +## 3. 注意事项 + +### 3.1 请预留足够的磁盘空间 + +在Vega运行期间,会有缓存每一个搜索到的网络的模型,当搜索的数量较大是,需要较大的存储空间。请根据每个搜索算法的搜索网络模型的数量的大小,预留足够的磁盘空间。 diff --git a/docs/cn/user/install.md b/docs/cn/user/install.md index 522e728..6a49581 100644 --- a/docs/cn/user/install.md +++ b/docs/cn/user/install.md @@ -6,8 +6,8 @@ 1. Ubuntu 18.04 or EulerOS 2.0 SP8 2. CUDA 10.0 or CANN 20.1 -3. Python 3.7 -4. pip3 +3. Python 3.7 or later +4. pytorch, tensorflow(>1.14, <2.0) or mindspore ## 2. 
安装Vega diff --git a/docs/cn/user/security_configure.md b/docs/cn/user/security_configure.md index c5ed967..087490f 100644 --- a/docs/cn/user/security_configure.md +++ b/docs/cn/user/security_configure.md @@ -1,174 +1,258 @@ # vega 安全配置 -## 用户数据保护 -用户用于训练的模型脚本/文件、预训练模型以及数据集属于比较重要的数据文件,需要做好安全保护,可以通过设置正确的文件权限来提升其安全性。可以通过如下命令来设置正确的文件权限 + +Vega的安全配置,包括如下步骤: + +1. 安装OpenSSL +2. 生成CA根证书 +3. 生成评估服务用的证书 +4. 生成Dask用的证书 +5. 加密私钥口令 +6. 配置安全相关的配置文件 +7. 配置评估服务守护服务 +8. 安装dask和distributed +9. 配置HCCL白名单 +10. 注意事项 + +## 1.安装OpenSSL + +首先要安装OpenSSL 1.1.1,从源码编译安装,或者直接安装编译后的发行包。 + +然后安装OpenSSL的python接口,如下: + ```shell -chmod 640 -R "file_path" +pip3 install --user pyOpenSSL==19.0.0 ``` -## 安全配置文件 -vega在启动时会尝试读取```~/.vega/vega.ini```配置文件中的内容,如果该文件不存在或者文件中的配置不正确,那么vega会报错并自动退出。 +## 2.生成CA证书 -用户在安装vega之后,可以通过命令```vega-security-config -i```初始化该文件,初始化之后该文件内容如下: -```ini -[security] -enable = True +执行如下命令生成CA证书: -[https] -cert_pem_file = -secret_key_file = +```shell +openssl genrsa -out ca.key 4096 +openssl req -new -x509 -key ca.key -out ca.crt -subj "/C=/ST=/L=/O=/OU=/CN=" ``` -```[security] -> enable```的默认配置为True,此时用户还需要配置```[https]```段落下的```cert_pem_file```与```secret_key_file。```关于如何生成这2个文件请参考下面的章节,生成文件之后用户可以直接编辑vega.ini配置这2项内容,也可以通过如下命令来配置 + +注意:以上``、``、``、``、``、``根据实际情况填写,本文后面的配置也是同样的。并且CA的配置需要和其他的不同。 + +## 3. 生成评估服务使用的证书 + +评估服务支持加密证书和普通证书: + +1. 若使用加密证书,需要安装华为公司的KMC安全组件,参考`生成加密证书`章节 +2. 若使用普通证书,参考`生成普通证书`章节 + +### 3.1 生成加密证书 + +执行如下脚本,生成评估服务器所使用的证书的加密私钥,执行该命令时,会提示输入加密密码,密码的强度要求如下: + +1. 密码长度大于等于8位 +2. 必须包含至少1位大写字母 +3. 必须包含至少1位小写字母 +4. 必须包含至少1位数字 + ```shell -vega-security-config -m https -c "cert_file_path" -k "key_file_path" -# 替换“cert_file_path”与“key_file_path"为真实的文件路径 +openssl genrsa -aes-256-ofb -out server.key 4096 ``` -> 注意:用户也可以选择关闭安全配置,通过运行命令```vega-security-config -s 0```来实现。关闭安全配置之后,训练服务器与推理服务器之间的通信将不再使用https而是https协议,无法保证通信安全。 -> -> 用户在关闭安全配置后,可以通过命令```vega-security-config -s 1```来重新开启安全配置。 -> +然后再执行如下命令,生成证书,并删除临时文件: -vega-security-config提供的操作vega.ini文件的命令总览如下: ```shell -# 1. 初始化vega.ini文件 -vega-security-config -i -# 2. 关闭安全配置 -vega-security-config -s 0 -# 3. 打开安全配置 -vega-security-config -s 1 -# 4. 查询当前的安全配置开关是否打开 -vega-security-config -q sec -# 5. 查询https的证书与密钥配置 -vega-security-config -q https -# 6. 配置https的证书与密钥文件路径 -vega-security-config -m https -c "cert_file_path" -k "key_file_path" -# 7. 
只配置https的证书路径(在训练服务器上) -vega-security-config -m https -c "cert_file_path" -``` - -## 评估服务器 -### 评估服务器 https 安全配置 -#### 生成评估服务器密钥和证书 - -在评估服务器上执行以下操作 - -1.将/etc/pki/tls/openssl.cnf或者/etc/ssl/openssl.cnf拷贝到当前文件夹 - -2.修改当前目录下的openssl.cnf文件内容,在[ v3_ca ]段落中添加内容 -```ini -subjectAltName = IP:xx.xx.xx.xx +openssl req -new -key server.key -out server.csr -extensions v3_ca -subj "/C=/ST=/L=/O=/OU=/CN=" +openssl x509 -req -in server.csr -CA ca.crt -CAkey ca.key -CAcreateserial -out server.crt +rm server.csr ``` -> 注意:xx.xx.xx.xx修改为推理服务器的IP地址 -> -3.生成服务器密钥 + +执行如下脚本生成评估服务客户端所使用的证书的加密私钥,执行该命令时,会提示输入加密密码,密码的强度要求如服务器端私钥,且和服务器段私钥密码不同,请记录好改密码,后继还需使用: + ```shell -openssl genrsa -aes-256-ofb -out example_key.pem 4096 +openssl genrsa -aes-256-ofb -out client.key 4096 ``` -> 注意:在这个阶段需要用户输入保护密钥的密码,此密码由用户自己记住,并且输入的密码强度需满足需求,具体的密码强度需求见下面的启动评估服务器章节 -> -4.生成证书请求文件 + +然后再执行如下命令,生成证书,并删除临时文件: + ```shell -openssl req -new -key example_key.pem -out example.csr -extensions v3_ca \ --config openssl.cnf +openssl req -new -key client.key -out client.csr -extensions v3_ca -subj "/C=/ST=/L=/O=/OU=/CN=" +openssl x509 -req -in client.csr -CA ca.crt -CAkey ca.key -CAcreateserial -out client.crt +rm client.csr ``` -5.生成自签名证书 + +### 3.2 生成普通证书 + +执行如下脚本,生成评估服务器端和客户端使用的证书的私钥和证书: + ```shell -openssl x509 -req -days 365 -in example.csr -signkey example_key.pem \ --out example_crt.pem -extensions v3_ca -extfile openssl.cnf +openssl genrsa -out server.key 4096 +openssl req -new -key server.key -out server.csr -extensions v3_ca -subj "/C=/ST=/L=/O=/OU=/CN=" +openssl x509 -req -in server.csr -CA ca.crt -CAkey ca.key -CAcreateserial -out server.crt +rm server.csr + +openssl genrsa -out client.key 4096 +openssl req -new -key client.key -out client.csr -extensions v3_ca -subj "/C=/ST=/L=/O=/OU=/CN=" +openssl x509 -req -in client.csr -CA ca.crt -CAkey ca.key -CAcreateserial -out client.crt +rm client.csr ``` -6.设置密钥/证书权限 -为了确保系统安全,需要正确配置密钥/证书文件的权限,用户可以使用如下命令进行配置 + +## 4. 生成Dask使用的证书 + +执行如下脚本,生成Dask服务器端和客户端使用的证书的私钥和证书: + ```shell -sudo chmod 600 example_key.pem example_crt.pem +openssl genrsa -out server_dask.key 4096 +openssl req -new -key server_dask.key -out server_dask.csr -extensions v3_ca -subj "/C=/ST=/L=/O=/OU=/CN=" +openssl x509 -req -in server_dask.csr -CA ca.crt -CAkey ca.key -CAcreateserial -out server_dask.crt +rm server_dask.csr + +openssl genrsa -out client_dask.key 4096 +openssl req -new -key client_dask.key -out client_dask.csr -extensions v3_ca -subj "/C=/ST=/L=/O=/OU=/CN=" +openssl x509 -req -in client_dask.csr -CA ca.crt -CAkey ca.key -CAcreateserial -out client_dask.crt +rm client_dask.csr ``` -#### 评估服务器配置https密钥和证书 -将example_key.pem和example_crt.pem拷贝到```~/.vega```文件夹下 +删除CA私钥: -修改配置文件`~/.vega/vega.ini` 配置密钥和证书 -```ini -[security] -enable = True # 需要配置成True才能启用https加密通信 +```shell +rm ca.key +``` -[https] -cert_pem_file = /home//.vega/example_crt.pem # 修改username和证书文件名 -secret_key_file = /home//.vega/example_key.pem # 修改username和密钥文件名 +## 5. 
加密私钥口令 + +若评估服务器使用加密证书,则需要执行本章节余下步骤,若使用普通证书,则跳过该章节。 + +加密生成评估服务的服务器端和客户端的私钥口令,需要安装华为公司KMC安全组件,并将该安全组件动态链接库所在的目录添加到`LD_LIBRARY_PATH`中。 + +```shell +export LD_LIBRARY_PATH=:$LD_LIBRARY_PATH +``` + 接下来安装Vega,使用Vega的密码加密工具调用KMC安全组件对密码加密。 在执行如下命令时,请输入在生成私钥时输入的口令,该命令会生成加密后的口令,请注意保存,在配置文件中会使用到这两个加密后的口令: + +```shell +vega-encrypt_key --cert=server.crt --key=server.key --key_component_1=ksmaster_server.dat --key_component_2=ksstandby_server.dat +vega-encrypt_key --cert=client.crt --key=client.key --key_component_1=ksmaster_client.dat --key_component_2=ksstandby_client.dat +``` + +## 6. 配置安全配置文件 + +请在当前用户的主目录下创建`.vega`目录,并将如上生成的秘钥、证书、加密材料等,拷贝到该目录下,并改变权限: + +```shell +mkdir ~/.vega +mv * ~/.vega/ +chmod -R 600 ~/.vega +``` + +说明: + +1. 如上的秘钥、证书、加密材料也可以放到其他目录位置,注意访问权限要设置为`600`,并在后继的配置文件中同步修改该文件的位置。 +2. 在训练集群上,需要保留`ca.crt`、`client.key`、`client.crt`、`ksmaster_client.dat`、`ksstandby_client.dat`、`server_dask.key`、`server_dask.crt`、`client_dask.key`、`client_dask.crt`,并删除其他文件。 +3. 评估服务上,需要保留`ca.crt`、`server.key`、`server.crt`、`ksmaster_server.dat`、`ksstandby_server.dat`,并删除其他文件。 + +在`~/.vega`目录下创建`server.ini`和`client.ini`。 + +在训练集群中,需要配置`~/.vega/server.ini`和`~/.vega/client.ini`: + +server.ini: + +```ini +[security] + ca_cert=<~/.vega/ca.crt> + server_cert_dask=<~/.vega/server_dask.crt> + server_secret_key_dask=<~/.vega/server_dask.key> + client_cert_dask=<~/.vega/client_dask.crt> + client_secret_key_dask=<~/.vega/client_dask.key> ``` + +client.ini: + +```ini +[security] + ca_cert=<~/.vega/ca.crt> + client_cert=<~/.vega/client.crt> + client_secret_key=<~/.vega/client.key> + encrypted_password=<加密后的client端的口令> #如果使用普通证书, 此项配置为空 + key_component_1=<~/.vega/ksmaster_client.dat> #如果使用普通证书, 此项配置为空 + key_component_2=<~/.vega/ksstandby_client.dat> #如果使用普通证书, 此项配置为空 ``` + +在评估服务器上,需要配置`~/.vega/vega.ini`: + +```ini +[security] +ca_cert=<~/.vega/ca.crt> +server_cert=<~/.vega/server.crt> +server_secret_key=<~/.vega/server.key> +encrypted_password=<加密后的server端的口令> #如果使用普通证书, 此项配置为空 +key_component_1=<~/.vega/ksmaster_server.dat> #如果使用普通证书, 此项配置为空 +key_component_2=<~/.vega/ksstandby_server.dat> #如果使用普通证书, 此项配置为空 +``` + +## 7. 
配置评估服务守护服务 + +使用systemctl管理评估服务器进程,当进程出现异常时自动重启,保证评估服务器连续性。 + +首先创建一个启动评估服务的脚本`run_evaluate_service.sh`,内容如下,注意替换``、``为真实IP和目录: + +```shell +vega-evaluate_service-service -i -w +``` + +然后再创建一个守护服务的文件`evaluate-service`,脚本内容如下,注意替换为真实的脚本位置: -[https] -cert_pem_file = /home//.vega/example_crt.pem # 修改username和证书文件名 +```ini +[Unit] + Description=Vega Evaluate Service Daemon +[Service] + Type=forking + ExecStart=//run.sh + Restart=always + RestartSec=60 +[Install] + WantedBy=multi-user.target ``` -> 注意:这里的example_crt.pem为上面的步骤中生成的证书文件,用户需要手动将该证书文件拷贝到训练节点的对应目录下。 -### 训练服务器防火墙设置 -训练节点在进行多卡训练时需要启动dask和zmq服务,这些服务会随机监听本地127.0.0.1的27000 - 34000 端口。为了保护用户的服务不被恶意攻击,可以通过如下方式配置防火墙保护这些端口: +然后将`evaluate-service`拷贝到目录`/usr/lib/systemd/system`中,并启动该服务: ```shell -iptables -I OUTPUT -p tcp -m owner --uid-owner "user_id" -d 127.0.0.1 --match multiport --dports 27000:34000 -j ACCEPT -iptables -A OUTPUT -p tcp --match multiport -d 127.0.0.1 --dports 27000:34000 -j DROP +sudo cp evaluate-service /usr/lib/systemd/system/ +sudo systemctl daemon-reload +sudo systemctl start evaluate-service ``` -其中```"user_id"```需要用户执行命令```id "username"```查看用户的id并镜像替换。 -> 注意:该配置限制了所有其他用户对端口27000-34000的访问,在多用户环境下如果其他用户也需要运行vega训练任务,需要使用其他用户的id去运行第一条命令,以便使该用户添加到防火墙的白名单中。 -> +## 8. 安装Dask和distributed + +安装Vega时,会自动安装Dask和Distributed的最新版本,我们发现在当前版本中Distributed关闭dash board时存在bug,需要执行如下命令,安装如下版本的这两个组件: + +```shell +pip3 install --user dask==2.11.0 +pip3 install --user distributed==2.11.0 +``` + +## 9. 配置HCCL白名单 + +请参考Ascend提供的[配置指导](https://support.huawei.com/enterprise/zh/doc/EDOC1100206668/8e964064)。 + +## 10. 注意事项 + +### 10.1 模型风险 + +对于AI框架来说,模型就是程序,模型可能会读写文件、发送网络数据。例如Tensorflow提供了本地操作API tf.read_file, tf.write_file,返回值是一个operation,可以被Tensorflow直接执行。 +因此对于未知来源的模型,请谨慎使用,使用前应该排查该模型是否存在恶意操作,消除安全隐患。 + +### 10.2 运行脚本风险 + +Vega提供的script_runner功能可以调用外部脚本进行超参优化,请确认脚本来源,确保不存在恶意操作,谨慎运行未知来源脚本。 + +### 10.3 KMC组件不支持多个用户同时使用 + +若使用KMC组件对私钥密码加密,需要注意KMC组件不支持不同的用户同时使用KMC组件。若需要切换用户,需要在root用户下,使用如下命令查询当前信号量: + +```bash +ipcs +``` + +然后删除查询到的当前所有的信号量: + +```bash +ipcrm -S '<信号量>' +``` diff --git a/docs/en/algorithms/adelaide_ea.md b/docs/en/algorithms/adelaide_ea.md index 05e6e7b..02dc25b 100644 --- a/docs/en/algorithms/adelaide_ea.md +++ b/docs/en/algorithms/adelaide_ea.md @@ -89,8 +89,6 @@ mutate: type: AdelaideMutate codec: AdelaideCodec max_sample: 100 - pareto_front_file: "{local_base_path}/output/random/pareto_front.csv" - random_file: "{local_base_path}/output/random/random.csv" ``` ## 3. Dataset @@ -101,13 +99,6 @@ The dataset for image semantic segmentation needs to include RGB images and corr ### 4. Output -The output includes a series of .pth files (models trained to the num_iter iteration times in the configuration file), the result.csv file, and the pause_front.csv file. The result.csv file records all search models, and the pareto_front.csv file records all pareto_front models. The .csv file contains encoding, flops, parameters, and mIOU. +The output includes model files, network description files, performance files. +The network descrition files has encoding item, using a 19-character string indicates the structure of a model, which ends with an underscore (_) to avoid record errors caused by encoding starting with 0. -1. encoding: A 19-character string indicates the structure of a model, which ends with an underscore (_) to avoid record errors caused by encoding starting with 0. -2. flops: Records the macc value of the model. For example, 1371603728 indicates 1.277 GB. -3. parameters: Records the values of parameters in the model. 
For example, 3162900 indicates 3.016 MB.
-4. mIOU: Records the segmentation performance.
-
-## 5. Benchmark
-
-For details, see the benchmark configuration item in the [adelaide_ea.yml](https://github.com/huawei-noah/vega/blob/master/examples/nas/adelaide_ea/adelaide_ea.yml) configuration file.
diff --git a/docs/en/algorithms/modnas.md b/docs/en/algorithms/modnas.md
index f8b19a8..a84383d 100644
--- a/docs/en/algorithms/modnas.md
+++ b/docs/en/algorithms/modnas.md
@@ -303,10 +303,6 @@ search_space:
 Now we have a supernet on top of the base model where the original convolution operators are replaced with specified mixed operators and primitives. A search routine can then be set up by matching the search space with selected Optimizer and Estimators.
-## Known Issues
-
-- Currently the ModularNAS routine runs in a separate thread and listens on condition variables in Vega, which might lead to deadlocks.
-
 ## Reference
 [^fn1]: Liu, H., Simonyan, K., and Yang, Y. Darts: Differentiable architecture search. ArXiv, abs/1806.09055, 2019b.
diff --git a/docs/en/algorithms/nago.md b/docs/en/algorithms/nago.md
index 37dbdc5..430cc1c 100644
--- a/docs/en/algorithms/nago.md
+++ b/docs/en/algorithms/nago.md
@@ -88,7 +88,4 @@ search_algorithm:
 ### 5. Output
-The following two files are generated in the specified output directory (the default directory is `./example/tasks//output/nas/`):
-
-- The `output.csv` file contains the best architecture generator hyperparameters found by BOHB
-- The `reports.csv` file contains all the architecture generator hyperparameters queried by BOHB at different epoch.
+The best architecture generator hyperparameters found by BOHB are saved in the file `desc_nn.json` in the folder `./example/tasks//output/nas/`.
diff --git a/docs/en/algorithms/pba.md b/docs/en/algorithms/pba.md
index 60a8ce6..bfcaafd 100644
--- a/docs/en/algorithms/pba.md
+++ b/docs/en/algorithms/pba.md
@@ -105,18 +105,3 @@ The PBA algorithm uses the default parameters in the parameter configuration fil
 |:--:|:--:|:--:|:--:|:--:|
 |Ho et at.,2019|96.13%|96.92%|97.32%|97.42%|
 |Vega Pipeline|96.26%|97.18%| \ |97.57%|
-
-The final output files and directories are as follows:
-
-```text
-output:
-    best_hps.json: best augmentation policy schedule obtained by the PBA algorithm and the ID and score of the search phase
-    hps.csv: ID and score of 16 groups of augmentation policy schedules obtained by the PBA algorithm in the search phase
-    score_board.csv: score and status of each round of iteration of the 16 groups of data augmentation operations obtained in the algorithm search phase.
-workers:
-    hpo: The 16 folders are the final results of the 16 groups of models, including the score and model.
-        0:
-        1:
-        ...
-        16:
-```
diff --git a/docs/en/algorithms/quant_ea.md b/docs/en/algorithms/quant_ea.md
index ffc744c..aa4976e 100644
--- a/docs/en/algorithms/quant_ea.md
+++ b/docs/en/algorithms/quant_ea.md
@@ -66,8 +66,4 @@ The two phases ("nas" and "fully_train") are performed in sequence. The Pareto f
 ### 5. Algorithm output
-The following two files are generated in the specified output directory:
-
-- The model on the found Pareto front after fully training.
-- The result.csv file contains the encoding, flops, parameters, and accuracy of all models during the search process.
-- pareto_front.csv contains the found pareto front information.
+The output files are generated in `./tasks//output/nas/`.
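Where an algorithm records its search results as CSV (for example the `result.csv` and `pareto_front.csv` files referred to above), the records can be inspected with a few lines of Python. This is only an illustrative sketch: the file location and the metric column (`accuracy` is assumed here) depend on the algorithm and task configuration.

```python
# Illustrative only: list the top searched models recorded in a result CSV.
# Adjust the path and the metric column to match the actual algorithm output.
import csv

with open("./tasks/<task_id>/output/nas/result.csv", newline="") as f:
    rows = list(csv.DictReader(f))

# Sort by the assumed "accuracy" column; empty values are treated as 0.
rows.sort(key=lambda row: float(row.get("accuracy", 0) or 0), reverse=True)
for row in rows[:5]:
    print(row)
```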
diff --git a/docs/en/algorithms/sp_nas.md b/docs/en/algorithms/sp_nas.md index 43b3ed4..7547fdc 100644 --- a/docs/en/algorithms/sp_nas.md +++ b/docs/en/algorithms/sp_nas.md @@ -130,7 +130,6 @@ fine_tune: models_folder: "{local_base_path}/output/parallel/" # Get desc file and weights file from parallel pipe step ``` - ### Algorithm output - The optimal models with fully training. @@ -138,4 +137,4 @@ fine_tune: ## Benchmark -Benchmark configuration: [sp_nas.yml](https://github.com/huawei-noah/vega/tree/master/examples/nas/sp_nas/spnas.yml) +Benchmark configuration: [spnas.yml](https://github.com/huawei-noah/vega/blob/master/examples/nas/sp_nas/spnas.yml) diff --git a/docs/en/algorithms/sr_ea.md b/docs/en/algorithms/sr_ea.md index fe4751d..e0b79de 100644 --- a/docs/en/algorithms/sr_ea.md +++ b/docs/en/algorithms/sr_ea.md @@ -79,8 +79,4 @@ mutate: ### Output -The outputs are as follows: - -• The model on the found Pareto front after fully training. -• Logs of all models in random search and evolutionary search process (result.csv) -• Logs of Pareto front results (pareto_front.csv). +The outputs is the model on the found Pareto front after fully training. diff --git a/docs/en/developer/developer_guide.md b/docs/en/developer/developer_guide.md index 2a4dd3a..72f7b08 100644 --- a/docs/en/developer/developer_guide.md +++ b/docs/en/developer/developer_guide.md @@ -1,18 +1,11 @@ # Development Reference +**Outdated and to be updated.** + ## 1. Introduction The key features of Vega are network architecture search and hyperparameter optimization. In the network architecture search process, the search space and search algorithm are the core parts, and the generator is used to control the sampling, update, and end of the search process. -The following figure shows the class diagram of the search space and search algorithm. - -![Search Space diagram](../../images/search_space_classes.png) - -The following figure shows the search space and search algorithm process. - -![Search Space process](../../images/search_space_flow.png) - -Search space process The following describes the following parts: - search space diff --git a/docs/en/developer/fine_grained_search_space.md b/docs/en/developer/fine_grained_search_space.md deleted file mode 100644 index 52dd337..0000000 --- a/docs/en/developer/fine_grained_search_space.md +++ /dev/null @@ -1,211 +0,0 @@ -# Search space and Fine-Grained Network guidance -## 1. Fine-grained Introduction -In most Automl algorithms, the search space is closely related to the network. Each search algorithm defines a series of search space and network types that are identified by the search space and network types. Most of these network types are slightly modified on the basic network, resulting in network reuse failure. In addition, the search space and search algorithm are strongly coupled. Each algorithm has its own search space definition. This search space can only be used in specific scenarios and lacks universality and scalability. -After analyzing these problems, we propose a general searchspace fine-grained network solution. - -- Unified search space definition mode. The same search space can adapt to different search algorithms. -- Reuses basic networks, provides fine-grained networks, and constructs different types of networks through combinations. -- The search space can be expanded freely based on the defined network. -- Multiple backends are supported. -## 2. 
Fine-grained demonstration -### 2.1 Building a Network with Fine Grain -- Inherit the Module base class and call `@ClassFactory.register(ClassType.NETWORK)` to register the network. -- The pytorch style is used. The `self.xx` variable is placed in the module. By default, the variable is executed in sequence. -- If you need to customize the execution sequence of modules, rewrite the `call` method. -```python -from vega.common import ClassFactory, ClassType -from vega.modules.module import Module -from vega.modules.operators import ops -@ClassFactory.register(ClassType.NETWORK) -class SimpleCnn(Module): - def __init__(self, block_nums=3, filters=32, kernel_size=3): - super(SimpleCnn, self).__init__() - in_channels = 3 - out_channels = filters - output_size = 32 - for i in range(block_nums): - block = ConvBlock(in_channels, out_channels, kernel_size) - self.add_module("block{}".format(i), block) - in_channels = out_channels - output_size = (output_size - kernel_size + 1) // 2 - self.fc1 = ops.Linear(in_channels * output_size * output_size, 120) - self.relu = ops.Relu() - self.fc2 = ops.Linear(120, 10) - -@ClassFactory.register(ClassType.NETWORK) -class ConvBlock(Module): - def __init__(self, in_channels, out_channels, kernel_size=3): - super(ConvBlock, self).__init__() - self.conv = ops.Conv2d(in_channels, out_channels, kernel_size) - self.bn = ops.BatchNorm2d(out_channels) - self.relu = ops.Relu() - self.pool = ops.MaxPool2d((2, 2)) - def call(x): - x = self.conv(x) - x = self.bn(x) - x = self.relu(x) - return self.pool(x) -model = SimpleCnn() -print(model) -``` -### 2.2. Define Search Space and Use Random Search -- Config in pipeline -```yaml -pipeline: [hpo] - -hpo: - pipe_step: - type: SearchPipeStep - - search_algorithm: - type: RandomSearch - - search_space: - type: SearchSpace - hyperparameters: - - key: backbone.block1.conv.in_channels - type: CATEGORY - range: [8, 16, 32, 64, 128, 256] - model: - model_desc: - modules: ["backbone"] - backbone: - type: SimpleCnn - dataset: - type: Cifar10 - common: - data_path: /cache/datasets/cifar10/ - batch_size: 256 - trainer: - type: Trainer - epochs: 1 -``` -- Use SearchSpace in code. -```python -from vega.algorithms.hpo.random_hpo import RandomSearch -from vega.core.search_space import SearchSpace -from vega.core.search_space.param_types import ParamTypes -from vega.core.search_space.params_factory import ParamsFactory -from vega.networks.network_desc import NetworkDesc - -# Definition of SearchSpace -params = ParamsFactory.create_search_space( -param_name='backbone.block1.conv.in_channels', -param_type=ParamTypes.CATEGORY, -param_range=[8, 16, 32, 64, 128, 256]) -search_space = SearchSpace().add_hp(params) -# Search algorithm -id, desc = RandomSearch(search_space).search() -# Parse into a model. -model = NetworkDesc(desc).to_model() -print(model) -``` -## 3. Module Groups -To facilitate the reuse of network modules, fine-grained modules are grouped based on their functions. Each group has its own features. -- **Networks**: defines a common network, which is a coarse-grained network, such as ResNet and FasterRCNN. Networks are submodules in other groups. -- **Backbone**: backbone network. Generally, the backbone+head mode is used to form a network. In many scenarios, we can flexibly replace different backbones to process different featureMaps. -- **Head**: used for feature fusion, for example, as a classification or regression problem. This ensures that different heads are replaced to accommodate different scenarios. 
-- **Cells:** Multiple blocks are combined. Multiple cells are defined to define combined scenarios. -- **Blocks**: consists of basic operators and forms a block with specific functions. We provide some common blocks that can be used in different networks. -- **Connections**: defines the connection relationships between modules, including Sequential and Add, and the implementation statements of some condition branches, such as Repeat. -- **Operators:** Defines underlying operators, such as conv and batch_normal. Each operator is adapted to multiple platforms to unify external input, output, and interface invoking. -For example, the composition of a ResNet18 is as follows: -![resnet](../../images/resnet.png) - -## 4. Definition of Search Space - -The search space consists of **hyper_parameters** and **condition**. -**hyper_parameters** -Specifies the definition of a hyperparameter, including key, type, and value. key indicates the name of a hyperparameter, and type indicates the type of a hyperparameter, that is, ParamType. The system selects a sampling mode based on ParamType. range: specifies the sampling range. -The following param types are preconfigured: - -- **INT**: indicates that a value is sampled from an integer range. If the value range is [0, 10], a value is randomly sampled from 0 to 10. -- **INT_EXP:** A value in the integer range is sampled in the exponential sampling mode of 10. For example, if range is [0, 1000], the value is mapped to [0, 10, 100, 1000] through the log function. -- **INT_CAT**: Select a value from multiple INT types, for example, range=[16, 32, 64, 128]. -- **FLOAT:** Sampling a value from a floating range. For example, if range is [0.001, 1], a value is sampled. -- **FLOAT_EXP**: sample a value in the Float type range in exponential sampling mode of 10. For example, if range is [0.001, 1], the value is mapped to [1, 0.01, 0.001] through the log function. -- **FLOAT_CAT:** indicates that a value is selected from multiple FLOAT types, for example, range=[0.1, 0.01, 0.001, 0.99]. -- **STRING:** indicates that one character string is selected from multiple character strings, for example, range=['block1','block2','block3','block4']. -**condition** -Indicates the relationship between two nodes. A child node takes effect only when the parent node meets certain conditions. -![img](http://hi3ms-image.huawei.com/hi/staticimages/hi3msh/images/2019/0731/15/5d414a699c009.png)![img](http://image.huawei.com/tiny-lts/v1/images/9ed3126327ed5a8abb80_844x290.png@900-0-90-f.png) -The value or range of the condition is transferred by using condition_range. Specifically: -- **EQUAL**: condition_range can contain only one parent value, indicating that the child is selected. The value of parent must be equal to **. -- **NOT_EQUAL**: condition_range can contain one or more values of parent, indicating that child is selected. The value of parent ** must not be equal to all values provided in **condition_range. -- **IN**: If parent is of the range type, condition_range must contain two values, indicating the minimum value and maximum value of cond_range. If child is selected, the current value of parent must be within the range of cond_range. If parent is of the CAT type, condition_range must contain one or more parent values. If child is selected, the current parent value must be within a certain value in condition_range. -**forbidden** -Indicates the mutually exclusive relationship between values of two nodes. If node 1 contains a value, some values of node 2 are not selected. 
-## 5. Support for Multiple Backends -We encapsulate the underlying architecture and unify upper-layer interfaces to adapt to multiple backends. The core functions are as follows: -- **Module**: base class to be inherited for implementing customized modules, which unifies the implementation of internal module operations on each platform. -- **ops**: upper-layer operator invoking interface, which unifies the names, input, and output of the same functional operator on different platforms. -- **Serializable:** Extracts and parses hyperparameters and hierarchies in the module, and serializes them into a JSON dictionary. -![fine_grained_space](../../images/fine_grained_space.png) - -## 6. How to Develop Fine-Grained Networks - -For algorithm developers, we want them to focus on the development of search algorithms for network structure and hyperparameters rather than the construction of the network itself. Currently, some modules and networks have been preconfigured that can provide the hyperparameter definition and architecture definition description of this type of network. Algorithm developers only need to assemble new networks using search algorithms based on the description. -### 6.1 Defining a Module -To facilitate your use, we inherit the development habits of pytorch. Only a few lines of changes are required to become a module of fine granularity. -- Inherit the Module class and register it with the `ClassFactory.register(ClassType.NETWORK)`. -- Replace the operator in nn with the operator in ops. -- For the network structure that is executed in sequence, the network is generated in the sequence of self by default, and the forward method does not need to be implemented. -```python -@ClassFactory.register(ClassType.NETWORK) -class ConvBlock(Module): - def __init__(self, in_channels, out_channels, kernel_size=3): - super(ConvBlock, self).__init__() - self.conv = ops.conv2d(in_channels, out_channels, kernel_size) - self.bn = ops.batch_norm2d(out_channels) - self.relu = ops.relu() - self.pool = ops.max_pool2d((2, 2)) -``` -- If special processing is required for input, rewrite the `call` method as required. -```python -@ClassFactory.register(ClassType.NETWORK) -class MixedOp(Module): - def __init__(self, C, stride, ops_cands): - """Init MixedOp.""" - super(MixedOp, self).__init__() - self.add_spaces(ops_cands, OPS[ops_cands](C, stride, True)) - - def call(self, x, weights=None, *args, **kwargs): - """Call function of MixedOp.""" - if weights is None: - for model in self.children(): - x = model(x) - return x - return ops.add_n(weights[idx] * op(x) for idx, op in enumerate(self.children()) if weights[idx] != 0) -``` -### 6.2 Using Connections to Assemble Multiple Modules -By default, multiple networks are assembled in Sequential mode. When other connection methods are used, you need to manually invoke the connection method. In the following example, Add is used to add and combine the two networks. 
-```python -@ClassFactory.register(ClassType.NETWORK) -class BasicBlock(Module): - """Create BasicBlock SearchSpace.""" - def __init__(self, inchannel, outchannel, groups=1, base_width=64, stride=1): - super(BasicBlock, self).__init__() - base_conv = BasicConv(inchannel, outchannel) - shortcut = ShortCut(inchannel, outchannel) - self.add_block = Add(base_conv, shortcut) - self.relu = ops.relu() -``` -Developers can also define connections as follows: -- Inherit `ConnectionsDecorator` and register with `ClassFactory.register(ClassType.NETWORK)` -- The input parameter of the `init` function is `*models`, indicating that multiple modules are accepted. We will automatically invoke add_module to set these modules to modules. -- Rewrite the `call` method, use `self.children()` to obtain added modules, and perform detailed operations. -```python -@ClassFactory.register(ClassType.NETWORK) -class Sequential(ConnectionsDecorator): - """Sequential Connections.""" - def __init__(self, *models): - super(Sequential, self).__init__(*models) - - def compile(self, inputs): - """Override compile function, conect models into a seq.""" - output = inputs - models = self.children() - for model in models: - output = model(output) - return output -``` diff --git a/docs/en/developer/new_algorithm.md b/docs/en/developer/new_algorithm.md index 02bb857..4627fe6 100644 --- a/docs/en/developer/new_algorithm.md +++ b/docs/en/developer/new_algorithm.md @@ -1,5 +1,7 @@ # Algorithm Development Guide +**Outdated and to be updated.** + New algorithms, such as new network search algorithms, model compression algorithm, hyperparameter optimization algorithms, and data augmentation algorithms, need to be extended based on the basic classes provided by Vega. The core of the AutoML algorithm is search space, search algorithm, network construction and evaluation. The new algorithm mainly considers these aspects. ## 1. Add a schema search algorithm diff --git a/docs/en/user/ascend_910.md b/docs/en/user/ascend_910.md new file mode 100644 index 0000000..f666e46 --- /dev/null +++ b/docs/en/user/ascend_910.md @@ -0,0 +1,153 @@ +# Deploy the Ascend environment. + +Deploy the Ascend environment by referring to the Ascend official document. The following +installation guide is a key step during the installation. If an error occurs during the +installation, refer to the official document. +Before the deployment, download the installation package from the official website. + +## 1 Check the install Driver and CANN Versions + +For a new Ascend host, check whether the `/usr/local/HiAi` directory exists. If yes, +run the following command as root user to uninstall the directory: + +```bash +/usr/local/HiAi/uninstall.sh +``` + +Run the following commands as a non-root user to create the `Ascend` directory +and make the directory accessible to the `HwHiAiUser` user: + +```bash +mkdir /usr/local/Ascend/ +sudo chown -R :HwHiAiUser /usr/local/Ascend/ +sudo chmod -R 750 /usr/local/Ascend/ +``` + +If `/usr/local/Ascend/` exists, check whether the old Driver and CANN packages have been +installed before the installation. 
Run the following command to query the version number of +each component: + +```bash +cat /usr/local/Ascend/driver/version.info +cat /usr/local/Ascend/firmware/version.info +cat /usr/local/Ascend/nnae/latest/ascend_nnae_install.info +cat /usr/local/Ascend/ascend-toolkit/latest/arm64-linux/ascend_toolkit_install.info +cat /usr/local/Ascend/tfplugin/latest/ascend_tfplugin_install.info +``` + +If the version is older than expected, uninstall it as root user. + +```bash +/usr/local/Ascend/driver/script/uninstall.sh +/usr/local/Ascend/firmware/script/uninstall.sh +/usr/local/Ascend/nnae/latest/script/uninstall.sh +/usr/local/Ascend/ascend-toolkit/latest/arm64-linux/script/uninstall.sh +/usr/local/Ascend/tfplugin/latest/script/uninstall.sh +``` + +If nnae, ascend-toolkit, and tfplugin are not installed by the root user, uninstall them as the user. + +## 2 Install the driver and CANN + +Run the following command as the root user to install the software. The following version +is for reference only: + +```bash +chmod +x *.run +./A800-9000-npu-driver_21.0.3.1_linux-aarch64.run --full +./A800-9000-npu-firmware_1.79.22.4.220.run --full +``` + +Run the following command to check whether the installation is successful: + +```bash +npu-smi info +``` + +Before installing other packages as a non-root user, set this user to the same group as `HwHiAiUser`. + +```bash +usermod -a -G HwHiAiUser +``` + +```bash +./Ascend-cann-nnae_5.0.T306_linux-aarch64.run --install +./Ascend-cann-nnrt_5.0.T306_linux-aarch64.run --install +./Ascend-cann-tfplugin_5.0.T306_linux-aarch64.run --install +./Ascend-cann-toolkit_5.0.T306_linux-aarch64.run --install +``` + +After the installation is completed, restart the host as prompted. + +## 3 Configure rank_table_file. + +Run the `hccn_tool` command to generate `rank_table_file` by referring to the official Ascend document. + +## 4 Configure environment Variables + +The following environment variables need to be configured. +You are advised to place them in the `~/.bashrc` directory: + +```bash +export HOME_DIR=/home/ +export HOST_ASCEND_BASE=/usr/local/Ascend +export JOB_ID= +export DEVICE_ID=0 +export RANK_TABLE_FILE= +export RANK_ID=0 +export RANK_SIZE=8 +export NPU_VISIBLE_DEVICES=0,1,2,3,4,5,6,7 +export BATCH_TASK_INDEX=0 +export TF_CPP_MIN_LOG_LEVEL=3 +export LD_PRELOAD=export LD_PRELOAD=/lib64/libgomp.so.1:$HOME_DIR/.local/lib/python3.7/site-packages/sklearn/__check_build/../../scikit_learn.libs/libgomp-d22c30c5.so.1.0.0 +export GLOG_v=3 +export USE_NPU=True +source /usr/local/Ascend/tfplugin/set_env.sh +source /usr/local/Ascend/ascend-toolkit/set_env.sh +source /usr/local/Ascend/nnae/set_env.sh +export PATH=$HOME_DIR/.local/bin:$PATH +export PYTHONPATH=$HOME_DIR/.local/lib/python3.7/site-packages:$PYTHONPATH +export LD_LIBRARY_PATH=$HOME_DIR/.local/lib/python3.7/site-packages/vega/security/kmc/aarch64:$LD_LIBRARY_PATH +``` + +In the preceding command, `` is the current user name. `` must be an integer, +for example, `10087`. `` must be the full path of the file. + +## 5 Install Vega and Dependency Packages + +Upgrade the PIP to the latest version. + +```bash +pip3 install --user --upgrade pip +``` + +Install the nnae, topi, and hccl component packages. 
+ +```bash +export fwk_path=' /usr/local/Ascend/nnae/latest' +export te_path=${fwk_path}'/fwkacllib/lib64/te-*.whl' +export topi_path=${fwk_path} '/fwkacllib/lib64/topi-*.whl' +export hccl_path=${fwk_path} '/fwkacllib/lib64/hccl-*.whl' +pip3 install --user ${te_path} +pip3 install --user ${topi_path} +pip3 install --user ${hccl_path} +``` + +Install noah-vega. Do not install the dependency package because of the special environment of Ascend. + +```bash +pip3 install --user --no-deps noah-vega +``` + +Run the following command to view the Vega dependency package: + +```bash +pip3 show noah-vega +``` + +Note that the following versions must be installed for the dask and distributed packages: + +```bash +pip3 install --user distributed==2021.7.0 +pip3 install --user dask==2021.7.0 +``` \ No newline at end of file diff --git a/docs/en/user/config_reference.md b/docs/en/user/config_reference.md index c6e61b3..dd66fcf 100644 --- a/docs/en/user/config_reference.md +++ b/docs/en/user/config_reference.md @@ -157,6 +157,7 @@ fully_train: common: data_path: /cache/datasets/cifar10/ ``` +**Note**: HCCL supports multi-machine multi-card, Horovod currently only supports single machine multi-card. ## 3. NAS and HPO configuration items diff --git a/docs/en/user/deployment.md b/docs/en/user/deployment.md index 6452edb..31a9c3e 100644 --- a/docs/en/user/deployment.md +++ b/docs/en/user/deployment.md @@ -6,14 +6,14 @@ The following conditions must be met when the Vega is deployed in a local cluster: -1. Ubuntu 18.04 or later -2. CUDA 10.0 -3. Python 3.7 -4. pip +1. Ubuntu 18.04 or EulerOS 2.0 SP8 +2. CUDA 10.0 or CANN 20.1 +3. Python 3.7 or later +4. pytorch, tensorflow(>1.14, <2.0) or mindspore **Note: If you need to deploy the Ascend 910 cluster, contact us.** -During cluster deployment, you need to install the Vega and some mandatory software packages on each cluster node by running the following commands: +During cluster deployment, you need to install the Vega: ```bash pip3 install --user --upgrade noah-vega @@ -25,23 +25,6 @@ After installing the preceding software on each host, you need to configure SSH After the preceding operations are complete, the cluster has been deployed. -### 1.2 Verify - -After the cluster is deployed, run the following command to check whether the cluster is available: - -```bash -vega-verify-cluster -m -s ... -n -``` - -For example: - -```bash -vega-verify-cluster -m 192.168.0.2 -s 192.168.0.3 192.168.0.4 -n /home/alan/nfs_folder -``` - -After the verification is complete, the message "All cluster check items have passed." is displayed. -If an error occurs during the verification, please adjust the cluster based on the exception information. - ## Reference ### Install MPI @@ -74,6 +57,24 @@ Any two hosts on the network must support SSH mutual trust. The configuration me ### Building NFS +NFS is a widely used system for data sharing in a cluster. If an NFS system already exists in the cluster, use the existing NFS system. + +The following instructions for configuring NFS may not apply to all NFS systems. Adjust the instructions based on the actual cluster environment. + +Before configuring the NFS server, check whether the UID of the current user on each host in the cluster are the same. If the UID are different, the NFS shared directory cannot be accessed. In this case, you need to change the UID of the current user to the same value to avoid conflicts with the UIDs of other users. 
+ +To query the UID of the current user, run the following command: + +```bash +id +``` + +Change the current UID (Change the value with caution, please contact the cluster system administrator for help): + +```bash +sudo usermod -u +``` + NFS server settings: 1. Install the NFS server. @@ -95,13 +96,7 @@ NFS server settings: sudo bash -c "echo '/home//nfs_cache *(rw,sync,no_subtree_check,no_root_squash,all_squash)' >> /etc/exports" ``` -4. Set the shared directory to the `nobody` user. - - ```bash - sudo chown -R nobody: //nfs_cache - ``` - -5. Restart the NFS server. +4. Restart the NFS server. ```bash sudo service nfs-kernel-server restart diff --git a/docs/en/user/evaluate_service.md b/docs/en/user/evaluate_service.md deleted file mode 100644 index 54902d6..0000000 --- a/docs/en/user/evaluate_service.md +++ /dev/null @@ -1,323 +0,0 @@ -# Evaluate Service - -## 1. Introduction - -The model evaluation service is used to evaluate the performance of a model on a specific hardware device, such as the accuracy, model size, and latency of a pruned and quantized model on the Atlas 200 DK. - -Currently, the evaluation service supports Davincit inference chips (Atlas 200 DK, ATLAS300, and development board environment Evb) and mobile phones. More devices will be supported in the future. - -The evaluation service uses the CS architecture. The evaluation service is deployed on the server. The client sends an evaluation request to the server through the `REST` interface and obtains the result. Vega can use the evaluation service to detect model performance in real time during network architecture search. After a candidate network is generated in the search phase, the network model can be sent to the evaluation service. After the model evaluation is complete, the evaluation service returns the evaluation result to Vega. Vega performs subsequent search based on the evaluation result. This real-time evaluation on the actual device helps to search for a network structure that is more friendly to the actual hardware. - -## 2. spec - -Supported Models and Hardware Devices: - -| Algorithm | Model | Atlas 200 DK |Atlas 300 | Bolt | -| :--: | :--: | :--: | :--: | :--: | -| Prune-EA | ResNetGeneral | √ | √ | √ | -| ESR-EA | ESRN | | √ | √ | -| SR-EA | MtMSR | | √ | √ | -| Backbone-nas | ResNet| √| √ | | -| CARS | CARSDartsNetwork | | √ | | -| Quant-EA | ResNetGeneral | √ | √ | √ | -| CycleSR | CycleSRModel | | | | -| Adlaide-EA | AdelaideFastNAS | | √ | | -| Auto-Lane | ResNetVariantDet | | | -| Auto-Lane | ResNeXtVariantDet | | | - -## 3. Evaluation Service Deployment - -### 3.1 Environment Installation and Configuration (Optional) - -Configure the hardware (Atlas 200 DK, Atlas 300, or mobile phone) by following the instructions provided in the following sections. - -### 3.1.1 Installing the Atlas 200 DK Environment (Optional) - -#### 3.1.1.1 Preparations - -1. An 8 GB or larger SD card and a card reader are available. -2. A server where Ubuntu 16.04.3 has been installed. -3. Download the system image: [ubuntu-16.04.3-server-arm64.iso](http://old-releases.ubuntu.com/releases/16.04.3/ubuntu-16.04.3-server-arm64.iso) -4. Download the make_sd_card.py and make_ubuntu_sd.sh from . -5. Download the developer running package mini_developerkit-1.3.T34.B891.rar from . -6. Decompress the developer package and upload it to the user directory. - -#### 3.1.1.2 Installing and Configuring the Atlas200 DK - -1. Insert the SD card into the card reader and connect the card reader to the USB port on the Ubuntu server. -2. 
Install dependencies on the Ubuntu server: - - ```bash - apt-get install qemu-user-static binfmt-support python3-yaml gcc-aarch64-linux-gnu g++-aarch64-linux-gnu - ``` - -3. Run the following command to query the name of the USB device where the SD card is located: - - ```bash - fdisk -l - ``` - -4. Run the SD card making script to make a card. The USB device name is the name obtained in the previous step. - - ```bash - python3 make_sd_card.py local USB Device Name - ``` - -5. After the card is created, remove the SD card from the card reader, insert the SD card into the card slot of the Atlas 200 DK developer board, and power on the Atlas 200 DK developer board. - -#### 3.1.1.3 Installing and Configuring the Evaluation Server Environment - -1. Downloading and Installing the DDK Package and Synchronizing the Library - - Download address: - For details about the installation procedure, see the official document: - -2. Configuring the Cross Compilation Environment - To install the compilation environment required by the Atlas 200 DK on the evaluation server, run the following command: - - ```bash - sudo apt-get install g++-aarch64-linux-gnu - ``` - -3. Configure the following environment variables in `/etc/profile` of the server. The value of `/home/` in the file must be a specific path. - - ```bash - export DDK_PATH=/home//huawei/ddk - export PYTHONPATH=$DDK_PATH/site-packages/te-0.4.0.egg:$DDK_PATH/site-packages/topi-0.4.0.egg - export LD_LIBRARY_PATH=$DDK_PATH/uihost/lib:$DDK_PATH/lib/x86_64-linux-gcc5.4 - export PATH=$PATH:$DDK_PATH/toolchains/ccec-linux/bin:$DDK_PATH/uihost/bin - export TVM_AICPU_LIBRARY_PATH=$DDK_PATH/uihost/lib/:$DDK_PATH/uihost/toolchains/ccec-linux/aicpu_lib - export TVM_AICPU_INCLUDE_PATH=$DDK_PATH/include/inc/tensor_engine - export TVM_AICPU_OS_SYSROOT=/home//tools/sysroot/aarch64_Ubuntu16.04.3 - export NPU_HOST_LIB=/home//tools/1.32.0.B080/RC/host-aarch64_Ubuntu16.04.3/lib - export NPU_DEV_LIB=/home//tools/1.32.0.B080/RC/host-aarch64_Ubuntu16.04.3/lib - ``` - -4. Configuring SSH Mutual Trust - File transfer and remote command execution are required between the evaluation server and the Atlas 200 DK. Therefore, you need to configure SSH mutual trust in the two environments to ensure that the script can be automatically executed. - - a. Install the SSH. `sudo apt-get install ssh` - b. Generate a key. The `ssh-keygen -t rsa` command generates the id_rsa and id_rsa.pub files in the ~/.ssh/ directory. id_rsa.pub is the public key. - c. Check the authorized_keys file in the directory. If the file does not exist, create it and run the `chmod 600 ~/.ssh/authorized_keys` command to change the permission. - d. Copy the public key. Copy the content of the public key id_rsa.pub to the authorized_keys file on another host. - **Note**: Perform the preceding steps on the evaluation server and Atlas 200 DK separately to ensure SSH trust between the two servers. - -### 3.1.2 Installing and Configuring the Atlas 300 Environment (Optional) - -For details, see the Huawei official tutorial at . - -Note: The preceding documents may be updated. Please follow the released updates or obtain the corresponding guide documents. After the environment is installed, you need to set environment variables. For details, see the preceding guide. To facilitate environment configuration, we provide the environment variable configuration template [env_atlas300.sh](https://github.com/huawei-noah/vega/blob/master/evaluate_service/hardwares/davinci/env/env_atlas300.sh) for your reference. 
The actual environment prevails. - -The installation of the Atlas300 environment is complex. To ensure that the environment is correctly installed, please run [check_atlas300.sh](https://github.com/huawei-noah/vega/blob/master/evaluate_service/hardwares/davinci/env/check_atlas300.sh). - -### 3.1.3 Installing and Configuring the Mobile Phone Environment (Optional) - -#### 3.1.3.1 Preparations - -1. Prepare a Kirin 980 mobile phone. Nova 5 is recommended. -2. A server where Ubuntu 16.04.3 has been installed. - -#### 3.1.3.2 Installing and Configuring the Evaluation Server and Mobile Phone - -1. Install the adb tool on the Linux server. - - ```bash - apt install adb - ``` - -2. Connect the mobile phone to the evaluation server through the USB port, enable the developer option, and run the following command on the evaluation server: - - ```bash - adb devices - ``` - - If the following information is displayed, the connection is successful: - - ```text - List of devices attached - E5B0119506000260 device - ``` - -#### 3.1.3.3 Handling Device Connection Failures - -If you cannot obtain the device by running the `adb devices` command on the server, perform the following steps to connect to the device: - -1. Run the `lsusb` command on the evaluation server. The device list is displayed. Find the device ID. - -2. Edit the 51-android.rules file. - - ```bash - sudo vim /etc/udev/rules.d/51-android.rules - ``` - - Write the following content: - - ```text - SUBSYSTEM=="usb", ATTR{idVendor}=="12d1", ATTR{idProduct}=="107e", MODE="0666" - ``` - - Note: 12d1 and 107e are the IDs queried in the previous step. - -3. Edit the adb_usb.ini file. - - ```bash - vim -/.android/adb_usb.ini - ``` - - Write the following content: - - ```text - 0x12d1 - ``` - - Note: 12d1 is the ID queried in step 5.1. - -4. Restart the ADB service. - - ```bash - sudo adb kill-server - sudo adb start-server - ``` - -5. Run the `adb devices` command again to check whether the connection is successful. - - -### 3.1.4 Installing and Configuring the NPU Environment for Kirin 990 Mobile Phones (Optional) -3.1.4.1 Preparations -1. Prepare a Kirin 990 phone. The Mate30 Pro is recommended. -2. A server on which ubuntu 16.04.3 has been installed. - -3.1.4.2 Installation and Deployment -1 Download the HUAWEI HiAI DDK from https://developer.huawei.com/consumer/cn/doc/development/hiai-Library/ddk-download-0000001053590180, Download hwhiai-ddk-100.500.010.010.zip, and decompress it to the /data/tools/ directory. The directory structure is "/data/tools/hwhiai-ddk-100.500.010.010/". -2 Copy the dependent files to the mobile phone. -Copy all contents in the tools_sysdbg directory to the /data/local/tmp directory on the mobile phone. -```bash -adb push /data/tools/hwhiai-ddk-100.500.010.010/tools/tools_sysdbg/* /data/local/tmp/ -``` -3 Log in to the mobile phone, set environment variables, and add the file execution permission. -```bash -adb shell -export LD_LIBRARY_PATH=/data/local/tmp/ -chmod +x /data/local/tmp/model_run_tool -chmod +x /data/local/tmp/data_proc_tool -``` -4 Installing the ADB Debug Tool -Reference to section 3.1.3.2. - -### 3.2 Installing and Starting the Evaluation Service - -1 Installation: Install the vega on the evaluation server, and add the `--no-dependencies` parameter during installation. Do not install dependencies. -2 Start: Run the `vega-evaluate_service-service -i {your_ip_adress} -w {your_work_path}` command. 
The `-i` parameter specifies the IP address of the current server and -the `-w` parameter specifies the working path, please use absolute path. The intermediate files generated during program running are stored in this directory. -For details about other optional parameters, see the help information of this command. Generally, the default values are recommended. - -## 4. Use evaluate service - -To use evaluate service, you only need to configure a few lines in the configuration file, as shown in the following example. - -```yaml -evaluator: - type: Evaluator - device_evaluator: - type: DeviceEvaluator - hardware: "Davinci" - remote_host: "http://192.168.0.2:8888" -``` - -The configuration of `evaluator` is at the same level as your configuration of `trainer`. Two parameters need to be configured. `hardware` indicates the hardware device to be evaluated. Currently, `Davinci` and `Bolt` are supported. `remote_host` indicates the IP address and port number of the evaluation server to be deployed. - -## 5. Customizing the Evaluation Service (Optional) - -Evaluate service supports devices such as Davinci inference chips and mobile phones. However, new hardware devices are emerging. Therefore, Vega provides customized scalability. - -The process of the evaluate service is as follows: - -1. obtaining input information -2. Instantiate a specific hardware instance according to the hardware to be evaluated -3. Model conversion -4. inference -5. Return the inference result - -Steps 3 and 4 may be different for different hardware. Therefore, when new hardware needs to be added, perform the two steps based on the hardware usage. Specifically, the procedure is as follows: - -Add a hardware class to the hardwares directory and implement the `convert_model` and `inference` interfaces as follows: - - ```python -from class_factory import ClassFactory - -@ClassFactory.register() -class MyHardware(object): - - def __init__(self, optional_params): - pass - - def convert_model(self, backend, model, weight, **kwargs): - pass - - def inference(self, converted_model, input_data, **kwargs): - - return latency, output -``` - -In the preceding example, the `MyHardware` class is defined and registered through `@ClassFactory.register()`. - -The class implements the `convert_model` and `inference` interfaces, `backend` indicates the training framework through which the model is saved, for example, `pytorch` and `tensorflow`, which provide necessary auxiliary information for model parsing. `model` and `weight` indicate the training framework through which the model is saved, respectively. - -Model and weight to be converted. The value of weight is optional and may be empty. `converted_model` and `input_data` indicate the converted model and input data, respectively. - -Add the class to `__init__.py` of the hardware. - -```python -from .my_hardware import MyHardware -``` - -## 6. FAQ - -### 6.1 Convert pytorch model to caffe model - -If you need to convert the pytorch model to caffe model, download [PytorchToCaffe](https://github.com/xxradon/PytorchToCaffe) and store it in the `./third_party` directory (the third_party directory and vega directory are at the same directory level). - -Note: The third-party open-source software does not support pytorch1.1. 
If you use the model in the native torchvisoin and the torchvision version is later than 0.2.0, you need to make the following additional modifications: -Add the following content to the `pytorch_to_caffe.py` file: - -```python - -def _flatten(raw , input, * args): - x = raw(input, *args) - if not NET_INITTED: - return x - layer_name=log.add_layer(name='flatten') - top_blobs=log.add_blobs([x],name='flatten_blob') - layer=caffe_net.Layer_param(name=layer_name,type='Reshape', - bottom=[log.blobs(input)],top=top_blobs) - start_dim = args[0] - end_dim = len(x.shape) - if len(args) > 1: - end_dim = args[1] - dims = [] - for i in range(start_dim): - dims.append(x.shape[i]) - cum = 1 - for i in range(start_dim, end_dim): - cum = cum * x.shape[i] - dims.append(cum) - if end_dim != len(x.shape): - cum = 1 - for i in range(end_dim, len(x.shape)): - cum = cum * x.shape[i] - dims.append(cum) - layer.param.reshape_param.shape.CopyFrom(caffe_net.pb.BlobShape(dim=dims)) - log.cnet.add_layer(layer) - return x - - -torch.flatten = Rp(torch.flatten,_flatten) -``` - -### 6.2 Model evaluation of Pytorch 1.2 and earlier versions - -If the `Pytorch` version is 1.2 or earlier, operators may not be supported when the `Pytorch` model is converted to the `onnx` model. If the `upsample_bilinear2d` operator is not supported, you can upgrade the `Pytorch` version to 1.3 or later, or you can obtain `pytorch/torch/onnx/symbolic_opset10.py`, from the `Pytorch` official code library and copy it to the `Pytorch` installation directory. - -### 6.3 Failed to find scripts such as model_convert.sh - -There are many `shell` scripts in the evaluation service. The file format must be `unix`. If you have opened a file in Windows or converted the file when downloading the code, the file format may be changed to DOS. Pay attention to the file format. diff --git a/docs/en/user/faq.md b/docs/en/user/faq.md index d9d4517..9cb22ae 100644 --- a/docs/en/user/faq.md +++ b/docs/en/user/faq.md @@ -2,22 +2,14 @@ ## 1. Exceptions -### 1.1 Exception `ModuleNotFoundError: No module named 'mmdet'` - -To run algorithms such as SP-NAS, you need to install the open-source software mmdetection. For details, see the installation guide of the software. - -### 1.2 Exception `ModuleNotFoundError: No module named 'nasbench'` - -Before running the benchmark, install the open-source software NASBench. For details, see the installation guide of the software. - -### 1.3 Exception `Exception: Failed to create model, model desc={}` +### 1.1 Exception `Exception: Failed to create model, model desc={}` The possible causes are as follows: 1. The network is not registered with the Vega. Before invoking the network, you need to use `@ClassFactory.register` to register the network. For details, see . 2. The model description file of the network is incorrect. You can locate the fault based on `` in the exception information. -### 1.5 Exception `ImportError: libgthread-2.0.so.0: cannot open shared object file: No such file or directory` +### 1.2 Exception `ImportError: libgthread-2.0.so.0: cannot open shared object file: No such file or directory` The opencv-python system dependency library is missing. Run the following command: @@ -25,7 +17,7 @@ The opencv-python system dependency library is missing. 
Run the following comman sudo apt install libglib2.0-0 ``` -### 1.6 Exception `ModuleNotFoundError: No module named'skbuild '` or stuck in `Running setup.py bdist_wheel for opencv-python-headless...` during installation +### 1.3 Exception `ModuleNotFoundError: No module named'skbuild '` or stuck in `Running setup.py bdist_wheel for opencv-python-headless...` during installation The possible cause is that the PIP version is too early. Run the following command: @@ -33,18 +25,14 @@ The possible cause is that the PIP version is too early. Run the following comma pip3 install --user --upgrade pip ``` -### 1.7 Exception `PermissionError: [Errno 13] Permission denied: 'dask-scheduler'`, `FileNotFoundError: [Errno 2] No such file or directory: 'dask-scheduler': 'dask-scheduler'`, or `vega: command not found` +### 1.4 Exception `PermissionError: [Errno 13] Permission denied: 'dask-scheduler'`, `FileNotFoundError: [Errno 2] No such file or directory: 'dask-scheduler': 'dask-scheduler'`, or `vega: command not found` This type of exception is usually caused by the failure to find `dask-scheduler` in `PATH`. Generally, the file is installed in `//.local/bin`. After the Vega is installed , `//.local/bin/` is automatically added to the `PATH` environment variable. The setting does not take effect immediately. You can run the ls command `source ~/.profile` or log in again to make the setting take effect. If the problem persists, check whether the dask-scheduler file exists in the `//.local/bin` directory. If the file already exists, manually add `//.local/bin` to the environment variable `PATH`. -### 1.8 Exception During Pytorch model evaluation: `FileNotFoundError: [Errno 2] No such file or directory: '/torch2caffe.prototxt'` - -For details, see section 6.1 in [Evaluate Service](./evaluate_service.md). - -## 2. Common Configuration Problems +## 2. Configuration Issues ### 2.1 How do I configure multi-GPU/NPU @@ -112,29 +100,11 @@ general: level: info # debug|info|warn|error| ``` -### 2.5 How do I view the search progress in real time - -Vega provides the visualized progress of the model search process. User could set `VisualCallBack` within `USER.yml` as follow, - -```yaml - trainer: - type: Trainer - callbacks: [VisualCallBack, ] -``` - -The output directory of the visualized information is as follows: - -```text -./tasks//visual -``` - -Run the `tensorboard --logdir PATH` command on the active node to start the service and view the progress in the browser. For details, see TensorBoard commands and instructions. - -### 2.6 How Do I Stop the VEGA Program Running in the Background +### 2.5 How Do I Stop the VEGA Program Running in the Background If only the main Vega process is killed, some processes will not be stopped in time, and the resources occupied by the processes will not be released. -The Vega application can be terminated using the following command: +In safe mode, the Vega application can be terminated using the following command: ```bash # Query the process ID of the running Vega main program. @@ -149,19 +119,56 @@ vega-kill -a vega-kill -f ``` -### 2.6 How Do I Stop the Vega Program Running in the Background? +In common mode, run the following command:: -In the multi-GPU/NPU scenario, Vega starts the dask scheduler, dask worker, and trainer. If only the main Vega process is killed, some processes are not stopped in time and the resources occupied by these processes are not released. 
+```bash +vega-kill -s -l +vega-kill -s -p +vega-kill -s -a +vega-kill -s -f +``` -Run the following command to stop the Vega application: +### 2.6 How Do I Query the Running Vega Program + +In safe mode, run the following command to query the running Vega applications: ```bash -# Query the process ID of the running Vega main program. -vega-kill -l -# Stop a Vega main program and related processes. -vega-kill -p -# Or stop all Vega processes at a time. -vega-kill -a -# If the main program is closed normally and there are still residual processes, you can forcibly clear the process. -vega-kill -f +vega-process +``` + +In common mode, you can run the following command to query: + +```bash +vega-process -s +``` + +### 2.7 How Do I Query the Vega Program Running Progress + +In safe mode, you can run the following command to query the running progress of the Vega program: + +```bash +vega-progress -t -r ``` + +In common mode, you can run the following command to query: + +```bash +vega-progress -s -t -r +``` + +### 2.8 How to Perform Model Inference Using the Vega Program + +Classification model inference can be performed with the command `vega-inference`, and detection model inference can be performed with the command `vega-inference-det`. + +Run the following command to query the command parameters: + +```bash +vega-inference --help +vega-inference-det --help +``` + +## 3. Precautions + +### 3.1 Reserve Sufficient Disk Space + +During Vega running, there is a model that caches each searched network. When the number of searched networks is large, a large amount of storage space is required. Reserve sufficient disk space based on the number of search network models for each search algorithm. diff --git a/docs/en/user/install.md b/docs/en/user/install.md index 7e8a16f..059159d 100644 --- a/docs/en/user/install.md +++ b/docs/en/user/install.md @@ -6,8 +6,8 @@ The host where the Vega is installed has a GPU and meets the following requireme 1. Ubuntu 18.04 or EulerOS 2.0 SP8 2. CUDA 10.0 or CANN 20.1 -3. Python 3.7 -4. pip3 +3. Python 3.7 or later +4. pytorch, tensorflow(>1.14, <2.0) or mindspore ## 2. Installing Vega diff --git a/docs/en/user/security_configure.md b/docs/en/user/security_configure.md new file mode 100644 index 0000000..33df6d0 --- /dev/null +++ b/docs/en/user/security_configure.md @@ -0,0 +1,260 @@ +# VEGA security configuration + +The security configuration of the Vega includes the following steps: + +1. Install OpenSSL +2. Generate the CA root certificate +3. Generate the certificate for evaluate_services +4. Generate the certificate for dask +5. Encrypt the private key password +6. Configure security-related configuration files +7. Configure the evaluation service daemon service +8. Install dask and distributed +9. Configuring the HCCL trustlist +10. Precautions + +## 1. Install OpenSSL + +You need to install OpenSSL 1.1.1, compile and install from the source code, or directly install the compiled release package. + +Install the Python interface of the OpenSSL as follows: + +```shell +pip3 install --user pyOpenSSL==19.0.0 +``` + +## 2. Generate the CA Certificate + +Run the following command to generate a CA certificate: + +```shell +openssl genrsa -out ca.key 4096 +openssl req -new -x509 -key ca.key -out ca.crt -subj "/C=/ST=/L=/O=/OU=/CN=" +``` + +Note: ``, ``, ``, ``, ``, and `` should be set based on the situation. The configuration in this document is the same. +In addition, the CA configuration must be different from other configurations. + +## 3. 
Generate the Certificate for Evaluate_service
+
+The evaluation service supports encryption certificates and common certificates.
+
+1. If an encryption certificate is used, install the Huawei KMC security components. For details, see section "Generating an Encryption Certificate."
+2. If a common certificate is used, see section "Generating a Common Certificate."
+
+### 3.1 Generating an Encryption Certificate
+
+Run the following command to generate the encrypted private key for the evaluation service server. When you run this command, you are prompted to enter an encryption password. The password must meet the following strength requirements:
+
+1. The password contains at least eight characters.
+2. The password contains at least one uppercase letter.
+3. The password contains at least one lowercase letter.
+4. The password contains at least one digit.
+
+```shell
+openssl genrsa -aes-256-ofb -out server.key 4096
+```
+
+Run the following commands to generate the certificate and delete the temporary file:
+
+```shell
+openssl req -new -key server.key -out server.csr -extensions v3_ca -subj "/C=/ST=/L=/O=/OU=/CN="
+openssl x509 -req -in server.csr -CA ca.crt -CAkey ca.key -CAcreateserial -out server.crt
+rm server.csr
+```
+
+Run the following command to generate the encrypted private key for the evaluation service client. Again you are prompted to enter an encryption password. It must meet the same strength requirements as the server password but must be different from it. Record this password; it is needed later.
+
+```shell
+openssl genrsa -aes-256-ofb -out client.key 4096
+```
+
+Run the following commands to generate the certificate and delete the temporary file:
+
+```shell
+openssl req -new -key client.key -out client.csr -extensions v3_ca -subj "/C=/ST=/L=/O=/OU=/CN="
+openssl x509 -req -in client.csr -CA ca.crt -CAkey ca.key -CAcreateserial -out client.crt
+rm client.csr
+```
+
+### 3.2 Generating a Common Certificate
+
+Run the following commands to generate the private keys and certificates for the evaluation service server and client:
+
+```shell
+openssl genrsa -out server.key 4096
+openssl req -new -key server.key -out server.csr -extensions v3_ca -subj "/C=/ST=/L=/O=/OU=/CN="
+openssl x509 -req -in server.csr -CA ca.crt -CAkey ca.key -CAcreateserial -out server.crt
+rm server.csr
+
+openssl genrsa -out client.key 4096
+openssl req -new -key client.key -out client.csr -extensions v3_ca -subj "/C=/ST=/L=/O=/OU=/CN="
+openssl x509 -req -in client.csr -CA ca.crt -CAkey ca.key -CAcreateserial -out client.crt
+rm client.csr
+```
+
+## 4. Generate the Certificate for Dask
+
+Run the following commands to generate the private keys and certificates for the Dask server and client:
+
+```shell
+openssl genrsa -out server_dask.key 4096
+openssl req -new -key server_dask.key -out server_dask.csr -extensions v3_ca -subj "/C=/ST=/L=/O=/OU=/CN="
+openssl x509 -req -in server_dask.csr -CA ca.crt -CAkey ca.key -CAcreateserial -out server_dask.crt
+rm server_dask.csr
+
+openssl genrsa -out client_dask.key 4096
+openssl req -new -key client_dask.key -out client_dask.csr -extensions v3_ca -subj "/C=/ST=/L=/O=/OU=/CN="
+openssl x509 -req -in client_dask.csr -CA ca.crt -CAkey ca.key -CAcreateserial -out client_dask.crt
+rm client_dask.csr
+```
+
+Run the following command to delete the CA private key:
+
+```shell
+rm ca.key
+```
+
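Before distributing the files generated above, it can be worth confirming that each certificate actually verifies against the CA. The following is an optional sanity check, not a required step: a minimal Python sketch using the pyOpenSSL package installed in step 1, run from the directory that still contains `ca.crt` and the generated certificates.

```python
# Optional check: verify the generated certificates against ca.crt.
# Assumes the files are in the current directory and pyOpenSSL is installed.
from OpenSSL import crypto

def load_cert(path):
    with open(path, "rb") as f:
        return crypto.load_certificate(crypto.FILETYPE_PEM, f.read())

store = crypto.X509Store()
store.add_cert(load_cert("ca.crt"))

for name in ("server.crt", "client.crt", "server_dask.crt", "client_dask.crt"):
    try:
        crypto.X509StoreContext(store, load_cert(name)).verify_certificate()
        print(name, "OK")
    except crypto.X509StoreContextError as err:
        print(name, "FAILED:", err)
```

The same check can also be done directly with OpenSSL, for example `openssl verify -CAfile ca.crt server.crt`.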
Encrypting the Private Key Password
+
+If the encryption certificate is used, perform the rest of this section. If the common certificate is used, skip this section.
+
+To encrypt the private key passwords of the server and client for evaluate_service, you need to install the Huawei KMC security component and add the directory containing the component's dynamic link libraries to `LD_LIBRARY_PATH`.
+
+```shell
+export LD_LIBRARY_PATH=:$LD_LIBRARY_PATH
+```
+
+Install Vega, then use the password encryption tool to encrypt the passwords.
+When running the following commands, enter the password that you set when generating the private keys. Each command generates an encrypted password; save the two encrypted passwords for use in the configuration files:
+
+```shell
+vega-encrypt_key --cert=server.crt --key=server.key --key_component_1=ksmaster_server.dat --key_component_2=ksstandby_server.dat
+vega-encrypt_key --cert=client.crt --key=client.key --key_component_1=ksmaster_client.dat --key_component_2=ksstandby_client.dat
+```
+
+## 6. Configure Security-related Configuration Files
+
+Create the `.vega` directory in the home directory of the current user, copy the generated keys, certificates, and encryption materials to this directory, and change the permissions:
+
+```shell
+mkdir ~/.vega
+mv * ~/.vega/
+chmod -R 600 ~/.vega
+```
+
+Description:
+
+1. The preceding keys, certificates, and encryption materials can also be stored in other directories. The access permission must be set to 600, and the file locations must be changed accordingly in the subsequent configuration files.
+2. In the train cluster, keep `ca.crt`, `client.key`, `client.crt`, `ksmaster_client.dat`, `ksstandby_client.dat`, `server_dask.key`, `server_dask.crt`, `client_dask.key`, and `client_dask.crt`, and delete the other files.
+3. On the evaluation service host, keep the `ca.crt`, `server.key`, `server.crt`, `ksmaster_server.dat`, and `ksstandby_server.dat` files, and delete the other files.
+
+Create `server.ini` and `client.ini` in the `~/.vega` directory.
+
+In the train cluster, configure `~/.vega/server.ini` and `~/.vega/client.ini`.
+
+server.ini:
+
+```ini
+[security]
+ca_cert=<~/.vega/ca.crt>
+server_cert_dask=<~/.vega/server_dask.crt>
+server_secret_key_dask=<~/.vega/server_dask.key>
+client_cert_dask=<~/.vega/client_dask.crt>
+client_secret_key_dask=<~/.vega/client_dask.key>
+```
+
+client.ini:
+
+```ini
+[security]
+ca_cert=<~/.vega/ca.crt>
+client_cert=<~/.vega/client.crt>
+client_secret_key=<~/.vega/client.key>
+encrypted_password=  # If a common certificate is used, leave this parameter blank.
+key_component_1=<~/.vega/ksmaster_client.dat>  # If a common certificate is used, leave this parameter blank.
+key_component_2=<~/.vega/ksstandby_client.dat>  # If a common certificate is used, leave this parameter blank.
+```
+
+On the evaluation server, configure `~/.vega/vega.ini`.
+
+```ini
+[security]
+ca_cert=<~/.vega/ca.crt>
+server_cert=<~/.vega/server.crt>
+server_secret_key=<~/.vega/server.key>
+encrypted_password=  # If a common certificate is used, leave this parameter blank.
+key_component_1=<~/.vega/ksmaster_server.dat>  # If a common certificate is used, leave this parameter blank.
+key_component_2=<~/.vega/ksstandby_server.dat>  # If a common certificate is used, leave this parameter blank.
+```
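+
+As a quick sanity check (this snippet is only an illustration and is not part of Vega; it assumes the default `~/.vega` location), you can list any key, certificate, or configuration file whose permissions are broader than 600:
+
+```python
+import os
+import stat
+
+vega_dir = os.path.expanduser("~/.vega")
+for root, _, files in os.walk(vega_dir):
+    for name in files:
+        path = os.path.join(root, name)
+        mode = stat.S_IMODE(os.stat(path).st_mode)
+        if mode & 0o077:  # any group/other permission bit is set
+            print(f"{path} has mode {oct(mode)}, expected 600")
+```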
+## 7. Configuring the Evaluation Service Daemon Service
+
+systemctl is used to manage the evaluation service process. If the process becomes abnormal, systemd restarts it automatically to keep the evaluation service available.
+
+Create a script `run_evaluate_service.sh` for starting the evaluation service. Replace `` and `` with the actual IP address and directory.
+
+```shell
+vega-evaluate_service-service -i -w
+```
+
+Create a daemon service file `evaluate-service` with the following content, and replace the script path with the actual location of the startup script.
+
+```ini
+[Unit]
+Description=Vega Evaluate Service Daemon
+[Service]
+Type=forking
+ExecStart=//run_evaluate_service.sh
+Restart=always
+RestartSec=60
+[Install]
+WantedBy=multi-user.target
+```
+
+Copy `evaluate-service` to the `/usr/lib/systemd/system` directory and start the service:
+
+```shell
+sudo cp evaluate-service /usr/lib/systemd/system/
+sudo systemctl daemon-reload
+sudo systemctl start evaluate-service
+```
+
+## 8. Install Dask and Distributed
+
+When Vega is installed, the latest versions of Dask and Distributed are installed automatically. The current versions have a bug that occurs when the dashboard is disabled in Distributed, so run the following commands to install the following versions of the two components:
+
+```shell
+pip3 install --user dask==2.11.0
+pip3 install --user distributed==2.11.0
+```
+
+## 9. Configuring the HCCL Trustlist
+
+For details, see the [Configuration Guide](https://support.huawei.com/enterprise/en/doc/EDOC1100206669/8e964064) provided by Ascend.
+
+## 10. Precautions
+
+### 10.1 Model Risks
+
+For an AI framework, a model is a program. A model may read and write files or send network data. For example, TensorFlow provides the local operation APIs `tf.read_file` and `tf.write_file`, whose return value is an operation that can be directly executed by TensorFlow.
+Therefore, exercise caution when using a model from an unknown source. Before using such a model, check whether it contains malicious operations to eliminate security risks.
+
+### 10.2 Risks of Running Scripts
+
+The script_runner function provided by Vega can invoke external scripts to perform hyperparameter optimization. Check the script source and ensure that no malicious operation exists. Exercise caution when running scripts from unknown sources.
+
+### 10.3 Do Not Use the KMC Component with Different Users at the Same Time
+
+If the KMC component is used to encrypt the private key password, note that different users cannot use the KMC component at the same time.
+To switch users, run the following command as the root user to query the current semaphores:
+
+```bash
+ipcs
+```
+
+Run the following command to delete all the semaphores:
+
+```bash
+ipcrm -S ''
+```
diff --git a/docs/images/fine_grained_space.png b/docs/images/fine_grained_space.png
deleted file mode 100644
index 8e8362a..0000000
Binary files a/docs/images/fine_grained_space.png and /dev/null differ
diff --git a/docs/images/search_space_classes.png b/docs/images/search_space_classes.png
deleted file mode 100644
index 8365a43..0000000
Binary files a/docs/images/search_space_classes.png and /dev/null differ
diff --git a/docs/images/search_space_flow.png b/docs/images/search_space_flow.png
deleted file mode 100644
index da92308..0000000
Binary files a/docs/images/search_space_flow.png and /dev/null differ
diff --git a/evaluate_service/LICENSE b/evaluate_service/LICENSE
new file mode 100644
index 0000000..0bb898d
--- /dev/null
+++ b/evaluate_service/LICENSE
@@ -0,0 +1,188 @@
+ Copyright (C) 2020. Huawei Technologies Co., Ltd. All rights reserved.
+ + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. + +Apache License, Version 2.0 + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." 
+ + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. 
+ + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. 
+ + END OF TERMS AND CONDITIONS diff --git a/evaluate_service/MANIFEST.in b/evaluate_service/MANIFEST.in new file mode 100644 index 0000000..33f2919 --- /dev/null +++ b/evaluate_service/MANIFEST.in @@ -0,0 +1,9 @@ +#dispatch files to site-packages +recursive-include docs * +recursive-include evaluate_service * +include LICENSE +include MANIFEST.in +include README.cn.md +include README.md +include RELEASE.md +include setup.py diff --git a/evaluate_service/README.cn.md b/evaluate_service/README.cn.md new file mode 100644 index 0000000..6e72cfb --- /dev/null +++ b/evaluate_service/README.cn.md @@ -0,0 +1,142 @@ +# Vega 评估服务 + +**中文 | [English](./README.md)** + +--- + +## 1. 简介 + +模型评估服务是用于评估模型在特定硬件设备上的性能,如评估剪枝和量化后的模型在Atlas200 DK、Atlas300上的准确率、模型大小和时延等。 + +评估服务目前支持的硬件设备为Davinci推理芯片(Atlas200 DK、ATLAS300产品和开发板环境Evb)和手机,后继会扩展支持更多的设备。 + +评估服务为CS架构, 评估服务在服务端部署, 客户端通过`REST`接口向服务端发送评估请求和获取结果。Vega在进行网络架构搜索时,可以利用评估服务进行实时检测模型性能。在搜索阶段产生备选网络后,可以将该网络模型发送给评估服务,评估服务完成模型评估后,返回评估结果给Vega,Vega根据评估结果,进行后继的搜索。这种实时的在实际的设备上的评估,有利于搜索出对实际硬件更加友好的网络结构。 + +## 2. 规格 + +支持的模型和硬件设备 + +| 算法 | 模型 | Atlas 200 DK |Atlas 300 | Bolt | +| :--: | :--: | :--: | :--: | :--: | +| Prune-EA | ResNetGeneral | √ | √ | √| +| ESR-EA | ESRN | | √ | √ | +| SR-EA | MtMSR | | √ | √ | +| Backbone-nas | ResNet | √ | √ | | +| CARS | CARSDartsNetwork | | √ | | +| Quant-EA | ResNetGeneral | √ | √ | √ | +| CycleSR | CycleSRModel | | | | +| Adlaide-EA | AdelaideFastNAS | | √ | | +| Auto-Lane | ResNetVariantDet | | | +| Auto-Lane | ResNeXtVariantDet | | | + +## 3. 评估服务部署 + +以下介绍Atalas 300评估服务的部署过程,若需要部署Atlas 200DK或者ARM芯片手机,请联系我们。 + +### 3.1 安装配置Atlas300环境 + +首先需要配置Ascend 300环境,请参考[配置文档](./ascend_310.md)。 + +然后请安装评估服务,请执行如下命令安装: + +```bash +pip3 install --user --upgrade evaluate-service +``` + +安装完成后,将`~/.local/lib/python3.7/site-packages/evaluate_service/hardwares/davinci/samples/atlas300`拷贝到当前目录,执行如下操作,检查环境是否配置正确: + +```bash +echo "[INFO] start check the enviroment..." +python3 -c "import te" && echo "[INFO] check te sucess" +python3 -c "import topi" && echo "[INFO] check topi sucess" +atc --version && echo "[INFO] check atc sucess " +echo "[INFO] start compile the example..." +cd ./atlas300/ +mkdir -p build/intermediates/host +cd build/intermediates/host +cmake ../../src -DCMAKE_CXX_COMPILER=g++ -DCMAKE_SKIP_RPATH=TRUE +make && echo "[INFO] check the env sucess!" +``` + +### 3.2 启动评估服务 + +使用如下命令启动评估服务: + +```shell +vega-evaluate_service-service -i {your_ip_adress} -p {port} -w {your_work_path} +``` + +其中: + +- `-i`参数指定当前使用的服务器的ip地址 +- `-p`参数指定当前使用的服务器的的监听端口,默认值8888 +- `-w`参数指定工作路径, 程序运行时的中间文件将存储在该目录下,请使用绝对路径 + +注意: + +以上启动命令会启动安全模式,需要预先进行安全配置,请参考[安全配置](https://github.com/huawei-noah/vega/tree/master/docs/cn/user/security_configure.md)。 + +也可以使用`-s`参数,启用普通模式,不需要如上配置,命令如下: + +```shell +vega-evaluate_service-service -s -i {your_ip_adress} -w {your_work_path} +``` + +## 4. 使用评估服务 + +使用评估服务时, 需要在Vega调用的配置文件中做如下配置: + +```yaml +evaluator: + type: Evaluator + device_evaluator: + type: DeviceEvaluator + hardware: "Davinci" + remote_host: "https://:" +``` + +其中: + +- `evaluator`的配置和`trainer`配置处于同一层级。 +- `hardware`为评估的硬件设备,当前支持`Davinci`和`Bolt`两种。 +- `remote_host`为评估服务器的ip和端口号,对于普通模式,请设置为:`http://:` + +## 5. 自定义评估服务 + +vega评估服务当前已经支持Davinci推理芯片和手机等端侧设备的评估, 但新的硬件设备是层出不穷的, 因此评估服务提供了可自定义的扩展能力。 + +评估服务的流程是: + +1. 获取输入信息 +2. 根据需要评估的硬件实例化一个具体的硬件实例 +3. 模型转换 +4. 推理 +5. 
返回推理结果
+
+对于不同的硬件, 步骤3和4可能是不同的。 因此当需要添加新的硬件时, 需要根据具体硬件的用法实现这2个步骤。具体来说, 分以下几个步骤:
+
+在hardwares目录下添加一个硬件类, 并实现`convert_model`和`inference`两个接口 如下:
+
+```python
+from class_factory import ClassFactory
+@ClassFactory.register()
+class MyHardware(object):
+
+    def __init__(self, optional_params):
+        pass
+
+    def convert_model(self, backend, model, weight, **kwargs):
+        pass
+
+    def inference(self, converted_model, input_data, **kwargs):
+
+        return latency, output
+```
+
+上面的示例中定义了`MyHardware`类, 并通过`@ClassFactory.register()`进行注册。 类中实现了`convert_model`和`inference`两个接口, `backend`表示模型是通过何种训练框架保存的, 如`pytorch`, `tensorflow`等, 为模型解析提供必要的辅助信息,`model`和`weight`分别表示需要转换的模型和权重,`weight`是非必须的,其值可能为空。`converted_model`和`input_data`分别表示转换之后的模型和输入数据。
+
+然后在hardware的`__init__.py`中加入自定义的类。
+
+```python
+from .my_hardware import MyHardware
+```
diff --git a/evaluate_service/README.md b/evaluate_service/README.md
new file mode 100644
index 0000000..08342e0
--- /dev/null
+++ b/evaluate_service/README.md
@@ -0,0 +1,142 @@
+# Vega Evaluate Service
+
+**English | [中文](./README.cn.md)**
+
+---
+
+## 1. Introduction
+
+The model evaluation service is used to evaluate the performance of a model on a specific hardware device, such as the accuracy, model size, and latency of a pruned and quantized model on the Atlas 200 DK.
+
+Currently, the evaluation service supports Davinci inference chips (Atlas 200 DK, Atlas 300, and the Evb development board environment) and mobile phones. More devices will be supported in the future.
+
+The evaluation service uses a client/server (CS) architecture. The service is deployed on the server; the client sends evaluation requests to the server through the `REST` interface and obtains the results. Vega can use the evaluation service to measure model performance in real time during network architecture search: after a candidate network is generated in the search phase, the network model can be sent to the evaluation service, and once the evaluation is complete, the service returns the result to Vega, which performs the subsequent search based on it. This real-time evaluation on the actual device helps to search for network structures that are more friendly to the target hardware.
+
+## 2. Specifications
+
+Supported models and hardware devices:
+
+| Algorithm | Model | Atlas 200 DK | Atlas 300 | Bolt |
+| :--: | :--: | :--: | :--: | :--: |
+| Prune-EA | ResNetGeneral | √ | √ | √ |
+| ESR-EA | ESRN | | √ | √ |
+| SR-EA | MtMSR | | √ | √ |
+| Backbone-nas | ResNet | √ | √ | |
+| CARS | CARSDartsNetwork | | √ | |
+| Quant-EA | ResNetGeneral | √ | √ | √ |
+| CycleSR | CycleSRModel | | | |
+| Adelaide-EA | AdelaideFastNAS | | √ | |
+| Auto-Lane | ResNetVariantDet | | | |
+| Auto-Lane | ResNeXtVariantDet | | | |
+
+## 3. Evaluation Service Deployment
+
+### 3.1 Environment installation and configuration (Optional)
+
+Configure the hardware (Atlas 200 DK, Atlas 300, or mobile phone) by following the instructions in the following sections.
+
+### 3.1.1 Install the Atlas 200 DK environment (Optional)
+
+Please contact us.
+
+### 3.1.2 Install and configure the Atlas 300 environment (Optional)
+
+For details, see the Huawei official tutorial at .
+
+Note: The preceding documents may be updated. Please follow the released updates or obtain the corresponding guide documents. After the environment is installed, you need to set environment variables. For details, see the preceding guide.
To facilitate environment configuration, we provide the environment variable configuration template [env_atlas300.sh](https://github.com/huawei-noah/vega/blob/master/evaluate_service/hardwares/davinci/env/env_atlas300.sh) for your reference. Your actual environment takes precedence.
+
+The installation of the Atlas 300 environment is complex. To ensure that the environment is correctly installed, please run [check_atlas300.sh](https://github.com/huawei-noah/vega/blob/master/evaluate_service/hardwares/davinci/env/check_atlas300.sh).
+
+### 3.1.3 Install and configure the mobile environment (Optional)
+
+Please contact us.
+
+### 3.1.4 Install and configure the NPU environment for Kirin 990 mobile (Optional)
+
+Please contact us.
+
+### 3.2 Start the evaluation service
+
+Run the following command to start the evaluation service:
+
+```shell
+vega-evaluate_service-service -i {your_ip_address} -p {port} -w {your_work_path}
+```
+
+where:
+
+- `-i` indicates the IP address of the server
+- `-p` indicates the listening port, default is 8888
+- `-w` indicates the work directory; please use an absolute path
+
+Note:
+The preceding command runs in security mode, which requires the security configuration to be performed in advance; see the [security configuration](https://github.com/huawei-noah/vega/tree/master/docs/cn/user/security_configure.md).
+
+You can also use the `-s` parameter to enable the common mode, which does not require the security configuration. The command is as follows:
+
+```shell
+vega-evaluate_service-service -s -i {your_ip_address} -w {your_work_path}
+```
+
+## 4. Use the Evaluation Service
+
+To use the evaluation service, you only need to configure a few lines in the configuration file, as shown in the following example.
+
+```yaml
+evaluator:
+    type: Evaluator
+    device_evaluator:
+        type: DeviceEvaluator
+        hardware: "Davinci"
+        remote_host: "https://:"
+```
+
+where:
+
+- `evaluator` is at the same level as your configuration of `trainer`.
+- `hardware` indicates the hardware device to be evaluated. Currently, `Davinci` and `Bolt` are supported.
+- `remote_host` indicates the IP address and port of the evaluation server. For common mode, set it to `http://:`.
+
+## 5. Customizing the Evaluation Service (Optional)
+
+The evaluation service supports devices such as Davinci inference chips and mobile phones. However, new hardware devices keep emerging, so the evaluation service provides customizable extensibility.
+
+The process of the evaluation service is as follows:
+
+1. Obtain the input information.
+2. Instantiate a specific hardware instance according to the hardware to be evaluated.
+3. Convert the model.
+4. Perform inference.
+5. Return the inference result.
+
+Steps 3 and 4 may differ for different hardware. Therefore, when new hardware needs to be added, implement these two steps based on the usage of the specific hardware. Specifically, the procedure is as follows:
+
+Add a hardware class to the hardwares directory and implement the `convert_model` and `inference` interfaces as follows:
+
+```python
+from class_factory import ClassFactory
+
+@ClassFactory.register()
+class MyHardware(object):
+
+    def __init__(self, optional_params):
+        pass
+
+    def convert_model(self, backend, model, weight, **kwargs):
+        pass
+
+    def inference(self, converted_model, input_data, **kwargs):
+
+        return latency, output
+```
+
+In the preceding example, the `MyHardware` class is defined and registered through `@ClassFactory.register()`. The class implements the `convert_model` and `inference` interfaces. `backend` indicates the training framework with which the model was saved, for example, `pytorch` or `tensorflow`, and provides the auxiliary information needed to parse the model. `model` and `weight` indicate the model and the weight to be converted, respectively; `weight` is optional and its value may be empty. `converted_model` and `input_data` indicate the converted model and the input data, respectively.
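+
+For illustration only, the following sketch shows what a trivial implementation of the two interfaces might look like. The `DummyHardware` class and its behavior are hypothetical (it does not drive a real device); a real backend would convert the model and execute it on the target hardware:
+
+```python
+import time
+
+from class_factory import ClassFactory
+
+
+@ClassFactory.register()
+class DummyHardware(object):
+    """Hypothetical example device used only to illustrate the interface contract."""
+
+    def __init__(self, optional_params):
+        self.optional_params = optional_params
+
+    def convert_model(self, backend, model, weight, **kwargs):
+        # Nothing to convert for this dummy device, so the original model path is kept.
+        return model
+
+    def inference(self, converted_model, input_data, **kwargs):
+        # A real device would execute `converted_model` on `input_data` here.
+        start = time.time()
+        output = []  # placeholder inference result
+        latency = time.time() - start
+        return latency, output
+```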
+
+Then add the custom class to `__init__.py` of the hardwares directory.
+
+```python
+from .my_hardware import MyHardware
+```
\ No newline at end of file
diff --git a/evaluate_service/RELEASE.md b/evaluate_service/RELEASE.md
new file mode 100644
index 0000000..85891f2
--- /dev/null
+++ b/evaluate_service/RELEASE.md
@@ -0,0 +1,23 @@
+**Evaluate Service ver1.8.0 released:**
+
+**Introduction**
+
+The evaluation service is a tool developed by Noah's Ark Lab for evaluating the performance of a
+model on specific hardware. The main features are as follows:
+1. Multi-backend: PyTorch, TensorFlow, MindSpore and Caffe. The input model can come from PyTorch, TensorFlow,
+MindSpore and Caffe.
+2. Multi-hardware: mobile phone, Ascend 310, Kirin 990, etc. The model can be evaluated on multiple types of hardware.
+3. Online real-time evaluation and offline evaluation. The evaluate service can be combined with [Vega](https://github.com/huawei-noah/vega)
+to implement network architecture search with hardware in the loop. One can also use the evaluate service independently.
+4. Supports secure communication. In security mode, communications are encrypted to protect the model and data.
+
+**Installation**
+
+Install the evaluate service and the open-source software that it depends on:
+
+`pip3 install --user --upgrade evaluate-service`
+
+**Cooperation and Contribution**
+
+Welcome to use the evaluate service. If you have any questions or suggestions, need help, want to fix bugs, contribute new algorithms,
+or improve the documentation, please submit an issue in the community. We will reply to and communicate with you in a timely manner.
diff --git a/evaluate_service/docs/cn/ascend_310.md b/evaluate_service/docs/cn/ascend_310.md new file mode 100644 index 0000000..61c140c --- /dev/null +++ b/evaluate_service/docs/cn/ascend_310.md @@ -0,0 +1,97 @@ +# 部署Ascend环境 + +请参考Ascend官方文档部署Ascend环境,如下安装指导是安装过程中的关键步骤,若安装过程中出现问题,请以官方文档为准。 +在进行部署前,请在官方网站下载安装包。 + +## 1 检查已安装的Driver和CANN版本 + +若是全新的Ascend主机,需要检查是否存在`/usr/local/HiAi`目录,若存在,需要使用root账号执行如下命令卸载该目录: + +```bash +/usr/local/HiAi/uninstall.sh +``` + +需要使用非root账号执行如下命令创建`Ascend`目录,并给该目录设置为用户`HwHiAiUser`可访问: + +```bash +mkdir /usr/local/Ascend/ +sudo chown -R :HwHiAiUser /usr/local/Ascend/ +sudo chmod -R 750 /usr/local/Ascend/ +``` + +若`/usr/local/Ascend/`已存在,则需要在安装前需要检查是否已安装了较旧的Driver和CANN包, +请使用如下命令查询各个组件的版本号: + +```bash +cat /usr/local/Ascend/driver/version.info +cat /usr/local/Ascend/nnae/latest/ascend_nnae_install.info +cat /usr/local/Ascend/ascend-toolkit/latest/arm64-linux/ascend_toolkit_install.info +cat /usr/local/Ascend/nnrt/latest/arm64-linux/ascend_nnrt_install.info +cat /usr/local/Ascend/tfplugin/latest/ascend_tfplugin_install.info +``` + +若版本号较低,需要使用root账号执行卸载: + +```bash +/usr/local/Ascend/driver/script/uninstall.sh +/usr/local/Ascend/nnae/latest/script/uninstall.sh +/usr/local/Ascend/ascend-toolkit/latest/arm64-linux/script/uninstall.sh +/usr/local/Ascend/nnrt/latest/arm64-linux/script/uninstall.sh +/usr/local/Ascend/tfplugin/latest/script/uninstall.sh +``` + +若使用X86平台,请将如上命令中包含的目录中的`arm64-linux`替换为`x86_64-linux`。 + +若nnae、ascend-toolkit、nnrt、tfplugin使用非root安装,请使用该用户卸载。 + +## 2 安装Driver和CANN + +使用root用户执行如下命令安装,如下版本号供参考: + +```bash +chmod +x *.run +./A300-3000-3010-npu-driver_21.0.2_linux-aarch64.run --full +``` + +执行如下命令,确认安装是否成功: + +```bash +npu-smi info +``` + +使用非root用户安装其他包,在安装前,需要将该用户设置为和`HwHiAiUser`同组: + +```bash +usermod -a -G HwHiAiUser +``` + +```bash +./Ascend-cann-nnae_5.0.T306_linux-aarch64.run --install +./Ascend-cann-nnrt_5.0.T306_linux-aarch64.run --install +./Ascend-cann-tfplugin_5.0.T306_linux-aarch64.run --install +./Ascend-cann-toolkit_5.0.T306_linux-aarch64.run --install +``` + +安装完成后,根据提示需要重启主机。 + +## 3 设置环境变量 + +请设置如下环境变量: + +```bash +export ASCEND_HOME=/usr/local/Ascend +export HOME_DIR=/home/ +export PATH=$HOME_DIR/.local/bin:$PATH +source /usr/local/Ascend/nnae/set_env.sh +source /usr/local/Ascend/nnrt/set_env.sh +source /usr/local/Ascend/tfplugin/set_env.sh +source /usr/local/Ascend/ascend-toolkit/set_env.sh +export NPU_HOST_LIB=/usr/local/Ascend/ascend-toolkit/latest/arm64-linux/atc/lib64 +export PYTHONPATH=/usr/local/Ascend/ascend-toolkit/latest/acllib/lib64:$PYTHONPATH +export DDK_PATH=/usr/local/Ascend/ascend-toolkit/latest +export LD_LIBRARY_PATH=/usr/local/Ascend/ascend-toolkit/latest/arm64-linux/fwkacllib/lib64:/usr/local/Ascend/ascend-toolkit/latest/acllib/lib64:$LD_LIBRARY_PATH +export PYTHONPATH=$HOME_DIR/.local/lib/python3.7/site-packages/evaluate_service/security:$PYTHONPATH +export LD_LIBRARY_PATH=$HOME_DIR/.local/lib/python3.7/site-packages/evaluate_service/security/kmc/aarch64:$LD_LIBRARY_PATH +``` + +其中``为用户目录,`$NPU_HOST_LIB`为`libascendcl.so`的路径, 需要根据`libascendcl.so`实际所在的位置配置此变量。 diff --git a/evaluate_service/docs/en/ascend_310.md b/evaluate_service/docs/en/ascend_310.md new file mode 100644 index 0000000..d376b93 --- /dev/null +++ b/evaluate_service/docs/en/ascend_310.md @@ -0,0 +1,104 @@ +# Deploy the Ascend environment. + +Deploy the Ascend environment by referring to the Ascend official document. The following installation guide +is a key step during the installation. 
If an error occurs during the installation, refer to the official document.
+Before the deployment, download the installation packages from the official website.
+
+## 1 Checking the Installed Driver and CANN Versions
+
+For a new Ascend host, check whether the `/usr/local/HiAi` directory exists. If it does,
+run the following command as the root user to uninstall it:
+
+```bash
+/usr/local/HiAi/uninstall.sh
+```
+
+Run the following commands as a non-root user to create the `Ascend` directory and make the
+directory accessible to the `HwHiAiUser` user:
+
+```bash
+mkdir /usr/local/Ascend/
+sudo chown -R :HwHiAiUser /usr/local/Ascend/
+sudo chmod -R 750 /usr/local/Ascend/
+```
+
+If `/usr/local/Ascend/` already exists, check whether older Driver and CANN packages have been installed
+before proceeding.
+Run the following commands to query the version number of each component:
+
+```bash
+cat /usr/local/Ascend/driver/version.info
+cat /usr/local/Ascend/nnae/latest/ascend_nnae_install.info
+cat /usr/local/Ascend/ascend-toolkit/latest/arm64-linux/ascend_toolkit_install.info
+cat /usr/local/Ascend/nnrt/latest/arm64-linux/ascend_nnrt_install.info
+cat /usr/local/Ascend/tfplugin/latest/ascend_tfplugin_install.info
+```
+
+If the versions are older than expected, uninstall them as the root user:
+
+```bash
+/usr/local/Ascend/driver/script/uninstall.sh
+/usr/local/Ascend/nnae/latest/script/uninstall.sh
+/usr/local/Ascend/ascend-toolkit/latest/arm64-linux/script/uninstall.sh
+/usr/local/Ascend/nnrt/latest/arm64-linux/script/uninstall.sh
+/usr/local/Ascend/tfplugin/latest/script/uninstall.sh
+```
+
+If the platform is x86, replace `arm64-linux` in the directories used in the preceding commands with `x86_64-linux`.
+
+If nnae, ascend-toolkit, nnrt, and tfplugin were installed by a non-root user, uninstall them as that user.
+
+## 2 Installing the Driver and CANN
+
+Run the following commands as the root user to install the driver. The following version number is for reference only:
+
+```bash
+chmod +x *.run
+./A300-3000-3010-npu-driver_21.0.2_linux-aarch64.run --full
+```
+
+Run the following command to check whether the installation is successful:
+
+```bash
+npu-smi info
+```
+
+Before installing the other packages as a non-root user, add this user to the same group as `HwHiAiUser`:
+
+```bash
+usermod -a -G HwHiAiUser
+```
+
+```bash
+./Ascend-cann-nnae_5.0.T306_linux-aarch64.run --install
+./Ascend-cann-nnrt_5.0.T306_linux-aarch64.run --install
+./Ascend-cann-tfplugin_5.0.T306_linux-aarch64.run --install
+./Ascend-cann-toolkit_5.0.T306_linux-aarch64.run --install
+```
+
+After the installation is complete, restart the host as prompted.
+ +## 3 Setting Environment Variables + +Set the following environment variables: + +```bash +export ASCEND_HOME=/usr/local/Ascend +export HOME_DIR=/home/ +export PATH=$HOME_DIR/.local/bin:$PATH +source /usr/local/Ascend/nnae/set_env.sh +source /usr/local/Ascend/nnrt/set_env.sh +source /usr/local/Ascend/tfplugin/set_env.sh +source /usr/local/Ascend/ascend-toolkit/set_env.sh +export NPU_HOST_LIB=/usr/local/Ascend/ascend-toolkit/latest/arm64-linux/atc/lib64 +export PYTHONPATH=/usr/local/Ascend/ascend-toolkit/latest/acllib/lib64:$PYTHONPATH +export DDK_PATH=/usr/local/Ascend/ascend-toolkit/latest +export LD_LIBRARY_PATH=/usr/local/Ascend/ascend-toolkit/latest/arm64-linux/fwkacllib/lib64:/usr/local/Ascend/ascend-toolkit/latest/acllib/lib64:$LD_LIBRARY_PATH +export PYTHONPATH=$HOME_DIR/.local/lib/python3.7/site-packages/evaluate_service/security:$PYTHONPATH +export LD_LIBRARY_PATH=$HOME_DIR/.local/lib/python3.7/site-packages/evaluate_service/security/kmc/aarch64:$LD_LIBRARY_PATH +``` + +In the preceding command, `` indicates the user directory, +and `$NPU_HOST_LIB` indicates the path of `libascendcl.so`. +Set this variable based on the actual location of `libascendcl.so`. \ No newline at end of file diff --git a/evaluate_service/evaluate_service/__init__.py b/evaluate_service/evaluate_service/__init__.py new file mode 100644 index 0000000..42f0b74 --- /dev/null +++ b/evaluate_service/evaluate_service/__init__.py @@ -0,0 +1,19 @@ +# -*- coding:utf-8 -*- + +# Copyright (C) 2020. Huawei Technologies Co., Ltd. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""Evaluate service.""" + +__version__ = "1.8.0" diff --git a/evaluate_service/class_factory.py b/evaluate_service/evaluate_service/class_factory.py similarity index 70% rename from evaluate_service/class_factory.py rename to evaluate_service/evaluate_service/class_factory.py index 3100a74..dad504d 100644 --- a/evaluate_service/class_factory.py +++ b/evaluate_service/evaluate_service/class_factory.py @@ -1,12 +1,18 @@ # -*- coding:utf-8 -*- # Copyright (C) 2020. Huawei Technologies Co., Ltd. All rights reserved. -# This program is free software; you can redistribute it and/or modify -# it under the terms of the MIT License. -# This program is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# MIT License for more details. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
"""Management class registration and bind configuration properties, provides the type of class supported.""" diff --git a/evaluate_service/evaluate_service/hardwares/__init__.py b/evaluate_service/evaluate_service/hardwares/__init__.py new file mode 100644 index 0000000..e1b24a5 --- /dev/null +++ b/evaluate_service/evaluate_service/hardwares/__init__.py @@ -0,0 +1,3 @@ +from .davinci.davinci import Davinci + +__all__ = ['Davinci'] diff --git a/evaluate_service/hardwares/davinci/compile_atlas300.sh b/evaluate_service/evaluate_service/hardwares/davinci/compile_atlas300.sh similarity index 76% rename from evaluate_service/hardwares/davinci/compile_atlas300.sh rename to evaluate_service/evaluate_service/hardwares/davinci/compile_atlas300.sh index 84dbbdf..8b8c781 100644 --- a/evaluate_service/hardwares/davinci/compile_atlas300.sh +++ b/evaluate_service/evaluate_service/hardwares/davinci/compile_atlas300.sh @@ -4,7 +4,7 @@ SAVE_PATH=$2 cd $EXAMPLE_DIR/ mkdir -p build/intermediates/host cd build/intermediates/host -cmake ../../../src -DCMAKE_CXX_COMPILER=g++ -DCMAKE_SKIP_RPATH=TRUE -DCMAKE_CXX_FLAGS="-s" -DCMAKE_C_FLAGS="-s" +cmake ../../../src -DCMAKE_CXX_COMPILER=g++ -DCMAKE_SKIP_RPATH=TRUE -DCMAKE_CXX_FLAGS="-s" -DCMAKE_C_FLAGS="-s" -DCMAKE_FORTIFY_SOURCE=2 make cd ../../../out diff --git a/evaluate_service/hardwares/davinci/davinci.py b/evaluate_service/evaluate_service/hardwares/davinci/davinci.py similarity index 67% rename from evaluate_service/hardwares/davinci/davinci.py rename to evaluate_service/evaluate_service/hardwares/davinci/davinci.py index f8c7cec..f95132c 100644 --- a/evaluate_service/hardwares/davinci/davinci.py +++ b/evaluate_service/evaluate_service/hardwares/davinci/davinci.py @@ -1,20 +1,27 @@ # -*- coding: utf-8 -*- # Copyright (C) 2020. Huawei Technologies Co., Ltd. All rights reserved. -# This program is free software; you can redistribute it and/or modify -# it under the terms of the MIT License. -# This program is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# MIT License for more details. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. """The hardware of davinci.""" -import subprocess +import datetime import logging import os -from evaluate_service.class_factory import ClassFactory -import datetime +import subprocess + import numpy as np +from evaluate_service.class_factory import ClassFactory @ClassFactory.register() @@ -49,7 +56,27 @@ def convert_model(self, backend, model, weight, **kwargs): except subprocess.CalledProcessError as exc: logging.error("convert model to om model failed. 
the return message is : {}.".format(exc))
 
-    def inference(self, converted_model, input_data, **kwargs):
+    def _get_200dk_infer_cmd(self, save_path):
+        app_dir = datetime.datetime.now().strftime('%Y%m%d%H%M%S%f')
+        example_dir = self.current_path + "/samples/atlas200dk"
+        ddk_user_name = self.optional_params.get("ddk_user_name")
+        ddk_host_ip = self.optional_params.get("ddk_host_ip")
+        atlas_host_ip = self.optional_params.get("atlas_host_ip")
+        command_line = ["bash", self.current_path + "/inference_atlas200.sh",
+                        save_path, example_dir, ddk_user_name, ddk_host_ip, atlas_host_ip, app_dir]
+        return command_line
+
+    def _compile_atlas300(self, save_path):
+        # compile the Davinci program
+        example_dir = self.current_path + "/samples/atlas300"
+        command_line = ["bash", self.current_path + "/compile_atlas300.sh",
+                        example_dir, save_path]
+        try:
+            subprocess.check_output(command_line)
+        except subprocess.CalledProcessError as exc:
+            logging.error("compile failed. the return message is : {}.".format(exc))
+
+    def inference(self, converted_model, input_data, is_last=False, cal_metric=False, **kwargs):
         """Inference in Davinci.
 
         :param converted_model: converted model file
@@ -64,25 +91,11 @@ def inference(self, converted_model, input_data, **kwargs):
         converted_model = os.path.join(converted_model, "davinci_model.om")
         log_save_path = os.path.dirname(input_data)
         if self.davinci_environment_type == "ATLAS200DK":
-            task_dir = log_save_path
-            app_dir = datetime.datetime.now().strftime('%Y%m%d%H%M%S%f')
-            example_dir = self.current_path + "/samples/atlas200dk"
-            ddk_user_name = self.optional_params.get("ddk_user_name")
-            ddk_host_ip = self.optional_params.get("ddk_host_ip")
-            atlas_host_ip = self.optional_params.get("atlas_host_ip")
-            command_line = ["bash", self.current_path + "/utils/atlas200_dk/inference_atlas300.sh",
-                            task_dir, example_dir, ddk_user_name, ddk_host_ip, atlas_host_ip, app_dir]
+            command_line = self._get_200dk_infer_cmd(save_path=log_save_path)
             result_file = os.path.join(log_save_path, "result_file")
         else:
             if not os.path.exists(os.path.join(share_dir, "main")):
-                # compile the Davinci program
-                example_dir = self.current_path + "/samples/atlas300"
-                command_line = ["bash", self.current_path + "/compile_atlas300.sh",
-                                example_dir, share_dir]
-                try:
-                    subprocess.check_output(command_line)
-                except subprocess.CalledProcessError as exc:
-                    logging.error("compile failed. the return message is : {}.".format(exc))
+                self._compile_atlas300(share_dir)
             # execute the Davinci program
             command_line = ["bash", self.current_path + "/inference_atlas300.sh",
                             input_data, converted_model, share_dir, log_save_path]
@@ -94,7 +107,13 @@ def inference(self, converted_model, input_data, **kwargs):
             logging.error("inference failed. 
the return message is : {}.".format(exc)) latency = self._get_latency(os.path.join(log_save_path, "ome.log")) - output = self._get_output(result_file) + if cal_metric: + output = self._get_output(result_file) + else: + output = None + if is_last: + os.remove(input_data) + os.remove(converted_model) return latency, output def _get_latency(self, log_file): diff --git a/evaluate_service/hardwares/davinci/get_latency_from_log.sh b/evaluate_service/evaluate_service/hardwares/davinci/get_latency_from_log.sh similarity index 100% rename from evaluate_service/hardwares/davinci/get_latency_from_log.sh rename to evaluate_service/evaluate_service/hardwares/davinci/get_latency_from_log.sh diff --git a/evaluate_service/hardwares/davinci/inference_atlas300.sh b/evaluate_service/evaluate_service/hardwares/davinci/inference_atlas300.sh similarity index 92% rename from evaluate_service/hardwares/davinci/inference_atlas300.sh rename to evaluate_service/evaluate_service/hardwares/davinci/inference_atlas300.sh index 3eb5466..d8a91aa 100644 --- a/evaluate_service/hardwares/davinci/inference_atlas300.sh +++ b/evaluate_service/evaluate_service/hardwares/davinci/inference_atlas300.sh @@ -10,4 +10,4 @@ cp $EXECUTE_FILE_PATH/acl.json $LOG_SAVE_PATH/ cd $LOG_SAVE_PATH/ #sudo env "LD_LIBRARY_PATH=/usr/local/Ascend/acllib/lib64:/usr/local/Ascend/add-ons:/usr/local/Ascend/driver/lib64/" ./main >$WORK_DIR/ome.log -./main >$LOG_SAVE_PATH/ome.log +./main >$LOG_SAVE_PATH/ome.log \ No newline at end of file diff --git a/evaluate_service/hardwares/davinci/model_convert.sh b/evaluate_service/evaluate_service/hardwares/davinci/model_convert.sh similarity index 76% rename from evaluate_service/hardwares/davinci/model_convert.sh rename to evaluate_service/evaluate_service/hardwares/davinci/model_convert.sh index edeb680..97f20b7 100644 --- a/evaluate_service/hardwares/davinci/model_convert.sh +++ b/evaluate_service/evaluate_service/hardwares/davinci/model_convert.sh @@ -9,21 +9,29 @@ PRECISION=$8 if [ $DAVINCI_ENV_TYPE == "ATLAS200DK" ]; then if [ $BACKEND == "tensorflow" ]; then - omg --model=$MODEL --framework=3 --output=$OM_SAVE_PATH/davinci_model >$LOG_SAVE_PATH/omg.log 2>&1 + omg --model=$MODEL --framework=3 --output=$OM_SAVE_PATH/davinci_model >$LOG_SAVE_PATH/omg.log 2>&1 && + rm -f $MODEL elif [ $BACKEND == "caffe" ]; then - omg --model=$MODEL --weight=$WEIGHT --framework=0 --output=$OM_SAVE_PATH/davinci_model >$LOG_SAVE_PATH/omg.log 2>&1 + omg --model=$MODEL --weight=$WEIGHT --framework=0 --output=$OM_SAVE_PATH/davinci_model >$LOG_SAVE_PATH/omg.log 2>&1 && + rm -f $MODEL + rm -f $WEIGHT else echo "[ERROR] Davinci model convert: The backend must be tensorflow, caffe." 
fi else if [ $BACKEND == "tensorflow" ]; then - atc --model=$MODEL --framework=3 --input_format='NCHW' --disable_reuse_memory=1 --input_shape=$INPUT_SHAPE --output=$OM_SAVE_PATH/davinci_model --soc_version=Ascend310 --core_type=AiCore --output_type=$PRECISION >$LOG_SAVE_PATH/omg.log 2>&1 + atc --model=$MODEL --framework=3 --input_format='NCHW' --disable_reuse_memory=1 --input_shape=$INPUT_SHAPE --output=$OM_SAVE_PATH/davinci_model --soc_version=Ascend310 --core_type=AiCore --output_type=$PRECISION >$LOG_SAVE_PATH/omg.log 2>&1 && + rm -f $MODEL elif [ $BACKEND == "caffe" ]; then - atc --model=$MODEL --weight=$WEIGHT --framework=0 --input_format='NCHW' --disable_reuse_memory=1 --output=$OM_SAVE_PATH/davinci_model --soc_version=Ascend310 --core_type=AiCore >$LOG_SAVE_PATH/omg.log 2>&1 + atc --model=$MODEL --weight=$WEIGHT --framework=0 --input_format='NCHW' --disable_reuse_memory=1 --output=$OM_SAVE_PATH/davinci_model --soc_version=Ascend310 --core_type=AiCore >$LOG_SAVE_PATH/omg.log 2>&1 && + rm -f $MODEL + rm -f $WEIGHT elif [ $BACKEND == "mindspore" ]; then - atc --model=$MODEL --framework=1 --disable_reuse_memory=1 --output=$OM_SAVE_PATH/davinci_model --soc_version=Ascend310 --core_type=AiCore --output_type=$PRECISION >$LOG_SAVE_PATH/omg.log 2>&1 + atc --model=$MODEL --framework=1 --disable_reuse_memory=1 --output=$OM_SAVE_PATH/davinci_model --soc_version=Ascend310 --core_type=AiCore --output_type=$PRECISION >$LOG_SAVE_PATH/omg.log 2>&1 && + rm -f $MODEL elif [ $BACKEND == "onnx" ]; then - atc --model=$MODEL --framework=5 --output=$OM_SAVE_PATH/davinci_model --soc_version=Ascend310 --core_type=AiCore --output_type=$PRECISION >$LOG_SAVE_PATH/omg.log 2>&1 + atc --model=$MODEL --framework=5 --output=$OM_SAVE_PATH/davinci_model --soc_version=Ascend310 --core_type=AiCore --output_type=$PRECISION >$LOG_SAVE_PATH/omg.log 2>&1 && + rm -f $MODEL else echo "[ERROR] Davinci model convert: The backend must be tensorflow, caffe, mindspore or onnx." 
fi diff --git a/evaluate_service/hardwares/davinci/samples/atlas300/inc/model_process.h b/evaluate_service/evaluate_service/hardwares/davinci/samples/atlas300/inc/model_process.h similarity index 100% rename from evaluate_service/hardwares/davinci/samples/atlas300/inc/model_process.h rename to evaluate_service/evaluate_service/hardwares/davinci/samples/atlas300/inc/model_process.h diff --git a/evaluate_service/hardwares/davinci/samples/atlas300/inc/sample_process.h b/evaluate_service/evaluate_service/hardwares/davinci/samples/atlas300/inc/sample_process.h similarity index 100% rename from evaluate_service/hardwares/davinci/samples/atlas300/inc/sample_process.h rename to evaluate_service/evaluate_service/hardwares/davinci/samples/atlas300/inc/sample_process.h diff --git a/evaluate_service/hardwares/davinci/samples/atlas300/inc/utils.h b/evaluate_service/evaluate_service/hardwares/davinci/samples/atlas300/inc/utils.h similarity index 100% rename from evaluate_service/hardwares/davinci/samples/atlas300/inc/utils.h rename to evaluate_service/evaluate_service/hardwares/davinci/samples/atlas300/inc/utils.h diff --git a/evaluate_service/hardwares/davinci/samples/atlas300/src/CMakeLists.txt b/evaluate_service/evaluate_service/hardwares/davinci/samples/atlas300/src/CMakeLists.txt similarity index 94% rename from evaluate_service/hardwares/davinci/samples/atlas300/src/CMakeLists.txt rename to evaluate_service/evaluate_service/hardwares/davinci/samples/atlas300/src/CMakeLists.txt index 098aa7d..c2d13ac 100644 --- a/evaluate_service/hardwares/davinci/samples/atlas300/src/CMakeLists.txt +++ b/evaluate_service/evaluate_service/hardwares/davinci/samples/atlas300/src/CMakeLists.txt @@ -10,9 +10,9 @@ project(ACL_RESNET50) add_compile_options(-std=c++11) set(CMAKE_RUNTIME_OUTPUT_DIRECTORY "../../../out") -set(CMAKE_CXX_FLAGS_DEBUG "-fPIC -O0 -g -Wall") +set(CMAKE_CXX_FLAGS_DEBUG "-fPIC -O2 -g -Wall") set(CMAKE_CXX_FLAGS_RELEASE "-fPIC -O2 -Wall") - +set(CMAKE_FORTIFY_SOURCE "2") set(INC_PATH $ENV{DDK_PATH}) if (NOT DEFINED ENV{DDK_PATH}) diff --git a/evaluate_service/hardwares/davinci/samples/atlas300/src/acl.json b/evaluate_service/evaluate_service/hardwares/davinci/samples/atlas300/src/acl.json similarity index 100% rename from evaluate_service/hardwares/davinci/samples/atlas300/src/acl.json rename to evaluate_service/evaluate_service/hardwares/davinci/samples/atlas300/src/acl.json diff --git a/evaluate_service/hardwares/davinci/samples/atlas300/src/main.cpp b/evaluate_service/evaluate_service/hardwares/davinci/samples/atlas300/src/main.cpp similarity index 100% rename from evaluate_service/hardwares/davinci/samples/atlas300/src/main.cpp rename to evaluate_service/evaluate_service/hardwares/davinci/samples/atlas300/src/main.cpp diff --git a/evaluate_service/hardwares/davinci/samples/atlas300/src/model_process.cpp b/evaluate_service/evaluate_service/hardwares/davinci/samples/atlas300/src/model_process.cpp similarity index 95% rename from evaluate_service/hardwares/davinci/samples/atlas300/src/model_process.cpp rename to evaluate_service/evaluate_service/hardwares/davinci/samples/atlas300/src/model_process.cpp index 1a76eb3..dd32b56 100644 --- a/evaluate_service/hardwares/davinci/samples/atlas300/src/model_process.cpp +++ b/evaluate_service/evaluate_service/hardwares/davinci/samples/atlas300/src/model_process.cpp @@ -30,21 +30,22 @@ double tick(void) } -double difftimeval(const struct timeval *start, const struct timeval *end) +double eplasedtime(const struct timeval *end_time, const struct timeval 
*start_time) { - double d; - time_t s; - suseconds_t u; - - s = start->tv_sec - end->tv_sec; - u = start->tv_usec - end->tv_usec; - d = s; - d *= 1000000.0; - d += u; - return d; + double time_total; + time_t time_second; + suseconds_t time_microsecond ; + + time_second = end_time->tv_sec - start_time->tv_sec; + time_microsecond = end_time->tv_usec - start_time->tv_usec; + time_total = time_second; + time_total *= 1000000.0; + time_total += time_microsecond; + return time_total; } + ModelProcess::ModelProcess() :modelId_(0), modelMemSize_(0), modelWeightSize_(0), modelMemPtr_(nullptr), modelWeightPtr_(nullptr), loadFlag_(false), modelDesc_(nullptr), input_(nullptr), output_(nullptr) { @@ -327,7 +328,7 @@ Result ModelProcess::Execute() gettimeofday(&start, NULL); aclError ret = aclmdlExecute(modelId_, input_, output_); gettimeofday(&end, NULL); - cout<< "costTime "<< difftimeval(&end, &start)/1000<(end_time-start_time)/CLOCKS_PER_SEC*1000< 0: - logging.warning("job_id {} contains invalid characters".format(job_id)) - abort(400, "job_id {} contains invalid characters".format(job_id)) - return job_id + if self.security_mode: + security.args.check_backend(self.backend) + security.args.check_hardware(self.hardware) + security.args.check_job_id(self.job_id) + security.args.check_input_shape(self.input_shape) + security.args.check_out_nodes(self.out_nodes) + security.args.check_repeat_times(self.repeat_times) + security.args.check_precision(self.precision) def upload_files(self): """Upload the files from the client to the service.""" @@ -167,20 +172,28 @@ def upload_files(self): self.upload_file_path = os.path.join(self.current_path, "out", self.now_time) self.share_dir = os.path.join(self.current_path, "out", self.job_id) os.makedirs(self.upload_file_path) - + os.makedirs(self.share_dir) + patterns = [".pkl", ".pth", ".pt", ".pb", ".ckpt", ".air", '.om', + ".onnx", ".caffemodel", ".pbtxt", ".prototxt"] model_file = request.files.get("model_file") if model_file is not None: self.model = self.upload_file_path + "/" + secure_filename(model_file.filename) + if os.path.splitext(self.model)[1] not in patterns: + raise ValueError(f'{model_file.filename} file type is not supported.') model_file.save(self.model) data_file = request.files.get("data_file") if data_file is not None: self.input_data = self.upload_file_path + "/" + secure_filename(data_file.filename) + if not os.path.basename(self.input_data) == 'input.bin': + raise ValueError(f'data {data_file.filename} file is not supported.') data_file.save(self.input_data) weight_file = request.files.get("weight_file") if weight_file is not None: self.weight = self.upload_file_path + "/" + secure_filename(weight_file.filename) + if os.path.splitext(self.weight)[1] not in patterns: + raise ValueError(f'{weight_file.filename} file type is not supported.') weight_file.save(self.weight) else: self.weight = "" @@ -190,7 +203,6 @@ def upload_files(self): def _clean_data_path(clean_interval, work_path): while True: _clean_time = time.time() - clean_interval - # _current_path = os.path.dirname(os.path.abspath(__file__)) folder_pattern = "{}/out/*".format(work_path) folders = glob.glob(folder_pattern) for folder in folders: @@ -216,26 +228,29 @@ def _parse_args(): help="the user to acess ATLAS200200 DK") parser.add_argument("-atlas_host_ip", "--atlas_host_ip", type=str, required=False, default=None, help="the ip of ATLAS200200 DK") - + parser.add_argument("-s", "--security_mode", action='store_true', + help="enable safe mode") args = parser.parse_args() return 
args def run(): """Run the evaluate service.""" - os.umask(0o027) args = _parse_args() ip_address = args.host_ip listen_port = args.port clean_interval = args.clean_interval work_path = args.work_path + security_mode = args.security_mode + if security_mode: + os.umask(0o077) optional_params = {"davinci_environment_type": args.davinci_environment_type, "ddk_user_name": args.ddk_user_name, "atlas_host_ip": args.atlas_host_ip } - p = multiprocessing.Process(target=_clean_data_path, args=(clean_interval, work_path), daemon=True) p.start() - Evaluate._add_params(work_path, optional_params) + Evaluate._add_params(work_path, args.security_mode, optional_params) api.add_resource(Evaluate, '/') - run_flask(app, host=ip_address, port=listen_port) + + run_flask(app, host=ip_address, port=listen_port, security_mode=security_mode) diff --git a/evaluate_service/evaluate_service/run_flask.py b/evaluate_service/evaluate_service/run_flask.py new file mode 100644 index 0000000..ba50dcb --- /dev/null +++ b/evaluate_service/evaluate_service/run_flask.py @@ -0,0 +1,115 @@ +# -*- coding: utf-8 -*- + +# Copyright (C) 2020. Huawei Technologies Co., Ltd. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""Run Flask.""" + +import configparser +import logging +import os +from multiprocessing import Process +import gevent +from gevent import pywsgi +from .security.utils import create_context +from .security.verify_config import check_risky_files + +white_list = None +request_frequency_limit = "100/minute" +max_content_length = 1000 * 1000 * 1000 + + +def get_request_frequency_limit(): + """Get request frequncy limit.""" + global request_frequency_limit + return request_frequency_limit + + +def get_max_content_length(): + """Get max contect length.""" + global max_content_length + return max_content_length + + +def get_white_list(): + """Get white list.""" + global white_list + return white_list + + +def load_security_setting(): + """Load security settings.""" + home = os.environ['HOME'] + config_file = os.path.join(home, ".vega/vega.ini") + if not check_risky_files([config_file]): + return False + cfg = configparser.ConfigParser() + cfg.read(config_file) + config = dict(cfg._sections) + for k in config: + config[k] = dict(config[k]) + + return config + + +def run_flask(app, host, port, security_mode): + """Run flask.""" + if security_mode: + app.config['MAX_CONTENT_LENGTH'] = get_max_content_length() + config = load_security_setting() + if not config: + return False + ca_cert = config.get('security').get('ca_cert') + server_cert = config.get('security').get('server_cert') + server_secret_key = config.get('security').get('server_secret_key') + encrypted_password = config.get('security').get('encrypted_password') + key_component_1 = config.get('security').get('key_component_1') + key_component_2 = config.get('security').get('key_component_2') + if not check_risky_files((ca_cert, server_cert, server_secret_key, key_component_1, key_component_2)): + return + try: + if encrypted_password == "": + 
ssl_context = create_context(ca_cert, server_cert, server_secret_key) + else: + ssl_context = create_context(ca_cert, server_cert, server_secret_key, + encrypted_password, key_component_1, key_component_2) + except Exception: + logging.error("Fail to create context.") + return False + + server = pywsgi.WSGIServer((host, port), app, ssl_context=ssl_context) + if "limit" in config: + global white_list + global request_frequency_limit + global max_content_length + if "white_list" in config["limit"]: + white_list = config["limit"]["white_list"].replace(" ", "").split(',') + if "request_frequency_limit" in config["limit"]: + request_frequency_limit = config["limit"]["request_frequency_limit"] + if "max_content_length" in config["limit"]: + max_content_length = int(config["limit"]["max_content_length"]) + else: + server = pywsgi.WSGIServer((host, port), app) + + server.init_socket() + server._stop_event.clear() + + def _server_forever(): + server.start_accepting() + logging.info("server started.") + server._stop_event.wait() + gevent.wait() + + p = Process(target=_server_forever) + p.start() diff --git a/evaluate_service/evaluate_service/security/__init__.py b/evaluate_service/evaluate_service/security/__init__.py new file mode 100644 index 0000000..c014103 --- /dev/null +++ b/evaluate_service/evaluate_service/security/__init__.py @@ -0,0 +1,25 @@ +# -*- coding:utf-8 -*- + +# Copyright (C) 2020. Huawei Technologies Co., Ltd. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""Run pipeline.""" + +__all__ = ["load_config", "get_config", "add_args", "check_args", "check_yml", "check_msg", "post"] + +from .conf import ServerConfig, ClientConfig, Config +from .args import add_args, check_args, check_yml, check_msg +from .post import post +from .conf import load_config, get_config +from .verify_config import check_risky_file diff --git a/evaluate_service/evaluate_service/security/args.py b/evaluate_service/evaluate_service/security/args.py new file mode 100644 index 0000000..1a2245b --- /dev/null +++ b/evaluate_service/evaluate_service/security/args.py @@ -0,0 +1,120 @@ +# -*- coding:utf-8 -*- + +# Copyright (C) 2020. Huawei Technologies Co., Ltd. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
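The run_flask helper added above only turns on TLS and the request limits when load_security_setting can read ~/.vega/vega.ini. A minimal sketch of the layout that code expects; every path and value below is a placeholder, and the optional [limit] section feeds the white_list, request_frequency_limit and max_content_length globals:

    [security]
    ca_cert = /home/user/.vega/ca.crt
    server_cert = /home/user/.vega/server.crt
    server_secret_key = /home/user/.vega/server.key
    encrypted_password =
    key_component_1 = /home/user/.vega/ksmaster_server.dat
    key_component_2 = /home/user/.vega/ksstandby_server.dat

    [limit]
    white_list = 127.0.0.1,10.0.0.2
    request_frequency_limit = 100/minute
    max_content_length = 1000000000

Each certificate or key file referenced here still has to pass check_risky_files: it must exist, belong to the current user, not be a symlink, and have permissions no wider than 600.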
+ +"""Security args.""" +import os +import re +import yaml + + +def add_args(parser): + """Add security args.""" + _config = parser.add_argument_group(title='security setting') + _config.add_argument("-s", "--security", dest='security', action='store_true', + help="enable safe mode") + return parser + + +def _check_value(value, pattern): + if isinstance(value, str) and len(re.compile(pattern).findall(value)) > 0: + raise ValueError("{} contains invalid characters.".format(value)) + + +def _check_dict(dict_value, pattern): + """Check dict.""" + if not isinstance(dict_value, dict): + return + for item in dict_value: + value = dict_value[item] + if isinstance(value, dict): + _check_dict(value, pattern) + else: + _check_value(value, pattern) + + +def check_msg(msg): + """Check msg.""" + _check_dict(msg, pattern="[^_A-Za-z0-9\\s:/.~-]") + + +def check_args(args): + """Check args.""" + args_dict = vars(args) + _check_dict(args_dict, pattern="[^_A-Za-z0-9:/.~-]") + + +def check_yml(config_yaml): + """Check yml.""" + if config_yaml is None: + raise ValueError("config path can't be None or empty") + if os.stat(config_yaml).st_uid != os.getuid(): + raise ValueError(f"The file {config_yaml} not belong to the current user") + with open(config_yaml) as f: + raw_dict = yaml.safe_load(f) + _check_dict(raw_dict, pattern=r"[^_A-Za-z0-9\s\<\>=\[\]\(\),!\{\}:/.~-]") + + +def check_job_id(job_id): + """Check Job id.""" + if not isinstance(job_id, str): + raise TypeError('"job_id" must be str, not {}'.format(type(job_id))) + _check_value(job_id, pattern="[^_A-Za-z0-9]") + + +def check_input_shape(input_shape): + """Check input shape.""" + if not isinstance(input_shape, str): + raise TypeError('"input_shape" must be str, not {}'.format(type(input_shape))) + _check_value(input_shape, pattern="[^_A-Za-z0-9:,]") + + +def check_out_nodes(out_nodes): + """Check out nodes.""" + if not isinstance(out_nodes, str): + raise TypeError('"out_nodes" must be str, not {}'.format(type(out_nodes))) + _check_value(out_nodes, pattern="[^_A-Za-z0-9:/]") + + +def check_backend(backend): + """Check backend.""" + if backend not in ["tensorflow", "caffe", "onnx", "mindspore"]: + raise ValueError("The backend only support tensorflow, caffe, onnx and mindspore.") + + +def check_hardware(hardware): + """Check hardware.""" + if hardware not in ["Davinci", "Bolt", "Kirin990_npu"]: + raise ValueError("The hardware only support Davinci and Bolt.") + + +def check_precision(precision): + """Check precision.""" + if precision.upper() not in ["FP32", "FP16"]: + raise ValueError("The precision only support FP32 and FP16.") + + +def check_repeat_times(repeat_times): + """Check repeat times.""" + MAX_EVAL_EPOCHS = 10000 + if not isinstance(repeat_times, int): + raise TypeError('"repeat_times" must be int, not {}'.format(type(repeat_times))) + if not 0 < repeat_times <= MAX_EVAL_EPOCHS: + raise ValueError("repeat_times {} is not in valid range (1-{})".format(repeat_times, MAX_EVAL_EPOCHS)) + + +def path_verify(path): + """Verify path.""" + return re.sub(r"[^_A-Za-z0-9\/.]", "", path) diff --git a/evaluate_service/evaluate_service/security/check_env.py b/evaluate_service/evaluate_service/security/check_env.py new file mode 100644 index 0000000..c394a02 --- /dev/null +++ b/evaluate_service/evaluate_service/security/check_env.py @@ -0,0 +1,25 @@ +# -*- coding:utf-8 -*- + +# Copyright (C) 2020. Huawei Technologies Co., Ltd. All rights reserved. 
+# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""Check security env.""" + + +__all__ = ["check_env"] + + +def check_env(args) -> bool: + """Check security env.""" + return True diff --git a/evaluate_service/evaluate_service/security/conf.py b/evaluate_service/evaluate_service/security/conf.py new file mode 100644 index 0000000..4e9fa03 --- /dev/null +++ b/evaluate_service/evaluate_service/security/conf.py @@ -0,0 +1,140 @@ +# -*- coding:utf-8 -*- + +# Copyright (C) 2020. Huawei Technologies Co., Ltd. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""Security config. + +~/.vega/server.ini + +[security] + ca_cert=<~/.vega/car.crt> + server_cert_dask=<~/.vega/server_dask.crt> + server_secret_key_dask=<~/.vega/server_dask.key> + client_cert_dask=<~/.vega/client_dask.crt> + client_secret_key_dask=<~/.vega/ client_dask.key> + +~/.vega/client.ini + +[security] + ca_cert=<~/.vega/car.crt> + client_cert=<~/.vega/client.crt> + client_secret_key=<~/.vega/client.key> + encrypted_password= + key_component_1=<~/.vega/ksmaster_client.dat> + key_component_2=<~/.vega/ksstandby_client.dat> + +""" + +import os +import logging +import configparser +from .verify_config import check_risky_files + + +class Config(): + """Security Config.""" + + def load(self) -> bool: + """Load from config file.""" + if not check_risky_files([self.file_name]): + return False + config = configparser.ConfigParser() + try: + config.read(self.file_name) + except Exception: + logging.error(f"Failed to read setting from {self.file_name}") + return False + if "security" not in config.sections(): + return False + keys = [] + pass_check_keys = ["encrypted_password", "white_list"] + for key in config["security"]: + if key not in self.keys: + return False + setattr(self, key, config.get("security", key)) + if key not in pass_check_keys and not check_risky_files([config.get("security", key)]): + return False + keys.append(key) + if len(keys) != len(self.keys): + missing_keys = list(set(self.keys) - set(keys)) + logging.error(f"setting items {missing_keys} are missing in {self.file_name}") + return False + return True + + +class ServerConfig(Config): + """Security Config.""" + + def __init__(self): + """Initialize.""" + self.ca_cert = None + self.server_cert_dask = None + self.server_secret_key_dask = None + self.client_cert_dask = None + self.client_secret_key_dask = None + self.file_name = os.path.expanduser("~/.vega/server.ini") + self.keys = ["ca_cert", "server_cert_dask", "server_secret_key_dask", 
"client_cert_dask", + "client_secret_key_dask"] + + +class ClientConfig(Config): + """Security Config.""" + + def __init__(self): + """Initialize.""" + self.ca_cert = None + self.client_cert = None + self.client_secret_key = None + self.encrypted_password = None + self.key_component_1 = None + self.key_component_2 = None + self.white_list = [] + self.file_name = os.path.expanduser("~/.vega/client.ini") + self.keys = [ + "ca_cert", "client_cert", "client_secret_key", "encrypted_password", + "key_component_1", "key_component_2", "white_list"] + + +_server_config = ServerConfig() +_client_config = ClientConfig() + + +def load_config(_type: str) -> bool: + """Load security config.""" + if _type not in ["all", "server", "client"]: + logging.error(f"not support security config type: {_type}") + return False + if _type in ["server", "all"]: + global _server_config + if not _server_config.load(): + logging.error("load server security config fail.") + return False + if _type in ["client", "all"]: + global _client_config + if not _client_config.load(): + logging.error("load client security config fail.") + return False + return True + + +def get_config(_type: str) -> Config: + """Get config.""" + if _type not in ["server", "client"]: + logging.error(f"not support security config type: {_type}") + return False + if _type == "server": + return _server_config + else: + return _client_config diff --git a/evaluate_service/evaluate_service/security/kmc/encrypt_key.py b/evaluate_service/evaluate_service/security/kmc/encrypt_key.py new file mode 100644 index 0000000..7691c1d --- /dev/null +++ b/evaluate_service/evaluate_service/security/kmc/encrypt_key.py @@ -0,0 +1,121 @@ +# -*- coding:utf-8 -*- + +# Copyright (C) 2020. Huawei Technologies Co., Ltd. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""Load the Certificate and encrypt the passwd.""" + +import argparse +import getpass +import logging +import subprocess +from OpenSSL.crypto import load_certificate, FILETYPE_PEM, load_privatekey +from . 
import kmc +from .utils import check_password_rule + + +def encrypt_mm(origin_mm, key_component_1, key_component_2): + """Encrypt the passwd.""" + ret = kmc.init(key_component_1, key_component_2, 9) + if ret is False: + logging.error("kmc init error.") + return "" + domain_id = 0 + result = kmc.encrypt(domain_id, origin_mm) + kmc.finalize() + return result + + +def validate_certificate(cert, key, origin_mm): + """Validate the certificate.""" + flag = True + with open(key, "r", encoding="utf-8") as f: + key_value = f.read() + try: + load_privatekey(FILETYPE_PEM, key_value, passphrase=origin_mm.encode('utf-8')) + except Exception: + flag = False + logging.error("Wrong PEM.") + return flag + + # check signature algorithm + with open(cert, "r", encoding="utf-8") as f: + cert_value = f.read() + cert_value = load_certificate(FILETYPE_PEM, cert_value) + enc_algorithm = cert_value.get_signature_algorithm() + if enc_algorithm in b'sha1WithRSAEncryption' b'md5WithRSAEncryption': + logging.warning("Insecure encryption algorithm: %s", enc_algorithm) + # check key length + + p1 = subprocess.Popen(["openssl", "x509", "-in", cert, "-text", "-noout"], + stdout=subprocess.PIPE, shell=False) + p2 = subprocess.Popen(["grep", "RSA Public-Key"], stdin=p1.stdout, stdout=subprocess.PIPE, shell=False) + p3 = subprocess.Popen(["tr", "-cd", "[0-9]"], stdin=p2.stdout, stdout=subprocess.PIPE, shell=False) + RSA_key = p3.communicate()[0] + if int(RSA_key) < 2048: + logging.warning("Insecure key length: %d", int(RSA_key)) + return flag + + +def import_certificate(args, origin_mm): + """Load the certificate.""" + # 1.validate private key and certification, if not pass, program will exit + ret = validate_certificate(args.cert, args.key, origin_mm) + if not ret: + logging.error("Validate certificate failed.") + return 0 + + # 2.encrypt private key's passwd. + encrypt = encrypt_mm(origin_mm, args.key_component_1, args.key_component_2) + if not encrypt: + logging.error("kmc encrypt private key error.") + return 0 + logging.warning(f"Encrypt sucuess. The encrypted of your input is {encrypt}") + logging.warning(f"The key components are {args.key_component_1} and {args.key_component_2}, please keep it safe.") + + return True + + +def args_parse(): + """Parse the input args.""" + parser = argparse.ArgumentParser(description='Certificate import') + parser.add_argument("--cert", default="./kmc/config/crt/sever.cert", type=str, + help="The path of certificate file") + parser.add_argument("--key", default='./kmc/config/crt/sever.key', type=str, + help="The path of private Key file.") + parser.add_argument("--key_component_1", default='./kmc/config/ksf/ksmaster.dat', type=str, + help="key material 1.") + parser.add_argument("--key_component_2", default='./kmc/config/ksf/ksstandby.dat', type=str, + help="key material 2.") + + args = parser.parse_args() + + return args + + +def main(): + """Run the encrypt process.""" + args = args_parse() + logging.info("process encrypt begin.") + origin_mm = getpass.getpass("Please enter the password to be encrypted: ") + if not check_password_rule(origin_mm): + logging.info("You should re-generate your server cert/key with following rules:") + logging.info("1. equals to or longer than 8 letters") + logging.info("2. contains at least one digit letter") + logging.info("3. contains at least one capital letter") + logging.info("4. 
contains at least one lowercase letter") + + ret = import_certificate(args, origin_mm) + if not ret: + logging.error("Encrypt failed.") diff --git a/evaluate_service/evaluate_service/security/kmc/kmc.py b/evaluate_service/evaluate_service/security/kmc/kmc.py new file mode 100644 index 0000000..2dcf548 --- /dev/null +++ b/evaluate_service/evaluate_service/security/kmc/kmc.py @@ -0,0 +1,228 @@ +# -*- coding:utf-8 -*- + +# Copyright (C) 2020. Huawei Technologies Co., Ltd. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""Huawei KMC library.""" + +import ctypes +import os +from ctypes.util import find_library +import logging +import platform + +__all__ = ["init", "encrypt", "decrypt", "check_and_update_mk", "update_root_key", "hmac", "hmac_verify", "finalize"] + +_kmc_dll: ctypes.CDLL = None +_libc_dll: ctypes.CDLL = None +ADVANCE_DAY = 3 + + +def hmac(domain_id: int, plain_text: str) -> str: + """Encode HMAC code.""" + p_char = ctypes.c_char_p() + hmac_len = ctypes.c_int(0) + c_plain_text = ctypes.create_string_buffer(plain_text.encode()) + _kmc_dll.KeHmacByDomain.restype = ctypes.c_int + _kmc_dll.KeHmacByDomain.argtypes = [ + ctypes.c_int, ctypes.c_char_p, ctypes.c_int, ctypes.POINTER(ctypes.c_char_p), ctypes.POINTER(ctypes.c_int)] + ret = _kmc_dll.KeHmacByDomain( + domain_id, c_plain_text, len(plain_text), ctypes.byref(p_char), ctypes.pointer(hmac_len)) + if ret != 0: + logging.error(f"failed to call KeHmacByDomain, code={ret}") + value = p_char.value.decode() + ret = _libc_dll.free(p_char) + if ret != 0: + logging.error(f"failed to free resource, code={ret}") + return value + + +def hmac_verify(domain_id: int, plain_text: str, hmac_text: str) -> bool: + """Verify HMAC code.""" + c_plain_text = ctypes.create_string_buffer(plain_text.encode()) + c_hmac_text = ctypes.create_string_buffer(hmac_text.encode()) + _kmc_dll.KeHmacVerifyByDomain.restype = ctypes.c_int + _kmc_dll.KeHmacVerifyByDomain.argtypes = [ + ctypes.c_int, ctypes.c_char_p, ctypes.c_int, ctypes.c_char_p, ctypes.c_int] + ret = _kmc_dll.KeHmacVerifyByDomain(domain_id, c_plain_text, len(plain_text), c_hmac_text, len(c_hmac_text)) + return ret + + +def encrypt(domain_id: int, plain_text: str) -> str: + """Encrypt.""" + p_char = ctypes.c_char_p() + cipher_len = ctypes.c_int(0) + c_plain_text = ctypes.create_string_buffer(plain_text.encode()) + + _kmc_dll.KeEncryptByDomain.restype = ctypes.c_int + _kmc_dll.KeEncryptByDomain.argtypes = [ctypes.c_int, ctypes.c_char_p, ctypes.c_int, ctypes.POINTER(ctypes.c_char_p), + ctypes.POINTER(ctypes.c_int)] + ret = _kmc_dll.KeEncryptByDomain(domain_id, c_plain_text, len(plain_text), ctypes.byref(p_char), + ctypes.pointer(cipher_len)) + if ret != 0: + logging.error("KeEncryptByDomain failed.") + return "" + value = p_char.value.decode() + ret = _libc_dll.free(p_char) + if ret != 0: + logging.error("free memory error. 
ret=%d" % ret) + return value + + +def _decrypt(domain_id: int, cipher_text: str): + """Decrypt.""" + p_char = ctypes.c_char_p() + plain_len = ctypes.c_int(0) + c_cipher_text = ctypes.create_string_buffer(cipher_text.encode()) + _kmc_dll.KeDecryptByDomain.restype = ctypes.c_int + _kmc_dll.KeDecryptByDomain.argtypes = [ctypes.c_int, ctypes.c_char_p, ctypes.c_int, ctypes.POINTER(ctypes.c_char_p), + ctypes.POINTER(ctypes.c_int)] + ret = _kmc_dll.KeDecryptByDomain(domain_id, c_cipher_text, len(cipher_text), ctypes.byref(p_char), + ctypes.pointer(plain_len)) + if ret != 0: + logging.error("KeDecryptByDomain failed.") + return "" + value = p_char.value.decode() + ret = _libc_dll.free(p_char) + if ret != 0: + logging.error("free memory error. ret=%d" % ret) + return value + + +def check_and_update_mk(domain_id: int, advance_day: int) -> bool: + """Check and update mk.""" + ret = _kmc_dll.KeCheckAndUpdateMk(domain_id, advance_day) + if ret != 0: + logging.error(f"failed to call KeCheckAndUpdateMk, code={ret}") + return False + return True + + +def update_root_key() -> bool: + """Update root key.""" + ret = _kmc_dll.KeUpdateRootKey() + if ret != 0: + logging.error(f"failed to call KeUpdateRootKey, code={ret}") + return False + return True + + +def finalize() -> None: + """Finalize.""" + _kmc_dll.KeFinalize.restype = ctypes.c_int + _kmc_dll.KeFinalize.argtypes = [] + _kmc_dll.KeFinalize() + + +def _get_lib_path(): + pkg_path = os.path.dirname(__file__) + if platform.processor() == "x86_64": + return os.path.join(pkg_path, "x86_64/libkmcext.so") + else: + return os.path.join(pkg_path, "aarch64/libkmcext.so") + + +def _load_dll(kmc_dll_path: str) -> None: + global _kmc_dll + if _kmc_dll: + return + global _libc_dll + if _libc_dll: + return + _libc_dll = ctypes.CDLL(find_library("c")) + _kmc_dll = ctypes.CDLL(kmc_dll_path) + + +@ctypes.CFUNCTYPE(None, ctypes.c_int, ctypes.c_char_p) +def _logger(level: ctypes.c_int, msg: ctypes.c_char_p): + logging.info("level:%d, msg:%s" % (level, str(msg))) + + +def _init_log(): + _kmc_dll.KeSetLoggerCallback.restype = None + _kmc_dll.KeSetLoggerCallback.argtypes = [ctypes.CFUNCTYPE(None, ctypes.c_int, ctypes.c_char_p)] + _kmc_dll.KeSetLoggerCallback(_logger) + _kmc_dll.KeSetLoggerLevel.restype = None + _kmc_dll.KeSetLoggerLevel.argtypes = [ctypes.c_int] + _kmc_dll.KeSetLoggerLevel(2) # DISABLE(0),ERROR(1),WARN(2),INFO(3),DEBUG(4),TRACE(5) + + +class KMCConfig(ctypes.Structure): + _fields_ = [ + ("primaryKeyStoreFile", ctypes.c_char * 4096), + ("standbyKeyStoreFile", ctypes.c_char * 4096), + ("domainCount", ctypes.c_int), + ("role", ctypes.c_int), + ("procLockPerm", ctypes.c_int), + ("sdpAlgId", ctypes.c_int), + ("hmacAlgId", ctypes.c_int), + ("semKey", ctypes.c_int) + ] + + +def _init_kmc_config(primary_key_store_file, standby_key_store_file, alg_id, domain_count): + config = KMCConfig() + config.primaryKeyStoreFile = primary_key_store_file.encode() + config.standbyKeyStoreFile = standby_key_store_file.encode() + config.domainCount = domain_count + config.role = 1 # Agent 0; Master 1 + config.procLockPerm = 0o0600 + config.sdpAlgId = alg_id + config.hmacAlgId = 2052 # HMAC_SHA256 2052; HMAC_SHA384 2053 HMAC_SHA512 2054 + config.semKey = 0x20161516 + _kmc_dll.KeInitialize.restype = ctypes.c_int + _kmc_dll.KeInitialize.argtypes = [ctypes.POINTER(KMCConfig)] + return _kmc_dll.KeInitialize(ctypes.byref(config)) + + +def init(primary_key_store_file: str, standby_key_store_file: str, alg_id: int, domain_count=3) -> bool: + """Initialize.""" + if alg_id not in [5, 7, 8, 9]: 
# AES128_CBC, AES256_CBC, AES128_GCM, AES256_GCM + logging.error(f"alg (id={alg_id}) is not legal") + return False + _load_dll(_get_lib_path()) + _init_log() + ret = _init_kmc_config(primary_key_store_file, standby_key_store_file, alg_id, domain_count) + if ret != 0: + logging.error(f"failed to call KeInitialized, code={ret}") + return False + return True + + +def decrypt(cert_pem_file, secret_key_file, key_mm, key_component_1, key_component_2): + """Decrypt the passwd.""" + sdp_alg_id = 9 + # Make sure ssl certificate file exist + ca_file_list = (cert_pem_file, secret_key_file) + for file in ca_file_list: + if file and os.path.exists(file): + continue + else: + logging.error("SSL Certificate files does not exist! Please check config.yaml and cert file.") + raise FileNotFoundError + + primary_keyStoreFile = key_component_1 + standby_keyStoreFile = key_component_2 + ret = init(primary_keyStoreFile, standby_keyStoreFile, sdp_alg_id) + if ret is False: + logging.error("kmc init error.") + raise Exception('ERROR: kmc init failed!') + domain_id = 0 + decrypt_mm = _decrypt(domain_id, key_mm) + if decrypt_mm == "": + logging.error("kmc init error.") + raise Exception('ERROR: kmc init failed!') + check_and_update_mk(domain_id, ADVANCE_DAY) + finalize() + return decrypt_mm diff --git a/evaluate_service/evaluate_service/security/kmc/utils.py b/evaluate_service/evaluate_service/security/kmc/utils.py new file mode 100644 index 0000000..f99bf2f --- /dev/null +++ b/evaluate_service/evaluate_service/security/kmc/utils.py @@ -0,0 +1,44 @@ +# -*- coding:utf-8 -*- + +# Copyright (C) 2020. Huawei Technologies Co., Ltd. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""Some tools.""" +import re +import logging + + +def check_password_rule(password): + """Check password rule.""" + digit_regex = re.compile(r'\d') + upper_regex = re.compile(r'[A-Z]') + lower_regex = re.compile(r'[a-z]') + + if len(password) < 8: + logging.warning("The length must >= 8") + return False + + if len(digit_regex.findall(password)) == 0: + logging.warning("Must contains digit letters") + return False + + if len(upper_regex.findall(password)) == 0: + logging.warning("Must contains capital letters") + return False + + if len(lower_regex.findall(password)) == 0: + logging.warning("Must contains lowercase letters") + return False + + return True diff --git a/evaluate_service/evaluate_service/security/load_pickle.py b/evaluate_service/evaluate_service/security/load_pickle.py new file mode 100644 index 0000000..df63f23 --- /dev/null +++ b/evaluate_service/evaluate_service/security/load_pickle.py @@ -0,0 +1,57 @@ +# Copyright (C) 2020. Huawei Technologies Co., Ltd. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""Load pickle.""" + +import pickle + +__all__ = ["restricted_loads"] + + +safe_builtins = { + 'vega', + 'torch', + 'torchvision', + 'functools', + 'timm', + 'mindspore', + 'tensorflow', + 'numpy', + 'imageio', + 'collections', +} + + +class RestrictedUnpickler(pickle.Unpickler): + """Restrict unpickler.""" + + def __init__(self, file, fix_imports, encoding, errors, security): + super(RestrictedUnpickler, self).__init__(file=file, fix_imports=fix_imports, encoding=encoding, errors=errors) + self.security = security + + def find_class(self, module, name): + """Find class.""" + _class = super().find_class(module, name) + if self.security: + if module.split('.')[0] in safe_builtins: + return _class + raise pickle.UnpicklingError(f"global '{module}' is forbidden") + else: + return _class + + +def restricted_loads(file, fix_imports=True, encoding="ASCII", errors="strict", security=False): + """Load obj.""" + return RestrictedUnpickler(file, fix_imports=fix_imports, encoding=encoding, errors=errors, + security=security).load() diff --git a/evaluate_service/evaluate_service/security/post.py b/evaluate_service/evaluate_service/security/post.py new file mode 100644 index 0000000..a5110e1 --- /dev/null +++ b/evaluate_service/evaluate_service/security/post.py @@ -0,0 +1,57 @@ +# Copyright (C) 2020. Huawei Technologies Co., Ltd. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
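The RestrictedUnpickler in load_pickle.py above rejects any pickled global whose top-level module is not in safe_builtins once the security flag is set. A minimal usage sketch, assuming the package is importable as evaluate_service.security and that model.pkl is a placeholder path:

    import pickle
    from evaluate_service.security.load_pickle import restricted_loads

    with open("model.pkl", "rb") as f:
        try:
            model = restricted_loads(f, security=True)
        except pickle.UnpicklingError as err:
            # Raised when the pickle references a module (e.g. os or subprocess)
            # that is outside the safe_builtins whitelist.
            print(f"refused to load: {err}")

With security=False the unpickler behaves like the standard library default, so hardened deployments should pass the flag explicitly.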
+ +"""Rest post operation in security mode.""" + +import urllib +import json +import logging +import requests +from .conf import get_config +from .utils import create_context +from .args import check_msg +from .verify_cert import verify_cert + + +def post(host, files, data): + """Post a REST requstion in security mode.""" + sec_cfg = get_config('client') + + ca_file = sec_cfg.ca_cert + cert_pem_file = sec_cfg.client_cert + secret_key_file = sec_cfg.client_secret_key + encrypted_password = sec_cfg.encrypted_password + key_component_1 = sec_cfg.key_component_1 + key_component_2 = sec_cfg.key_component_2 + + if not cert_pem_file or not secret_key_file or not ca_file: + logging.error("CERT file is not existed.") + + if not verify_cert(ca_file, cert_pem_file): + logging.error(f"The cert {ca_file} and {cert_pem_file} are invalid, please check.") + + if encrypted_password == "": + context = create_context(ca_file, cert_pem_file, secret_key_file) + else: + context = create_context(ca_file, cert_pem_file, secret_key_file, encrypted_password, key_component_1, + key_component_2) + if host.lower().startswith('https') is False: + raise Exception(f'The host {host} must start with https') + prepped = requests.Request(method="POST", url=host, files=files, data=data).prepare() + request = urllib.request.Request(host, data=prepped.body, method='POST') + request.add_header("Content-Type", prepped.headers['Content-Type']) + response = urllib.request.urlopen(request, context=context) # nosec + result = json.loads(response.read().decode('utf8')) + check_msg(dict((key, value) for key, value in result.items() if key != 'error_message')) + return result diff --git a/evaluate_service/evaluate_service/security/run_dask.py b/evaluate_service/evaluate_service/security/run_dask.py new file mode 100644 index 0000000..f403954 --- /dev/null +++ b/evaluate_service/evaluate_service/security/run_dask.py @@ -0,0 +1,139 @@ +# -*- coding: utf-8 -*- + +# Copyright (C) 2020. Huawei Technologies Co., Ltd. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +"""Run dask scheduler and worker.""" +import os +import subprocess +import shutil +import logging +import socket +import random +from distributed import Client +from distributed.security import Security +from .conf import get_config +from .verify_cert import verify_cert + + +sec_cfg = get_config('server') + + +def get_client_security(address): + """Get client.""" + address = address.replace("tcp", "tls") + if not verify_cert(sec_cfg.ca_cert, sec_cfg.client_cert_dask): + logging.error(f"The cert {sec_cfg.ca_cert} and {sec_cfg.client_cert_dask} are invalid, please check.") + sec = Security(tls_ca_file=sec_cfg.ca_cert, + tls_client_cert=sec_cfg.client_cert_dask, + tls_client_key=sec_cfg.client_secret_key_dask, + require_encryption=True) + return Client(address, security=sec) + + +def get_address_security(master_host, master_port): + """Get address.""" + return "tls://{}:{}".format(master_host, master_port) + + +def run_scheduler_security(ip, port, tmp_file): + """Run scheduler.""" + if not verify_cert(sec_cfg.ca_cert, sec_cfg.server_cert_dask): + logging.error(f"The cert {sec_cfg.ca_cert} and {sec_cfg.server_cert_dask} are invalid, please check.") + return subprocess.Popen( + [ + "dask-scheduler", + "--no-dashboard", + "--no-show", + f"--tls-ca-file={sec_cfg.ca_cert}", + f"--tls-cert={sec_cfg.server_cert_dask}", + f"--tls-key={sec_cfg.server_secret_key_dask}", + f"--host={ip}", + "--protocol=tls", + f"--port={port}", + f"--scheduler-file={tmp_file}", + f"--local-directory={os.path.dirname(tmp_file)}", + ], + env=os.environ + ) + + +def _available_port(min_port, max_port) -> int: + _sock = socket.socket() + while True: + port = random.randint(min_port, max_port) + try: + _sock.bind(('', port)) + _sock.close() + return port + except Exception: + logging.debug('Failed to get available port, continue.') + continue + return None + + +def run_local_worker_security(slave_ip, address, local_dir): + """Run dask-worker on local node.""" + address = address.replace("tcp", "tls") + nanny_port = _available_port(30000, 30999) + worker_port = _available_port(29000, 29999) + pid = subprocess.Popen( + [ + "dask-worker", + address, + '--nthreads=1', + '--nprocs=1', + '--memory-limit=0', + f"--local-directory={local_dir}", + f"--tls-ca-file={sec_cfg.ca_cert}", + f"--tls-cert={sec_cfg.client_cert_dask}", + f"--tls-key={sec_cfg.client_secret_key_dask}", + "--no-dashboard", + f"--host={slave_ip}", + "--protocol=tls", + f"--nanny-port={nanny_port}", + f"--worker-port={worker_port}", + ], + env=os.environ + ) + return pid + + +def run_remote_worker_security(slave_ip, address, local_dir): + """Run dask-worker on remote node.""" + address = address.replace("tcp", "tls") + nanny_port = _available_port(30000, 30999) + worker_port = _available_port(29000, 29999) + pid = subprocess.Popen( + [ + "ssh", + slave_ip, + shutil.which("dask-worker"), + address, + '--nthreads=1', + '--nprocs=1', + '--memory-limit=0', + f"--local-directory={local_dir}", + f"--tls-ca-file={sec_cfg.ca_cert}", + f"--tls-cert={sec_cfg.client_cert_dask}", + f"--tls-key={sec_cfg.client_secret_key_dask}", + "--no-dashboard", + f"--host={slave_ip}", + "--protocol=tls", + f"--nanny-port={nanny_port}", + f"--worker-port={worker_port}", + ], + env=os.environ + ) + return pid diff --git a/evaluate_service/evaluate_service/security/utils.py b/evaluate_service/evaluate_service/security/utils.py new file mode 100644 index 0000000..9b6c220 --- /dev/null +++ b/evaluate_service/evaluate_service/security/utils.py @@ -0,0 +1,46 @@ +# Copyright (C) 2020. 
Huawei Technologies Co., Ltd. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""Context utils.""" +import ssl +import sys +import logging + + +def create_context(ca_file, cert_pem_file, secret_key_file, key_mm=None, key_component_1=None, key_component_2=None): + """Create the SSL context.""" + ciphers = "ECDHE-ECDSA-AES128-CCM:ECDHE-ECDSA-AES256-CCM:ECDHE-ECDSA-AES128-GCM-SHA256" \ + ":ECDHE-ECDSA-AES256-GCM-SHA384:ECDHE-RSA-AES128-GCM-SHA256:ECDHE-RSA-AES256-GCM-SHA384" \ + ":DHE-RSA-AES128-GCM-SHA256:DHE-RSA-AES256-GCM-SHA384:DHE-DSS-AES128-GCM-SHA256" \ + ":DHE-DSS-AES256-GCM-SHA384:DHE-RSA-AES128-CCM:DHE-RSA-AES256-CCM" + context = ssl.SSLContext(ssl.PROTOCOL_TLS) + context.options += ssl.OP_NO_TLSv1 + context.options += ssl.OP_NO_TLSv1_1 + if sys.version_info >= (3, 7): + context.options += ssl.OP_NO_TLSv1_2 + context.options += ssl.OP_NO_RENEGOTIATION + context.options -= ssl.OP_ALL + context.verify_mode = ssl.CERT_REQUIRED + context.set_ciphers(ciphers) + if key_mm is not None: + from .kmc.kmc import decrypt + logging.debug("Using encrypted key.") + if key_component_1 is None or key_component_2 is None: + logging.error("For encrypted key, the component must be provided.") + decrypt_mm = decrypt(cert_pem_file, secret_key_file, key_mm, key_component_1, key_component_2) + context.load_cert_chain(cert_pem_file, secret_key_file, password=decrypt_mm) + else: + context.load_cert_chain(cert_pem_file, secret_key_file) + context.load_verify_locations(ca_file) + return context diff --git a/evaluate_service/evaluate_service/security/verify_cert.py b/evaluate_service/evaluate_service/security/verify_cert.py new file mode 100644 index 0000000..cdc7238 --- /dev/null +++ b/evaluate_service/evaluate_service/security/verify_cert.py @@ -0,0 +1,38 @@ +# -*- coding: utf-8 -*- + +# Copyright (C) 2020. Huawei Technologies Co., Ltd. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
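create_context above pins a restricted cipher list, always disables TLS 1.0/1.1 (and TLS 1.2 as well on Python 3.7+, leaving TLS 1.3 only) and requires a peer certificate. A minimal sketch of building a server-side context from PEM files; all paths are placeholders and an unencrypted private key is assumed:

    import ssl
    from evaluate_service.security.utils import create_context

    ctx = create_context("/path/to/ca.crt",
                         "/path/to/server.crt",
                         "/path/to/server.key")
    # Mutual TLS: the peer must present a certificate signed by ca.crt.
    assert ctx.verify_mode == ssl.CERT_REQUIRED

If the private key is protected by a KMC-encrypted passphrase, the encrypted string plus both key components must be supplied so the kmc module can recover the password before load_cert_chain runs.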
+ +"""Verify cert.""" + +import logging + + +def verify_cert(ca_cert_file, cert_file): + """Verify the cert.""" + from OpenSSL.crypto import load_certificate, FILETYPE_PEM, X509Store, X509StoreContext, X509StoreContextError + ca_cert = load_certificate(FILETYPE_PEM, open(ca_cert_file, "r", encoding="utf-8").read()) + cert = load_certificate(FILETYPE_PEM, open(cert_file, 'r', encoding="utf-8").read()) + if ca_cert.has_expired() or cert.has_expired(): + logging.error("The cert is expired, please check.") + return False + store = X509Store() + store.add_cert(ca_cert) + ctx = X509StoreContext(store, cert) + try: + ctx.verify_certificate() + except X509StoreContextError: + logging.error("Certificate signature failure, ca cert file and cert file not match.") + return False + return True diff --git a/evaluate_service/evaluate_service/security/verify_config.py b/evaluate_service/evaluate_service/security/verify_config.py new file mode 100644 index 0000000..f5c910e --- /dev/null +++ b/evaluate_service/evaluate_service/security/verify_config.py @@ -0,0 +1,152 @@ +# -*- coding:utf-8 -*- + +# Copyright (C) 2020. Huawei Technologies Co., Ltd. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""Run pipeline.""" + +import configparser +import logging +import os +import stat + + +def _file_exist(path): + return os.access(path, os.F_OK) + + +def _file_belong_to_current_user(path): + return os.stat(path).st_uid == os.getuid() + + +def _file_other_writable(path): + return os.stat(path).st_mode & stat.S_IWOTH + + +def _file_is_link(path): + return os.path.islink(path) + + +def _get_risky_files_by_suffix(suffixes, path): + risky_files = [] + non_current_user_files = [] + others_writable_files = [] + link_files = [] + for suffix in suffixes: + if not path.endswith(suffix): + continue + abs_path = os.path.abspath(path) + if _file_exist(abs_path): + risky_files.append(abs_path) + if not _file_belong_to_current_user(abs_path): + non_current_user_files.append(abs_path) + if _file_other_writable(abs_path): + others_writable_files.append(abs_path) + if _file_is_link(abs_path): + link_files.append(abs_path) + + return risky_files, non_current_user_files, others_writable_files, link_files + + +def get_risky_files(config): + """Get contained risky file (.pth/.pth.tar/.onnx/.py).""" + risky_files = [] + non_current_user_files = [] + others_writable_files = [] + link_files = [] + from vega.common.config import Config + if not isinstance(config, Config): + return risky_files, non_current_user_files, others_writable_files, link_files + + for value in config.values(): + if isinstance(value, Config) and value.get("type") == "DeepLabNetWork": + value = value.get("dir").rstrip("/") + "/" + value.get("name").lstrip("/") + ".py" + if isinstance(value, str): + temp_risky_files, temp_non_current_user_files, temp_other_writable_files, temp_link_files \ + = _get_risky_files_by_suffix([".pth", ".pth.tar", ".py"], value) + risky_files.extend(temp_risky_files) + 
non_current_user_files.extend(temp_non_current_user_files) + others_writable_files.extend(temp_other_writable_files) + link_files.extend(temp_link_files) + temp_risky_files, temp_non_current_user_files, temp_other_writable_files, temp_link_files \ + = get_risky_files(value) + risky_files.extend(temp_risky_files) + non_current_user_files.extend(temp_non_current_user_files) + others_writable_files.extend(temp_other_writable_files) + link_files.extend(temp_link_files) + + return risky_files, non_current_user_files, others_writable_files, link_files + + +def check_risky_file(args, config): + """Check risky file (.pth/.pth.tar/.py).""" + if not args.security: + return True + risky_files, non_current_user_files, others_writable_files, link_files = get_risky_files(config) + if len(risky_files) == 0: + return True + + print("\033[1;33m" + "WARNING: The following executable files will be loaded:" + "\033[0m") + for file in risky_files: + print(file) + if len(non_current_user_files) > 0: + print("\033[1;33m" + "WARNING: The following executable files that will be loaded do not belong to the current user:" + "\033[0m") + for file in non_current_user_files: + print(file) + if len(others_writable_files) > 0: + print("\033[1;33m" + "WARNING: The following executable files that will be loaded have others write permission:" + "\033[0m") + for file in others_writable_files: + print(file) + if len(link_files) > 0: + print("\033[1;33m" + "WARNING: The following executable files that will be loaded is soft link file:" + "\033[0m") + for file in link_files: + print(file) + user_confirm = input("It is possible to construct malicious pickle data " + "which will execute arbitrary code during unpickling .pth/.pth.tar/.py files. " + "\nPlease ensure the safety and consistency of the loaded executable files. " + "\nDo you want to continue? (yes/no) ").strip(" ") + while user_confirm != "yes" and user_confirm != "no": + user_confirm = input("Please enter yes or no! ").strip(" ") + if user_confirm == "yes": + return True + elif user_confirm == "no": + return False + + +def check_risky_files(file_list): + """Check if cert and key file are risky.""" + res = True + for file in file_list: + if not os.path.exists(file): + logging.error(f"File <{file}> does not exist") + res = False + continue + if not _file_belong_to_current_user(file): + logging.error(f"File <{file}> is not owned by current user") + res = False + if _file_is_link(file): + logging.error(f"File <{file}> should not be soft link") + res = False + if os.stat(file).st_mode & 0o0177: + logging.error(f"File <{file}> permissions are not correct, cannot exceed 600") + res = False + return res diff --git a/evaluate_service/evaluate_service/security/zmq_op.py b/evaluate_service/evaluate_service/security/zmq_op.py new file mode 100644 index 0000000..29b89d5 --- /dev/null +++ b/evaluate_service/evaluate_service/security/zmq_op.py @@ -0,0 +1,70 @@ +# -*- coding: utf-8 -*- + +# Copyright (C) 2020. Huawei Technologies Co., Ltd. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+# See the License for the specific language governing permissions and +# limitations under the License. + +"""ZMQ operation.""" +import os +import uuid +import zmq +import zmq.auth +from zmq.auth.thread import ThreadAuthenticator + + +def listen_security(ip, min_port, max_port, max_tries, temp_path): + """Listen on server.""" + ctx = zmq.Context.instance() + # Start an authenticator for this context. + auth = ThreadAuthenticator(ctx) + auth.start() + auth.configure_curve(domain='*', location=zmq.auth.CURVE_ALLOW_ANY) + + socket = ctx.socket(zmq.REP) + server_secret_key = os.path.join(temp_path, "server.key_secret") + if not os.path.exists(server_secret_key): + _, server_secret_key = zmq.auth.create_certificates(temp_path, "server") + server_public, server_secret = zmq.auth.load_certificate(server_secret_key) + if os.path.exists(server_secret_key): + os.remove(server_secret_key) + socket.curve_secretkey = server_secret + socket.curve_publickey = server_public + socket.curve_server = True # must come before bind + + port = socket.bind_to_random_port( + f"tcp://{ip}", min_port=min_port, max_port=max_port, max_tries=100) + return socket, port + + +def connect_security(ip, port, temp_path): + """Connect to server.""" + ctx = zmq.Context.instance() + socket = ctx.socket(zmq.REQ) + client_name = uuid.uuid1().hex[:8] + client_secret_key = os.path.join(temp_path, "{}.key_secret".format(client_name)) + if not os.path.exists(client_secret_key): + client_public_key, client_secret_key = zmq.auth.create_certificates(temp_path, client_name) + client_public, client_secret = zmq.auth.load_certificate(client_secret_key) + socket.curve_secretkey = client_secret + socket.curve_publickey = client_public + server_public_key = os.path.join(temp_path, "server.key") + if not os.path.exists(server_public_key): + server_public_key, _ = zmq.auth.create_certificates(temp_path, "server") + server_public, _ = zmq.auth.load_certificate(server_public_key) + socket.curve_serverkey = server_public + socket.connect(f"tcp://{ip}:{port}") + if os.path.exists(client_secret_key): + os.remove(client_secret_key) + if os.path.exists(client_public_key): + os.remove(client_public_key) + return socket diff --git a/evaluate_service/hardwares/__init__.py b/evaluate_service/hardwares/__init__.py deleted file mode 100644 index c388bb8..0000000 --- a/evaluate_service/hardwares/__init__.py +++ /dev/null @@ -1,5 +0,0 @@ -from .davinci.davinci import Davinci -from .mobile.mobile import Mobile -from .kirin990_npu.kirin990_npu import Kirin990_npu - -__all__ = ['Davinci', "Mobile", "Kirin990_npu"] diff --git a/evaluate_service/hardwares/davinci/compile_atlas200.sh b/evaluate_service/hardwares/davinci/compile_atlas200.sh deleted file mode 100644 index 5c86edf..0000000 --- a/evaluate_service/hardwares/davinci/compile_atlas200.sh +++ /dev/null @@ -1,43 +0,0 @@ -# inference for Atlas 200 DK -WORK_DIR=$1 -EXAMPLE_DIR=$2 -DDK_USER_NAME=$3 -DDK_HOST_IP=$4 -ATLAS_HOST_IP=$5 -APP_DIR=$6 - -CURRENT_DIR=$(pwd) -#source env.sh - -# copy the example project to work dir -mkdir $WORK_DIR/build_files/ -cp -rf $EXAMPLE_DIR/* $WORK_DIR/build_files/ - -mkdir -p $WORK_DIR/build_files/run/out/test_data/model/ -mkdir -p $WORK_DIR/build_files/run/out/test_data/data/ -cp $WORK_DIR/*.om $WORK_DIR/build_files/run/out/test_data/model/ -cp $WORK_DIR/*.bin $WORK_DIR/build_files/run/out/test_data/data/ - - -# build the file -cd $WORK_DIR/build_files/ -mkdir -p build/intermediates/device -mkdir -p build/intermediates/host - -cd build/intermediates/device -cmake ../../../src 
-Dtype=device -Dtarget=RC -DCMAKE_CXX_COMPILER=aarch64-linux-gnu-g++ -DCMAKE_CXX_FLAGS="-s" -DCMAKE_C_FLAGS="-s" -make install -echo "[INFO] build the device sucess" -cd ../host -cmake ../../../src -Dtype=host -Dtarget=RC -DCMAKE_CXX_COMPILER=aarch64-linux-gnu-g++ -DCMAKE_CXX_FLAGS="-s" -DCMAKE_C_FLAGS="-s" -make install -echo "[INFO] build the host sucess" - -cd $CURRENT_DIR - -# execute in Atlas 200 DK -#scp /home/ly/evaluate_test/atlas_execute.sh HwHiAiUser@$ATLAS_HOST_IP:~/ -#echo "[INFO] copy the atlas_execute.sh to Atlas 200 DK." -ssh -o "StrictHostKeyChecking no" HwHiAiUser@$ATLAS_HOST_IP "bash -s" < ./utils/atlas200_dk/atlas_execute.sh $WORK_DIR $DDK_USER_NAME $DDK_HOST_IP $APP_DIR -echo "[INFO] execute in Atlas 200 DK finish." - diff --git a/evaluate_service/hardwares/davinci/env/check_atlas300.sh b/evaluate_service/hardwares/davinci/env/check_atlas300.sh deleted file mode 100644 index 7b05e9a..0000000 --- a/evaluate_service/hardwares/davinci/env/check_atlas300.sh +++ /dev/null @@ -1,13 +0,0 @@ -echo "[INFO] start check the enviroment..." -python3 -c "import te" && echo "[INFO] check te sucess" -python3 -c "import topi" && echo "[INFO] check topi sucess" -#cmake --version && echo "[INFO] check cmake sucess" -atc --version && echo "[INFO] check atc sucess " - -echo "[INFO] start compile the example..." - -cd ../samples/atlas300/ -mkdir -p build/intermediates/host -cd build/intermediates/host -cmake ../../../src -DCMAKE_CXX_COMPILER=g++ -DCMAKE_SKIP_RPATH=TRUE -make && echo "[INFO] check the env sucess!" diff --git a/evaluate_service/hardwares/davinci/env/env_atlas200dk.sh b/evaluate_service/hardwares/davinci/env/env_atlas200dk.sh deleted file mode 100644 index c781b54..0000000 --- a/evaluate_service/hardwares/davinci/env/env_atlas200dk.sh +++ /dev/null @@ -1,10 +0,0 @@ -export DDK_PATH={user_path}/huawei/ddk -export PYTHONPATH=$DDK_PATH/site-packages/te-0.4.0.egg:$DDK_PATH/site-packages/topi-0.4.0.egg -export LD_LIBRARY_PATH=$DDK_PATH/uihost/lib:$DDK_PATH/lib/x86_64-linux-gcc5.4 -export PATH=$PATH:$DDK_PATH/toolchains/ccec-linux/bin:$DDK_PATH/uihost/bin -export TVM_AICPU_LIBRARY_PATH=$DDK_PATH/uihost/lib/:$DDK_PATH/uihost/toolchains/ccec-linux/aicpu_lib -export TVM_AICPU_INCLUDE_PATH=$DDK_PATH/include/inc/tensor_engine -export TVM_AICPU_OS_SYSROOT={user_path}/tools/sysroot/aarch64_Ubuntu16.04.3 -export NPU_HOST_LIB={user_path}/tools/1.32.0.B080/RC/host-aarch64_Ubuntu16.04.3/lib -export NPU_DEV_LIB={user_path}/tools/1.32.0.B080/RC/host-aarch64_Ubuntu16.04.3/lib -#export CPLUS_INCLUDE_PATH=$DDK_PATH/include/inc:$DDK_PATH/include/third_party diff --git a/evaluate_service/hardwares/davinci/env/env_atlas300.sh b/evaluate_service/hardwares/davinci/env/env_atlas300.sh deleted file mode 100644 index 3319df1..0000000 --- a/evaluate_service/hardwares/davinci/env/env_atlas300.sh +++ /dev/null @@ -1,7 +0,0 @@ -export ASCEND_HOME=/usr/local/Ascend -export PATH=/opt/cmake-3.14.5-Linux-x86_64/bin:/usr/local/python3.7.5/bin:$ASCEND_HOME/atc/ccec_compiler/bin:$ASCEND_HOME/atc/bin:$PATH -export LD_LIBRARY_PATH=$ASCEND_HOME/atc/python/site-packages/te.egg/lib:$ASCEND_HOME/atc/lib64:$ASCEND_HOME/acllib/lib64:$ASCEND_HOME/driver/lib64:$ASCEND_HOME/add-ons -export PYTHONPATH=$PYTHONPATH:$ASCEND_HOME/atc/python/site-packages/te.egg:$ASCEND_HOME/atc/python/site-packages/topi.egg:$ASCEND_HOME/atc/python/site-packages/auto_tune.egg -export ASCEND_OPP_PATH=$ASCEND_HOME/ascend-toolkit/20.2.0/x86_64-linux/opp -export DDK_PATH=$ASCEND_HOME -export 
NPU_HOST_LIB=$ASCEND_HOME/ascend-toolkit/20.2.0/x86_64-linux/acllib/lib64/stub \ No newline at end of file diff --git a/evaluate_service/hardwares/davinci/env/env_evb.sh b/evaluate_service/hardwares/davinci/env/env_evb.sh deleted file mode 100644 index d1c475c..0000000 --- a/evaluate_service/hardwares/davinci/env/env_evb.sh +++ /dev/null @@ -1,8 +0,0 @@ -export ASCEND_HOME=/usr/local/Ascend -export PATH=/usr/local/python3.7/bin:$ASCEND_HOME/atc/ccec_compiler/bin:$ASCEND_HOME/atc/bin:$PATH -export LD_LIBRARY_PATH=$ASCEND_HOME/atc/python/site-packages/te.egg/lib:$ASCEND_HOME/acllib/lib64:$ASCEND_HOME/atc/lib64:$ASCEND_HOME/driver/lib64:$ASCEND_HOME/add-ons:/usr/local/Ascend/atc/lib64/plugin/opskernel -export PYTHONPATH=$PYTHONPATH:$ASCEND_HOME/atc/python/site-packages/te.egg:$ASCEND_HOME/atc/python/site-packages/topi.egg:$ASCEND_HOME/atc/python/site-packages/auto_tune.egg -export ASCEND_OPP_PATH=/usr/local/Ascend/opp -export SLOG_PRINT_TO_STDOUT=1 -#export DUMP_GE_GRAPH=1 -#export DUMP_OP=1 diff --git a/evaluate_service/hardwares/davinci/inference_atlas200.sh b/evaluate_service/hardwares/davinci/inference_atlas200.sh deleted file mode 100644 index 4808691..0000000 --- a/evaluate_service/hardwares/davinci/inference_atlas200.sh +++ /dev/null @@ -1,18 +0,0 @@ -WORK_DIR=$1 -DDK_USER_NAME=$2 -DDK_HOST_IP=$3 -APP_DIR=$4 - -cd ~ -mkdir -p $APP_DIR -cd ~/$APP_DIR -scp -r $DDK_USER_NAME@$DDK_HOST_IP:$WORK_DIR/build_files/run/out/* ./ -echo "[INFO] copy the fils to Atlas 200 Dk sucess." -./main >ome.log -echo "[INFO] run exe in Atlas 200 Dk sucess." -scp ome.log $DDK_USER_NAME@$DDK_HOST_IP:$WORK_DIR/ -scp ./result_files/result_file $DDK_USER_NAME@$DDK_HOST_IP:$WORK_DIR/ -echo "[INFO] copy the result log to DDK host sucess." -cd ../ -rm -rf ./$APP_DIR -echo "[INFO] delete the temp files in Atlas 200 DK sucess." 
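The listen_security and connect_security helpers added in security/zmq_op.py earlier in this patch wrap a ZeroMQ REQ/REP pair with throwaway CURVE certificates. A round-trip sketch, assuming pyzmq built with CURVE support; the address, port range and temporary directory are placeholders:

    import tempfile
    from evaluate_service.security.zmq_op import listen_security, connect_security

    temp_path = tempfile.mkdtemp()
    server, port = listen_security("127.0.0.1", min_port=27000, max_port=27999,
                                   max_tries=100, temp_path=temp_path)
    client = connect_security("127.0.0.1", port, temp_path=temp_path)
    client.send(b"ping")
    print(server.recv())   # b'ping'
    server.send(b"pong")
    print(client.recv())   # b'pong'

Both ends drop their key material into the shared temp_path, and the server secret key and the client key pair are deleted again as soon as the sockets are configured.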
diff --git a/evaluate_service/hardwares/davinci/samples/atlas200dk/inc/classify_net_ai_engine.h b/evaluate_service/hardwares/davinci/samples/atlas200dk/inc/classify_net_ai_engine.h deleted file mode 100644 index e4bded2..0000000 --- a/evaluate_service/hardwares/davinci/samples/atlas200dk/inc/classify_net_ai_engine.h +++ /dev/null @@ -1,52 +0,0 @@ -/** -* @file classify_net_ai_engine.h -* -* Copyright(c)<2018>, -* -* @version 1.0 -* -* @date 2018-6-7 -*/ - -#ifndef INC_CLASSIFY_NET_AI_ENGINE_H_ -#define INC_CLASSIFY_NET_AI_ENGINE_H_ -#include -#include -#include -#include -#include -#include "inc/common.h" -#include -class ClassifyNetEngine : public hiai::Engine -{ - public: - /** - * @ingroup ClassifyNetEngine - * @brief ClassifyNetEngine init function - * @param [in]: config, Configuration parameters - * @param [in]: model_desc, Model Description - * @param [out]: HIAI_StatusT - */ - HIAI_StatusT Init(const hiai::AIConfig &config, - const std::vector& model_desc); - - /** - * @ingroup ~ClassifyNetEngine - * @brief ~ClassifyNetEngine Destructor function - */ - ~ClassifyNetEngine(); - /** - * @ingroup ClassifyNetEngine - * @brief ClassifyNetEngine executor function - * @param [in]: CLASSIFYNET_ENGINE_INPUT_SIZE, numbers of in port - * @param [in]: CLASSIFYNET_ENGINE_OUTPUT_SIZE, numbers of out out - * @param [out]: HIAI_StatusT - */ - HIAI_DEFINE_PROCESS(CLASSIFYNET_ENGINE_INPUT_SIZE, CLASSIFYNET_ENGINE_OUTPUT_SIZE); - private: - std::map config_; // config map - std::shared_ptr ai_model_manager_; // Model Manager Instance - std::vector> outDataVec_; - std::vector outData_; -}; -#endif // INC_CLASSIFY_NET_AI_ENGINE_H_ diff --git a/evaluate_service/hardwares/davinci/samples/atlas200dk/inc/classify_net_host.h b/evaluate_service/hardwares/davinci/samples/atlas200dk/inc/classify_net_host.h deleted file mode 100644 index b251e24..0000000 --- a/evaluate_service/hardwares/davinci/samples/atlas200dk/inc/classify_net_host.h +++ /dev/null @@ -1,50 +0,0 @@ -/** -* @file classify_net_host.h -* -* Copyright(c)<2018>, -* -* @version 1.0 -* -* @date 2018-6-7 -*/ -#ifndef INC_CLASSIFY_NET_HOST_H_ -#define INC_CLASSIFY_NET_HOST_H_ -#include -#include -#include "inc/common.h" -/* -* Source Engine -*/ -class SourceEngine : public hiai::Engine -{ - /** - * @ingroup SourceEngine - * @brief SourceEngine Process function - * @param [in]: SOURCE_ENGINE_INPUT_SIZE, Source Engine in port - * @param [in]: SOURCE_ENGINE_OUTPUT_SIZE, Source Engine out port - * @param [out]: HIAI_StatusT - */ - HIAI_DEFINE_PROCESS(SOURCE_ENGINE_INPUT_SIZE, SOURCE_ENGINE_OUTPUT_SIZE) -}; - -/* -* Dest Engine -*/ -class DestEngine : public hiai::Engine -{ - public: - DestEngine() : - input_que_(DEST_ENGINE_INPUT_SIZE) {} - /** - * @ingroup SourceEngine - * @brief SourceEngine Process function - * @param [in]: DEST_ENGINE_INPUT_SIZE, Source Engine in port - * @param [in]: DEST_ENGINE_OUTPUT_SIZE, Source Engine out port - * @param [out]: HIAI_StatusT - */ - HIAI_DEFINE_PROCESS(DEST_ENGINE_INPUT_SIZE, DEST_ENGINE_OUTPUT_SIZE) - - private: - hiai::MultiTypeQueue input_que_; -}; -#endif // INC_CLASSIFY_NET_HOST_H_ diff --git a/evaluate_service/hardwares/davinci/samples/atlas200dk/inc/common.h b/evaluate_service/hardwares/davinci/samples/atlas200dk/inc/common.h deleted file mode 100644 index 9778b37..0000000 --- a/evaluate_service/hardwares/davinci/samples/atlas200dk/inc/common.h +++ /dev/null @@ -1,70 +0,0 @@ -/** -* @file vgg16_main -* -* Copyright(c)<2018>, -* -* @version 1.0 -* -* @date 2018-6-7 -*/ -#ifndef INC_COMMON_H_ -#define 
INC_COMMON_H_ -#include -#include -#include - -// Defines the global value. -// Defines the file path. -static std::string TEST_SRC_FILE_PATH = "";// = "./test_data//data/source_test.bin"; -static std::string TEST_DEST_FILE_PATH =""; -static std::string GRAPH_CONFIG_FILE_PATH = "";// ="./test_data/config/sample.prototxt"; -static std::string GRAPH_MODEL_PATH = ""; - -// Defines Graph,Engine ID -static const uint32_t GRAPH_ID = 100; -static const uint32_t SRC_ENGINE_ID = 1000; -static const uint32_t SRC_PORT_ID = 0; -static const uint32_t DST_ENGINE_ID = 1002; -static const uint32_t DEST_PORT_ID_0 = 0; -static const uint32_t DEST_PORT_ID_1 = 1; - -// Defines Output shape -const std::vector DATA_NUM = {10}; - -// Defines the global value -static std::mutex local_test_mutex; -static std::condition_variable local_test_cv_; -static const uint32_t MAX_SLEEP_TIMER = 30 * 60; -static const uint32_t MIN_ARG_VALUE = 2; -// Defines image parameters. -static const float IMG_DEPTH = 1.0; -static const uint32_t SEND_COUNT = 100; -static const std::string modelName = "ClassifyModel"; - -// Defines the message_type character string. -static const std::string message_type_engine_trans = "EngineTransT"; - -// Defines the number of Engine ports. -// Source Engine -#define SOURCE_ENGINE_INPUT_SIZE 1 -#define SOURCE_ENGINE_OUTPUT_SIZE 1 - -// Dest Engine -#define DEST_ENGINE_INPUT_SIZE 1 -#define DEST_ENGINE_OUTPUT_SIZE 1 - -// ClassifyNet Engine -#define CLASSIFYNET_ENGINE_INPUT_SIZE 1 -#define CLASSIFYNET_ENGINE_OUTPUT_SIZE 1 - -#define IMAGE_INFO_DATA_NUM (3) - -// Defines the transmission structure. -typedef struct EngineTrans -{ - std::string trans_buff; - uint32_t buffer_size; - HIAI_SERIALIZE(trans_buff, buffer_size); -}EngineTransT; - -#endif // INC_COMMON_H_ diff --git a/evaluate_service/hardwares/davinci/samples/atlas200dk/inc/data_recv.h b/evaluate_service/hardwares/davinci/samples/atlas200dk/inc/data_recv.h deleted file mode 100644 index 15bafc8..0000000 --- a/evaluate_service/hardwares/davinci/samples/atlas200dk/inc/data_recv.h +++ /dev/null @@ -1,35 +0,0 @@ -/** -* @file data_recv.h -* -* Copyright(c)<2018>, -* -* @version 1.0 -* -* @date 2018-6-7 -*/ -#ifndef INC_DATA_RECV_H_ -#define INC_DATA_RECV_H_ -#include -#include -class ClassifyNetDataRecvInterface : public hiai::DataRecvInterface -{ - public: - /** - * @ingroup ClassifyNetDataRecvInterface - * @brief construct function - * @param [in]desc:std::string - */ - ClassifyNetDataRecvInterface(const std::string& filename) : - file_name_(filename) {} - - /** - * @ingroup ClassifyNetDataRecvInterface - * @brief RecvData RecvData callback,Save the File - * @param [in] - */ - HIAI_StatusT RecvData(const std::shared_ptr& message); - - private: - std::string file_name_; // Target Save File -}; -#endif // INC_DATA_RECV_H_ diff --git a/evaluate_service/hardwares/davinci/samples/atlas200dk/inc/error_code.h b/evaluate_service/hardwares/davinci/samples/atlas200dk/inc/error_code.h deleted file mode 100644 index 1651a3e..0000000 --- a/evaluate_service/hardwares/davinci/samples/atlas200dk/inc/error_code.h +++ /dev/null @@ -1,38 +0,0 @@ -/** -* @file error_code.h -* -* Copyright(c)<2018>, -* -* @version 1.0 -* -* @date 2018-6-7 -*/ -#ifndef ERROR_CODE_H_ -#define ERROR_CODE_H_ - -#include "hiaiengine/status.h" -#define MODID_CLASSIFY_NET 0x401 -enum{ - HIAI_INVALID_INPUT_MSG_CODE = 0x401, - HIAI_AI_MODEL_MANAGER_INIT_FAIL_CODE, - HIAI_AI_MODEL_MANAGER_PROCESS_FAIL_CODE, - HIAI_SEND_DATA_FAIL_CODE, - HIAI_AI_MODEL_CREATE_OUTPUT_FAIL_CODE, - 
HIAI_AI_MODEL_WRONG_OUTPUT_SIZE_CODE, - HIAI_ARG_NUMBER_NOK_CODE -}; -HIAI_DEF_ERROR_CODE(MODID_CLASSIFY_NET, HIAI_ERROR, HIAI_INVALID_INPUT_MSG, \ - "invalid input message pointer"); -HIAI_DEF_ERROR_CODE(MODID_CLASSIFY_NET, HIAI_ERROR, HIAI_AI_MODEL_MANAGER_INIT_FAIL, \ - "ai model manager init failed"); -HIAI_DEF_ERROR_CODE(MODID_CLASSIFY_NET, HIAI_ERROR, HIAI_AI_MODEL_MANAGER_PROCESS_FAIL, \ - "ai model manager process failed"); -HIAI_DEF_ERROR_CODE(MODID_CLASSIFY_NET, HIAI_ERROR, HIAI_SEND_DATA_FAIL, \ - "send data failed"); -HIAI_DEF_ERROR_CODE(MODID_CLASSIFY_NET, HIAI_ERROR, HIAI_AI_MODEL_CREATE_OUTPUT_FAIL, - "Failed to create output tensor"); -HIAI_DEF_ERROR_CODE(MODID_CLASSIFY_NET, HIAI_ERROR, HIAI_AI_MODEL_WRONG_OUTPUT_SIZE, - "Classify net rcnn output size is wrong"); -HIAI_DEF_ERROR_CODE(MODID_CLASSIFY_NET, HIAI_ERROR, HIAI_ARG_NUMBER_NOK, - "Arg number is not right, example: classify_net_main vgg16"); -#endif // ERROR_CODE_H_ diff --git a/evaluate_service/hardwares/davinci/samples/atlas200dk/inc/sample_data.h b/evaluate_service/hardwares/davinci/samples/atlas200dk/inc/sample_data.h deleted file mode 100644 index 5197ea1..0000000 --- a/evaluate_service/hardwares/davinci/samples/atlas200dk/inc/sample_data.h +++ /dev/null @@ -1,18 +0,0 @@ -/** -* @file sample_data.cpp -* -* Copyright (C) <2018> . All Rights Reserved. -* -* This program is distributed in the hope that it will be useful, -* but WITHOUT ANY WARRANTY; without even the implied warranty of -* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. -*/ -#include "hiaiengine/data_type.h" -#include "hiaiengine/data_type_reg.h" - -// Register the structure that the Engine transfers. -typedef struct EngineTransNew -{ - std::shared_ptr trans_buff; - uint32_t buffer_size; // buffer size -}EngineTransNewT; \ No newline at end of file diff --git a/evaluate_service/hardwares/davinci/samples/atlas200dk/inc/tensor.h b/evaluate_service/hardwares/davinci/samples/atlas200dk/inc/tensor.h deleted file mode 100644 index 2989eb2..0000000 --- a/evaluate_service/hardwares/davinci/samples/atlas200dk/inc/tensor.h +++ /dev/null @@ -1,93 +0,0 @@ -#ifndef DDK_TENSOR_H_ -#define DDK_TENSOR_H_ - -#include -#include -#include -#include -#include -#include -#include - -#include "common.h" -namespace ddk{ - - template - class Tensor - { - public: - Tensor():data(nullptr) {} - ~Tensor() {clear();} - - /** - * @ingroup Tensor - * @brief fromarray - * @param [in]: pdata - * @param [in]: shape - */ - void fromarray(const T* pdata, const std::vector& shape) - { - clear(); - uint32_t size = 1; - for (auto dim : shape) - { - size *= dim; - } - data = new T[size]; - memcpy_s(data, size*sizeof(T), pdata, size*sizeof(T)); - dims = shape; - } - - /** - * @ingroup Tensor - * @brief dump - * @param [in]: filepath - */ - - bool dump(const std::string& filepath) const - { - std::ofstream ofs(filepath, std::ios::binary | std::ios::trunc); - if(ofs.fail()) - { - fprintf(stderr,"Failed to open \" %s \".\n", filepath.c_str()); - return false; - } - uint32_t rank = dims.size(); - ofs << "rank: "; - ofs << rank; - ofs << " "; - uint32_t size = 1; - ofs << "dim: "; - for (uint32_t dim: dims) - { - size *= dim; - ofs << dim; - ofs << " "; - } - ofs << "data: \n"; - for (uint32_t count = 0; count < size; count++) - { - ofs << "label:" << count + 1 << " value:"; - ofs << std::setw(10) << std::setprecision(6) << data[count] <<"\n"; - } - ofs.close(); - return true; - } - - private: - void clear() - { - if (data) - { - delete []data; - data = nullptr; - } - dims.clear(); - } - 
std::vector dims; - T* data; - }; -} - - -#endif diff --git a/evaluate_service/hardwares/davinci/samples/atlas200dk/inc/util.h b/evaluate_service/hardwares/davinci/samples/atlas200dk/inc/util.h deleted file mode 100644 index dc76f3b..0000000 --- a/evaluate_service/hardwares/davinci/samples/atlas200dk/inc/util.h +++ /dev/null @@ -1,21 +0,0 @@ -/** -* @file util.h -* -* Copyright(c)<2018>, -* -* @version 1.0 -* -* @date 2018-6-7 -*/ -#ifndef INC_UTIL_H_ -#define INC_UTIL_H_ -#include -#include -class Util -{ -public: - static char* ReadBinFile(std::shared_ptr file_name_ptr, uint32_t* file_size, int32_t batchSize, bool& isDMalloc); - static bool CheckFileExist(const std::string& file_name); - static void ClassifyDump(const std::string& file_name, std::shared_ptr data); -}; -#endif //INC_UTIL_H_ diff --git a/evaluate_service/hardwares/davinci/samples/atlas200dk/run/out/test_data/config/graph_sample.prototxt b/evaluate_service/hardwares/davinci/samples/atlas200dk/run/out/test_data/config/graph_sample.prototxt deleted file mode 100644 index cafdce1..0000000 --- a/evaluate_service/hardwares/davinci/samples/atlas200dk/run/out/test_data/config/graph_sample.prototxt +++ /dev/null @@ -1,42 +0,0 @@ -graphs { - graph_id: 100 - priority: 1 - engines { - id: 1000 - engine_name: "SourceEngine" - side: HOST - thread_num: 1 - } - engines { - id: 1002 - engine_name: "DestEngine" - side: HOST - thread_num: 1 - } - engines { - id: 1003 - engine_name: "ClassifyNetEngine" - side: DEVICE - so_name: "./libai_engine.so" - thread_num: 1 - ai_config{ - items{ - name: "model_path" - value: "./test_data/model/davinci_model.om" - } - } - } - - connects { - src_engine_id: 1000 - src_port_id: 0 - target_engine_id: 1003 - target_port_id: 0 - } - connects { - src_engine_id: 1003 - src_port_id: 0 - target_engine_id: 1002 - target_port_id: 0 - } -} diff --git a/evaluate_service/hardwares/davinci/samples/atlas200dk/src/CMakeLists.txt b/evaluate_service/hardwares/davinci/samples/atlas200dk/src/CMakeLists.txt deleted file mode 100644 index 26652a5..0000000 --- a/evaluate_service/hardwares/davinci/samples/atlas200dk/src/CMakeLists.txt +++ /dev/null @@ -1,64 +0,0 @@ -# Copyright (c) Huawei Technologies Co., Ltd. 2019. All rights reserved. 
- -# CMake lowest version requirement -cmake_minimum_required(VERSION 3.5.1) - -# project information -project(classify_net) - -# Compile options -add_compile_options(-std=c++11) - -# Specify target generation path -set(CMAKE_RUNTIME_OUTPUT_DIRECTORY "../../outputs") -set(CMAKE_LIBRARY_OUTPUT_DIRECTORY "../../outputs") -set(CMAKE_INSTALL_PREFIX "../../../run") -set(CMAKE_OUTPUT_DIR "out") - - -set(CMAKE_CXX_FLAGS_RELEASE "-fPIC -O2 -Wall") - -# Header path -include_directories( -../ -$ENV{DDK_PATH}/include/inc/ -$ENV{DDK_PATH}/include/third_party/protobuf/include -$ENV{DDK_PATH}/include/third_party/cereal/include -$ENV{DDK_PATH}/include/libc_sec/include -) - -if(type STREQUAL host) - if(target STREQUAL "RC") - add_compile_options(-DIS_RC) - endif() - - # add host lib path - link_directories($ENV{NPU_HOST_LIB}) - - add_executable(main classify_net_main.cpp data_recv.cpp classify_net_host.cpp util.cpp sample_data.cpp) - - - if(target STREQUAL "RC") - target_link_libraries(main matrixdaemon pthread c_sec dl rt) - else() - target_link_libraries(main matrix pthread c_sec dl rt) - endif() - - install(TARGETS main DESTINATION ${CMAKE_OUTPUT_DIR}) -else() - add_compile_options(-DCPU_ONLY) - - # add device lib path - link_directories($ENV{NPU_DEV_LIB}) - - add_library(ai_engine SHARED classify_net_ai_engine.cpp sample_data.cpp) - - if(target STREQUAL "RC") - target_link_libraries(ai_engine idedaemon hiai_common c_sec) - else() - target_link_libraries(ai_engine idedaemon c_sec) - endif() - - install(TARGETS ai_engine DESTINATION ${CMAKE_OUTPUT_DIR}) -endif() - diff --git a/evaluate_service/hardwares/davinci/samples/atlas200dk/src/classify_net_ai_engine.cpp b/evaluate_service/hardwares/davinci/samples/atlas200dk/src/classify_net_ai_engine.cpp deleted file mode 100644 index 3281c1c..0000000 --- a/evaluate_service/hardwares/davinci/samples/atlas200dk/src/classify_net_ai_engine.cpp +++ /dev/null @@ -1,152 +0,0 @@ -/** -* @file classify_net_ai_engine.cpp -* -* Copyright(c)<2018>, -* -* @version 1.0 -* -* @date 2018-6-7 -*/ -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include "inc/classify_net_ai_engine.h" -#include "inc/error_code.h" -#include "inc/sample_data.h" -#include "hiaiengine/ai_memory.h" -#include - - ClassifyNetEngine::~ClassifyNetEngine() { - // Release the pre-allocated memory of outData. 
- for (auto buffer : outData_) { - if (buffer != nullptr) { - hiai::HIAIMemory::HIAI_DVPP_DFree(buffer); - buffer = nullptr; - } - } - } -/** -* @ingroup ClassifyNetEngine -* @brief ClassifyNetEngine init function -* @param [in]:arg0 -*/ -HIAI_StatusT ClassifyNetEngine::Init(const hiai::AIConfig &config, - const std::vector& model_desc) -{ - HIAI_ENGINE_LOG(this, HIAI_OK, "ClassifyNetEngine Init"); - hiai::AIStatus ret = hiai::SUCCESS; - - // Obtaining Configuration Parameters - config_.clear(); - for (auto item : config.items()) { - config_[item.name()] = item.value(); - } - if (nullptr == ai_model_manager_) { - ai_model_manager_ = std::make_shared(); - } - - // Init Model - const char* model_path = config_["model_path"].c_str(); - std::vector model_desc_vec; - hiai::AIModelDescription model_desc_; - model_desc_.set_path(model_path); - model_desc_.set_key(""); - model_desc_.set_name(modelName); - model_desc_vec.push_back(model_desc_); - ret = ai_model_manager_->Init(config, model_desc_vec); - - if (ret != hiai::SUCCESS) { - HIAI_ENGINE_LOG(this, HIAI_AI_MODEL_MANAGER_INIT_FAIL, - "hiai ai model manager init fail"); - return HIAI_AI_MODEL_MANAGER_INIT_FAIL; - } - - std::vector inputTensorVec; - std::vector outputTensorVec; - ret = ai_model_manager_->GetModelIOTensorDim(modelName, inputTensorVec, outputTensorVec); - if (ret != hiai::SUCCESS) { - HIAI_ENGINE_LOG(this, HIAI_AI_MODEL_MANAGER_INIT_FAIL, - "hiai ai model manager init fail"); - return HIAI_AI_MODEL_MANAGER_INIT_FAIL; - } - // allocate OutData in advance - HIAI_StatusT hiai_ret = HIAI_OK; - for (size_t index = 0; index < outputTensorVec.size(); index++) { - hiai::AITensorDescription outputTensorDesc = hiai::AINeuralNetworkBuffer::GetDescription(); - uint8_t* buffer = nullptr; - hiai_ret = hiai::HIAIMemory::HIAI_DMalloc(outputTensorVec[index].size, (void*&)buffer, 1000); - if ((hiai_ret != HIAI_OK) || (buffer == nullptr)) { - printf("HIAI_DMalloc failed\n"); - continue; - } - outData_.push_back(buffer); - shared_ptr outputTensor = - hiai::AITensorFactory::GetInstance()->CreateTensor(outputTensorDesc, buffer, outputTensorVec[index].size); - outDataVec_.push_back(outputTensor); - } - - HIAI_ENGINE_LOG(this, HIAI_OK, "ClassifyNetEngine init success"); - return HIAI_OK; -} - -/** -* @ingroup ClassifyNetEngine -* @brief ClassifyNetEngine Process function -* @param [in]:arg0 -*/ -HIAI_IMPL_ENGINE_PROCESS("ClassifyNetEngine", ClassifyNetEngine, CLASSIFYNET_ENGINE_INPUT_SIZE) -{ - HIAI_ENGINE_LOG(this, HIAI_OK, "ClassifyNetEngine Process"); - HIAI_StatusT ret = HIAI_OK; - std::vector> inDataVec; - - std::shared_ptr input_arg = - std::static_pointer_cast(arg0); - if (nullptr == input_arg) - { - HIAI_ENGINE_LOG(this, HIAI_INVALID_INPUT_MSG, - "fail to process invalid message"); - return HIAI_INVALID_INPUT_MSG; - } - // Transfer buffer to Framework directly, only one inputsize - hiai::AITensorDescription inputTensorDesc = - hiai::AINeuralNetworkBuffer::GetDescription(); - shared_ptr inputTensor = - hiai::AITensorFactory::GetInstance()->CreateTensor(inputTensorDesc, - input_arg->trans_buff.get(), input_arg->buffer_size); - // AIModelManager. fill in the input data. 
- inDataVec.push_back(inputTensor); - - hiai::AIContext ai_context; - clock_t start_time=clock(); - // Process work - ret = ai_model_manager_->Process(ai_context, - inDataVec, outDataVec_, 0); - clock_t end_time=clock(); - cout<< "costTime "<(end_time-start_time)/CLOCKS_PER_SEC*1000< output_data = std::static_pointer_cast(outDataVec_[index]); - std::shared_ptr output_string_ptr = - std::shared_ptr(new std::string((char*)output_data->GetBuffer(), output_data->GetSize())); - hiai::Engine::SendData(0, "string", - std::static_pointer_cast(output_string_ptr)); - } - inDataVec.clear(); - return HIAI_OK; -} diff --git a/evaluate_service/hardwares/davinci/samples/atlas200dk/src/classify_net_host.cpp b/evaluate_service/hardwares/davinci/samples/atlas200dk/src/classify_net_host.cpp deleted file mode 100644 index 350830b..0000000 --- a/evaluate_service/hardwares/davinci/samples/atlas200dk/src/classify_net_host.cpp +++ /dev/null @@ -1,121 +0,0 @@ -/** -* @file classify_net_host.cpp -* -* Copyright(c)<2018>, -* -* @version 1.0 -* -* @date 2018-6-7 -*/ -#include -#include -#include -#include -#include -#include -#include -#include -#include "inc/classify_net_host.h" -#include "inc/common.h" -#include "inc/util.h" -#include "inc/error_code.h" -#include "inc/sample_data.h" -#include "hiaiengine/ai_memory.h" -#include -//using namespace std; -//for performance -int64_t start_time = 0; -int64_t end_time = 0; -int64_t aifull_time = 0; -int64_t total_time = 0; -int32_t batch_size = 1; -extern int64_t g_count; - -void Delay(int time) -{ - clock_t now=clock(); - while(clock()-now < time); -} - -void deleteMemoryDmalloc(void* ptr) -{ - hiai::HIAIMemory::HIAI_DVPP_DFree(ptr); -} - -void deleteMemoryNew(void* ptr) -{ - if(ptr != nullptr) { - delete[] reinterpret_cast(ptr); - } -} - -/** -* @ingroup SourceEngine -* @brief SourceEngine Process function -* @param [in]:arg0 -*/ -HIAI_IMPL_ENGINE_PROCESS("SourceEngine", SourceEngine, SOURCE_ENGINE_INPUT_SIZE) -{ - HIAI_ENGINE_LOG(this, HIAI_OK, "SourceEngine Process"); - // Obtain the path of the original file. - std::shared_ptr input_arg = - std::static_pointer_cast(arg0); - if (nullptr == input_arg) - { - HIAI_ENGINE_LOG(this, HIAI_INVALID_INPUT_MSG, - "fail to process invalid message"); - return HIAI_INVALID_INPUT_MSG; - } - - for (uint32_t index = 0; index < SEND_COUNT; index++) - { - // Reads data and generates information. 
- uint32_t tmpBuffSize = 0; - bool isDMalloc = true; - char* tmpBuffData = Util::ReadBinFile(input_arg, &tmpBuffSize, batch_size, isDMalloc); - if (tmpBuffData == nullptr) { - HIAI_ENGINE_LOG(this, HIAI_INVALID_INPUT_MSG, - "alloc send buffer fail"); - return HIAI_INVALID_INPUT_MSG; - } - std::shared_ptr tmp_raw_data_ptr = std::make_shared(); - tmp_raw_data_ptr->buffer_size = tmpBuffSize; - if(isDMalloc == true) { - tmp_raw_data_ptr->trans_buff.reset((unsigned char*)tmpBuffData, deleteMemoryDmalloc); - } - else { - tmp_raw_data_ptr->trans_buff.reset((unsigned char*)tmpBuffData, deleteMemoryNew); - } - // Transferred to ClassifyNet Engine - HIAI_ENGINE_LOG(this, HIAI_OK, "SourceEngine Process:: begin to Senddata"); - hiai::Engine::SendData(0, "EngineTransNewT", - std::static_pointer_cast(tmp_raw_data_ptr), 10000); - } - HIAI_ENGINE_LOG(this, HIAI_OK, "SourceEngine Process Success"); - return HIAI_OK; -} - -/** -* @ingroup DestEngine -* @brief DestEngine Process function -* @param [in]:arg0 -*/ -HIAI_IMPL_ENGINE_PROCESS("DestEngine", DestEngine, DEST_ENGINE_INPUT_SIZE) -{ - HIAI_ENGINE_LOG(this, HIAI_OK, "DestEngine Process"); - std::shared_ptr data_result_ptr = - std::static_pointer_cast(arg0); - // Check whether the data_result_ptr is valid. - if (nullptr == data_result_ptr) - { - HIAI_ENGINE_LOG(this, HIAI_INVALID_INPUT_MSG, - "fail to process invalid message"); - return HIAI_INVALID_INPUT_MSG; - } - HIAI_ENGINE_LOG(this, HIAI_OK, "DestEngine Process:: already receive result data"); - // Send data_num and data_bbox to the callback function. - hiai::Engine::SendData(0, "string", - std::static_pointer_cast(data_result_ptr)); - HIAI_ENGINE_LOG(this, HIAI_OK, "DestEngine Process Success"); - return HIAI_OK; -} diff --git a/evaluate_service/hardwares/davinci/samples/atlas200dk/src/classify_net_main.cpp b/evaluate_service/hardwares/davinci/samples/atlas200dk/src/classify_net_main.cpp deleted file mode 100644 index 0e4cedf..0000000 --- a/evaluate_service/hardwares/davinci/samples/atlas200dk/src/classify_net_main.cpp +++ /dev/null @@ -1,174 +0,0 @@ -/** - * @file classify_net_main - * - * Copyright(c)<2018>, - * - * @version 1.0 - * - * @date 2018-6-7 - */ -#include -#include -#include -#include -#include -#include -#include -#include "hiaiengine/api.h" -#include "inc/error_code.h" -#include "inc/common.h" -#include "inc/data_recv.h" -#include "inc/util.h" -uint32_t g_count = 0; -const int MAX_SLEEP_TIMES = 16; -static bool is_test_result_ready = false; - -/** -* @ingroup HIAI_InitAndStartGraph -* @brief Initializing and Creating Graph -* @param [in] -*/ -HIAI_StatusT HIAI_InitAndStartGraph() -{ - // Step1: Init HiaiEngine - HIAI_StatusT status = HIAI_Init(0); - HIAI_ENGINE_LOG("[DEBUG] Go to start Graph"); - // Step2: Create Graph based on the configuration of the proto file. - status = hiai::Graph::CreateGraph(GRAPH_CONFIG_FILE_PATH); - if (status != HIAI_OK) - { - printf("Fail to create graph\n"); - HIAI_ENGINE_LOG(status, "Fail to create graph"); - return status; - } - HIAI_ENGINE_LOG("[DEBUG] create Graph success"); - - // Step3: Set the Call Back callback function for the DST Engine. - std::shared_ptr graph = hiai::Graph::GetInstance(GRAPH_ID); - if (nullptr == graph) - { - printf("Fail to get the graph-%u instance\n", GRAPH_ID); - HIAI_ENGINE_LOG("Fail to get the graph-%u", GRAPH_ID); - return status; - } - - // Configure the target data. 
Target Graph, Target Engine, and Target Port - hiai::EnginePortID target_port_config; - target_port_config.graph_id = GRAPH_ID; - target_port_config.engine_id = DST_ENGINE_ID; - target_port_config.port_id = DEST_PORT_ID_0; - graph->SetDataRecvFunctor(target_port_config, - std::shared_ptr( - new ClassifyNetDataRecvInterface(TEST_DEST_FILE_PATH))); - return HIAI_OK; -} - -/** -* @ingroup CheckAllFileExist -* @brief Check whether all files are generated. -*/ -void CheckAllFileExist() -{ - for (int i = 0; i < MAX_SLEEP_TIMES; ++i) { - if (g_count == SEND_COUNT) - { - std::unique_lock lck(local_test_mutex); - is_test_result_ready = true; - printf("File %s generated\n", TEST_DEST_FILE_PATH.c_str()); - HIAI_ENGINE_LOG("Check Result success"); - return; - } - printf("Check Result, go into sleep 1 sec\n"); - HIAI_ENGINE_LOG("Check Result, go into sleep 1 sec"); - sleep(1); - } - printf("Check Result failed, timeout\n"); - HIAI_ENGINE_LOG("Check Result failed, timeout"); -} - -/** -* @ingroup main -* @brief main function -* @param [in]: argc, argv -*/ -int main(int argc, char* argv[]) -{ - printf("========== Test Start ==========\n"); - HIAI_StatusT ret = HIAI_OK; - - // The number of execution program parameters must be greater than or equal to 2. - // Sample: classify_net_main vgg16/classify_net_main resnet_18 - - // concatenate test_source_file/test_dest_file/test_graph_file - TEST_SRC_FILE_PATH = "./test_data/data/input.bin"; - TEST_DEST_FILE_PATH = "./result_files/result_file"; - GRAPH_CONFIG_FILE_PATH = "./test_data/config/graph_sample.prototxt"; - GRAPH_MODEL_PATH = "./test_data/model/davinci_model.om"; - std::string output = "./result_files"; - if (access(output.c_str(), 0) == -1) { - int flag = mkdir(output.c_str(), 0700); - if (flag == 0) { - HIAI_ENGINE_LOG("make output directory successfully"); - } - else { - printf("make output directory fail\n"); - HIAI_ENGINE_LOG(HIAI_ARG_NUMBER_NOK, "make output directory fail"); - return -1; - } - } - // Delete the target file. 
- remove(TEST_DEST_FILE_PATH.c_str()); - - for (int i = 0; i < MAX_SLEEP_TIMES; ++i) { - if (Util::CheckFileExist(GRAPH_MODEL_PATH)) { - printf("File %s is ready\n", GRAPH_MODEL_PATH.c_str()); - break; - } - sleep(1); - if (i == MAX_SLEEP_TIMES-1) { - printf("model file:%s is not existence, timeout\n", GRAPH_MODEL_PATH.c_str()); - } - } - - // Initializing and Creating Graph - ret = HIAI_InitAndStartGraph(); - if (HIAI_OK != ret) - { - printf("Fail to init and start graph\n"); - HIAI_ENGINE_LOG("Fail to init and start graph"); - return -1; - } - printf("Init and start graph succeed\n"); - - std::shared_ptr graph = hiai::Graph::GetInstance(GRAPH_ID); - if (nullptr == graph) - { - printf("Fail to get the graph-%u instance\n", GRAPH_ID); - HIAI_ENGINE_LOG("Fail to get the graph-%u", GRAPH_ID); - return -1; - } - - // Send data to Source Engine - hiai::EnginePortID target_engine; - target_engine.graph_id = GRAPH_ID; - target_engine.engine_id = SRC_ENGINE_ID; - target_engine.port_id = SRC_PORT_ID; - - std::shared_ptr src_string = - std::shared_ptr(new std::string(TEST_SRC_FILE_PATH)); - graph->SendData(target_engine, "string", - std::static_pointer_cast(src_string)); - - // Waiting for processing result - std::thread check_thread(CheckAllFileExist); - check_thread.join(); - - if (is_test_result_ready) { - printf("========== Test Succeed ==========\n"); - } else { - printf("========== Test Failed ==========\n"); - } - // Destroy Graph - hiai::Graph::DestroyGraph(GRAPH_ID); - return 0; -} diff --git a/evaluate_service/hardwares/davinci/samples/atlas200dk/src/data_recv.cpp b/evaluate_service/hardwares/davinci/samples/atlas200dk/src/data_recv.cpp deleted file mode 100644 index 1171085..0000000 --- a/evaluate_service/hardwares/davinci/samples/atlas200dk/src/data_recv.cpp +++ /dev/null @@ -1,39 +0,0 @@ -/** -* @file data_recv.cpp -* -* Copyright(c)<2018>, -* -* @version 1.0 -* -* @date 2018-6-7 -*/ -#include -#include -#include -#include -#include "inc/data_recv.h" -#include "inc/error_code.h" -#include "inc/common.h" -#include "inc/util.h" -extern uint32_t g_count; - -/** -* @ingroup ClassifyNetDataRecvInterface -* @brief RecvData RecvData callback,Save the file. -* @param [in] -*/ -HIAI_StatusT ClassifyNetDataRecvInterface::RecvData - (const std::shared_ptr& message) -{ - std::shared_ptr data = - std::static_pointer_cast(message); - if (nullptr == data) - { - HIAI_ENGINE_LOG("Fail to receive data"); - return HIAI_INVALID_INPUT_MSG; - } - - g_count++; - Util::ClassifyDump(file_name_, data); - return HIAI_OK; -} diff --git a/evaluate_service/hardwares/davinci/samples/atlas200dk/src/sample_data.cpp b/evaluate_service/hardwares/davinci/samples/atlas200dk/src/sample_data.cpp deleted file mode 100644 index fcddbdf..0000000 --- a/evaluate_service/hardwares/davinci/samples/atlas200dk/src/sample_data.cpp +++ /dev/null @@ -1,49 +0,0 @@ -/** -* @file sample_main.cpp -* -* Copyright (C) <2018> . All Rights Reserved. -* -* This program is distributed in the hope that it will be useful, -* but WITHOUT ANY WARRANTY; without even the implied warranty of -* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. -*/ -#include "inc/sample_data.h" -#include "hiaiengine/data_type.h" -// Registers serialization and deserialization functions. -/** -* @ingroup hiaiengine -* @brief GetTransSearPtr, Serializes Trans data. 
-* @param [in] : data_ptr Struct Pointer -* @param [out]:struct_str Struct buffer -* @param [out]:data_ptr Struct data pointer -* @param [out]:struct_size Struct size -* @param [out]:data_size Struct data size -* @author w00437212 -*/ -void GetTransSearPtr(void* inputPtr, std::string& ctrlStr, uint8_t*& dataPtr, uint32_t& dataLen) -{ - EngineTransNewT* engine_trans = (EngineTransNewT*)inputPtr; - ctrlStr = std::string((char*)inputPtr, sizeof(EngineTransNewT)); - dataPtr = (uint8_t*)engine_trans->trans_buff.get(); - dataLen = engine_trans->buffer_size; -} - -/** -* @ingroup hiaiengine -* @brief GetTransSearPtr, Deserialization of Trans data -* @param [in] : ctrl_ptr Struct Pointer -* @param [in] : data_ptr Struct data Pointer -* @param [out]:std::shared_ptr Pointer to the pointer that is transmitted to the Engine -* @author w00437212 -*/ -std::shared_ptr GetTransDearPtr(const char* ctrlPtr, const uint32_t& ctrlLen, const uint8_t* dataPtr, const uint32_t& dataLen) -{ - EngineTransNewT* engine_trans = (EngineTransNewT*)ctrlPtr; - std::shared_ptr engineTranPtr(new EngineTransNewT); - engineTranPtr->buffer_size = engine_trans->buffer_size; - engineTranPtr->trans_buff.reset(const_cast(dataPtr), hiai::Graph::ReleaseDataBuffer); - return std::static_pointer_cast(engineTranPtr); -} - -// RegisterEngineTransNewT -HIAI_REGISTER_SERIALIZE_FUNC("EngineTransNewT", EngineTransNewT, GetTransSearPtr, GetTransDearPtr); \ No newline at end of file diff --git a/evaluate_service/hardwares/davinci/samples/atlas200dk/src/util.cpp b/evaluate_service/hardwares/davinci/samples/atlas200dk/src/util.cpp deleted file mode 100644 index 3b55c55..0000000 --- a/evaluate_service/hardwares/davinci/samples/atlas200dk/src/util.cpp +++ /dev/null @@ -1,81 +0,0 @@ -/** -* @file util.cpp -* -* Copyright(c)<2018>, -* -* @version 1.0 -* -* @date 2018-6-7 -*/ -#include -#include -#include -#include -#include "inc/util.h" -#include "inc/tensor.h" -#include "inc/common.h" -#include "hiaiengine/api.h" -#include "hiaiengine/ai_memory.h" -/** -* @ingroup Util -* @brief ReadBinFile Read the file and return the buffer. 
-* @param [in]:file_name -* @param [in]: file_size -* @param [out]: std::string -*/ -char* Util::ReadBinFile(std::shared_ptr file_name, - uint32_t* file_size, int32_t batchSize, bool& isDMalloc) -{ - std::filebuf *pbuf; - std::ifstream filestr; - size_t size; - filestr.open(file_name->c_str(), std::ios::binary); - if (!filestr) - { - return NULL; - } - - pbuf = filestr.rdbuf(); - size = pbuf->pubseekoff(0, std::ios::end, std::ios::in)*batchSize; - pbuf->pubseekpos(0, std::ios::in); - char * buffer = nullptr; - isDMalloc = true; - HIAI_StatusT getRet = hiai::HIAIMemory::HIAI_DVPP_DMalloc(size, (void*&)buffer); - if ((getRet != HIAI_OK) || (buffer == nullptr)) { - buffer = new(std::nothrow) char[size]; - if(buffer != nullptr) { - isDMalloc = false; - } - } - - pbuf->sgetn(buffer, size); - *file_size = size; - filestr.close(); - return buffer; -} - -/** -* @ingroup Util -* @brief CheckFileExist -* @param [in]:file_name -* @param [out]: std::string -*/ -bool Util::CheckFileExist(const std::string& file_name) -{ - std::ifstream f(file_name.c_str()); - return f.good(); -} - -/** -* @ingroup Util -* @brief ClassifyDump -* @param [in]: file_name -* @param [in]: data -*/ -void Util::ClassifyDump(const std::string& file_name, std::shared_ptr data) -{ - ddk::Tensor num; - num.fromarray(reinterpret_cast(const_cast(data->c_str())), DATA_NUM); - (void)num.dump(file_name); - -} diff --git a/evaluate_service/hardwares/kirin990_npu/inference_kirin990_npu.sh b/evaluate_service/hardwares/kirin990_npu/inference_kirin990_npu.sh deleted file mode 100644 index 82a2a66..0000000 --- a/evaluate_service/hardwares/kirin990_npu/inference_kirin990_npu.sh +++ /dev/null @@ -1,16 +0,0 @@ -MODEL_PATH=$1 -DATA_PATH=$2 -MOBILE_DIR=$3 -OUTPUT_PATH=$4 - -adb shell "mkdir $MOBILE_DIR" -adb shell "mkdir $MOBILE_DIR/out_dir" -adb push $DATA_PATH $MOBILE_DIR/data -adb push MODEL_PATH $MOBILE_DIR/data -adb shell "/data/local/tmp/model_run_tool --model=$MOBILE_DIR/kirin990_npu.om --input=$MOBILE_DIR/input.bin --output_dir=$MOBILE_DIR/out_dir/ --enable_item=1" >$OUTPUT_PATH/ome.log -adb shell "/data/local/tmp/data_proc_tool --result_path=$MOBILE_DIR/out_dir" -cd $OUTPUT_PATH -adb shell "ls $MOBILE_DIR/out_dir/*model.csv" |xargs -I {} adb pull {} result.csv - - - diff --git a/evaluate_service/hardwares/kirin990_npu/kirin990_npu.py b/evaluate_service/hardwares/kirin990_npu/kirin990_npu.py deleted file mode 100644 index 1ff8f51..0000000 --- a/evaluate_service/hardwares/kirin990_npu/kirin990_npu.py +++ /dev/null @@ -1,88 +0,0 @@ -# -*- coding: utf-8 -*- - -# Copyright (C) 2020. Huawei Technologies Co., Ltd. All rights reserved. -# This program is free software; you can redistribute it and/or modify -# it under the terms of the MIT License. -# This program is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# MIT License for more details. - -"""The hardware of mobile.""" -import subprocess -import logging -import os -from evaluate_service.class_factory import ClassFactory -import datetime -import csv - - -@ClassFactory.register() -class Kirin990_npu(object): - """Kirin990_npu class.""" - - def __init__(self, optional_params): - self.current_path = os.path.dirname(os.path.abspath(__file__)) - - def convert_model(self, backend, model, weight, **kwargs): - """Convert the tf/caffe/mindspore model to botl model in mobile. 
- - :param backend: the backend can be one of "tensorflow", "caffe" and "mindspore" - :type backend: str - :param model: the model file need to convert - :type model: str - :param weight: the weight file need to convert - :type weight: str - """ - om_save_path = kwargs["save_dir"] - input_shape = kwargs["input_shape"] - out_nodes = kwargs["out_nodes"] - log_save_path = os.path.dirname(model) - command_line = ["bash", self.current_path + "/model_convert.sh", backend, - model, weight, om_save_path, log_save_path, input_shape, out_nodes] - try: - subprocess.check_output(command_line) - except subprocess.CalledProcessError as exc: - logging.error("convert model to bolt failed. The return message is : {}.".format(exc)) - - def inference(self, converted_model, input_data, **kwargs): - """Inference in Davinci. - - :param converted_model: converted model file - :type backend: str - :param input_data: the input data file - :type model: str - """ - self.mobile_dir = "/sdcard/evaluate_service/" + datetime.datetime.now().strftime('%Y%m%d%H%M%S%f') - output_data_path = os.path.dirname(input_data) - if not os.path.isfile(converted_model): - converted_model = os.path.join(converted_model, "kirin990_npu.om") - - command_line = ["bash", self.current_path + "/inference_kirin990_npu.sh", - converted_model, input_data, self.mobile_dir, output_data_path] - try: - subprocess.check_output(command_line) - except subprocess.CalledProcessError as exc: - logging.error("inference failed. the return message is : {}.".format(exc)) - - result_file = os.path.join(output_data_path, "result.csv") - - latency = self._get_latency(result_file) - return latency, 0 - - def _get_latency(self, result_file): - """Get latency from the result file.""" - logging.info("The result file is {}.".format(result_file)) - - time_start = [] - time_end = [] - with open(result_file, "r") as f: - reader = csv.reader(f) - for row in reader: - if "inference begin" in row[0]: - time_start.append(float(row[2])) - if "inference end" in row[0]: - time_end.append(float(row[2])) - - latency = (sum(time_end) - sum(time_start)) / len(time_end) / 1000 - return latency diff --git a/evaluate_service/hardwares/kirin990_npu/model_convert.sh b/evaluate_service/hardwares/kirin990_npu/model_convert.sh deleted file mode 100644 index 4df431a..0000000 --- a/evaluate_service/hardwares/kirin990_npu/model_convert.sh +++ /dev/null @@ -1,18 +0,0 @@ -BACKEND=$1 -MODEL=$2 -WEIGHT=$3 -OM_SAVE_PATH=$4 -LOG_PATH=$5 -INPUT_SHAPE=$6 -OUT_NODES=$7 - -cd /data/tools/hwhiai-ddk-100.500.010.010/tools/tools_omg/ - -if [ $BACKEND == "tensorflow" ]; then - ./omg --model=$MODEL --framework=3 --output=$OM_SAVE_PATH/kirin990_npu --input_shape=$INPUT_SHAPE --out_nodes=$OUT_NODES >$LOG_PATH/omg.log 2>&1 -elif [ $BACKEND == "caffe" ]; then - ./omg --model=$MODEL --weight=$WEIGHT --framework=0 --output=$OM_SAVE_PATH/kirin990_npu >$LOG_PATH/omg.log 2>&1 -else - echo "[ERROR] omg model convert: The backend must be tensorflow or caffe." 
-fi - diff --git a/evaluate_service/hardwares/mobile/get_latency_from_log.sh b/evaluate_service/hardwares/mobile/get_latency_from_log.sh deleted file mode 100644 index 85381ce..0000000 --- a/evaluate_service/hardwares/mobile/get_latency_from_log.sh +++ /dev/null @@ -1,3 +0,0 @@ -LOG_FILE=$1 - -cat $LOG_FILE | grep run |awk -F ' ' '{print $2}' diff --git a/evaluate_service/hardwares/mobile/inference_bolt.sh b/evaluate_service/hardwares/mobile/inference_bolt.sh deleted file mode 100644 index 0b8e828..0000000 --- a/evaluate_service/hardwares/mobile/inference_bolt.sh +++ /dev/null @@ -1,12 +0,0 @@ -MODEL_PATH=$1 -DATA_PATH=$2 -MOBILE_DIR=$3 -OUTPUT_PATH=$4 - -adb shell "mkdir $MOBILE_DIR/data" -adb push $DATA_PATH $MOBILE_DIR/data -adb shell "/data/local/tmp/benchmark -m $MODEL_PATH -i $MOBILE_DIR/data/input.bin" >$OUTPUT_PATH/ome.log -cd $OUTPUT_PATH -#adb shell "ls $MOBILE_DIR/data/*.txt" |xargs -I {} adb pull {} result.txt -#cat ome.log |grep dims |awk -F ":" 'NR==2 {print $NF}' >output_dim.txt -adb pull /sdcard/BoltResult.txt ./ \ No newline at end of file diff --git a/evaluate_service/hardwares/mobile/mobile.py b/evaluate_service/hardwares/mobile/mobile.py deleted file mode 100644 index 1bc470a..0000000 --- a/evaluate_service/hardwares/mobile/mobile.py +++ /dev/null @@ -1,92 +0,0 @@ -# -*- coding: utf-8 -*- - -# Copyright (C) 2020. Huawei Technologies Co., Ltd. All rights reserved. -# This program is free software; you can redistribute it and/or modify -# it under the terms of the MIT License. -# This program is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# MIT License for more details. - -"""The hardware of mobile.""" -import subprocess -import logging -import os -from evaluate_service.class_factory import ClassFactory -import datetime -import numpy as np - - -@ClassFactory.register() -class Mobile(object): - """Mobile class.""" - - def __init__(self, optional_params): - self.current_path = os.path.dirname(os.path.abspath(__file__)) - - def convert_model(self, backend, model, weight, **kwargs): - """Convert the tf/caffe/mindspore model to bolt model in mobile. - - :param backend: the backend can be one of "tensorflow", "caffe" and "mindspore" - :type backend: str - :param model: the model file to be converted - :type model: str - :param weight: the weight file to be converted - :type weight: str - """ - self.mobile_dir = "/sdcard/evaluate_service/" + datetime.datetime.now().strftime('%Y%m%d%H%M%S%f') - model_name = os.path.basename(model).split(".")[0] - log_save_path = os.path.dirname(model) - precision = "FP32" - command_line = ["bash", self.current_path + "/model_convert.sh", backend, self.mobile_dir, - model, weight, model_name, precision, log_save_path] - try: - subprocess.check_output(command_line) - except subprocess.CalledProcessError as exc: - logging.error("convert model to bolt failed. The return message is : {}.".format(exc)) - - def inference(self, converted_model, input_data, **kwargs): - """Inference on the mobile device.
- - :param converted_model: converted model file - :type backend: str - :param input_data: the input data file - :type model: str - """ - output_data_path = os.path.dirname(input_data) - command_line = ["bash", self.current_path + "/inference_bolt.sh", - converted_model, input_data, self.mobile_dir, output_data_path] - try: - subprocess.check_output(command_line) - except subprocess.CalledProcessError as exc: - logging.error("inference failed. the return message is : {}.".format(exc)) - - result_file = os.path.join(output_data_path, "BoltResult.txt") - - latency = self._get_latency(os.path.join(output_data_path, "ome.log")) - output = self._get_output(result_file) - return latency, output - - def _get_latency(self, log_file): - """Get latency from the log file.""" - logging.info("The log file is {}.".format(log_file)) - command_line = ["bash", self.current_path + "/get_latency_from_log.sh", log_file] - try: - latency = subprocess.check_output(command_line) - return str(latency, 'utf-8').split("\n")[0] - except subprocess.CalledProcessError as exc: - logging.error("get_latency_from_log failed. the return message is : {}.".format(exc)) - - def _get_output(self, result_file): - """Get output data of bolt.""" - with open(result_file, 'r') as f: - values = f.readlines() - output = [] - for index, value in enumerate(values): - if index == 0: - shapes = value.strip().split(",") - shapes = [int(i) for i in shapes] - else: - output.append(np.float(value)) - output = np.array(output).reshape(shapes).tolist() - return output diff --git a/evaluate_service/hardwares/mobile/model_convert.sh b/evaluate_service/hardwares/mobile/model_convert.sh deleted file mode 100644 index 17c8e43..0000000 --- a/evaluate_service/hardwares/mobile/model_convert.sh +++ /dev/null @@ -1,23 +0,0 @@ -BACKEND=$1 -MOBILE_WORK_DIR=$2 -MODEL=$3 -WEIGHT=$4 -MODEL_NAME=$5 -PRECISON=$6 -LOG_PATH=$7 - - -adb shell "mkdir $MOBILE_WORK_DIR" -adb push $MODEL $MOBILE_WORK_DIR - -if [ $BACKEND == "tensorflow" ]; then - adb shell "./data/evaluate_service/tools/tflite2bolt $MOBILE_WORK_DIR $MODEL_NAME $PRECISON" >$LOG_PATH/omg.log -elif [ $BACKEND == "caffe" ]; then - adb push $WEIGHT $MOBILE_WORK_DI - adb shell "./data/evaluate_service/tools/caffe2bolt $MOBILE_WORK_DIR $MODEL_NAME $PRECISON" >$LOG_PATH/omg.log -elif [ $BACKEND == "onnx" ]; then - adb shell "/data/local/tmp/X2bolt -d $MOBILE_WORK_DIR/ -m $MODEL_NAME -i $PRECISON" >$LOG_PATH/omg.log -else - echo "[ERROR] Bolt model convert: The backend must be tensorflow, caffe or onnx." -fi - diff --git a/evaluate_service/main.py b/evaluate_service/main.py deleted file mode 100644 index ba86f3d..0000000 --- a/evaluate_service/main.py +++ /dev/null @@ -1,189 +0,0 @@ -# -*- coding: utf-8 -*- - -# Copyright (C) 2020. Huawei Technologies Co., Ltd. All rights reserved. -# This program is free software; you can redistribute it and/or modify -# it under the terms of the MIT License. -# This program is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# MIT License for more details. 
- -"""The Evaluate Service of the service.""" -import os -import logging - -try: - import flask - import flask_restful - import werkzeug -except Exception: - logging.warning( - "The dependencies [Flask==1.1.2,Flask-RESTful==0.3.8, Werkzeug==1.0.1 ] have not been install, \ - and will install it automatically, if failed, please install it manually.") - os.system("pip3 install Flask==1.1.2") - os.system("pip3 install Flask-RESTful==0.3.8") - os.system("pip3 install Werkzeug==1.0.1") - -from flask import Flask, request -from flask_restful import Resource, Api - -try: - from werkzeug import secure_filename -except Exception: - from werkzeug.utils import secure_filename - -import glob -import multiprocessing -import time -import shutil -from evaluate_service.class_factory import ClassFactory -from .hardwares import * # noqa F401 -import datetime -import traceback -import argparse - - -app = Flask(__name__) -api = Api(app) - - -class Evaluate(Resource): - """Evaluate Service for service.""" - - def __init__(self): - self.result = {"latency": "9999", "out_data": [], "status": "sucess", "timestamp": "", "error_message": ""} - - @classmethod - def _add_params(cls, work_path, optional_params): - cls.current_path = work_path - cls.optional_params = optional_params - - def post(self): - """Interface to response to the post request of the client.""" - try: - self.parse_paras() - self.upload_files() - self.hardware_instance = ClassFactory.get_cls(self.hardware)(self.optional_params) - except Exception: - self.result["status"] = "Params error." - self.result["error_message"] = traceback.format_exc() - logging.error("[ERROR] Params error!") - traceback.print_exc() - return self.result - - if self.reuse_model == "True": - logging.warning("Reuse the model, no need to convert the model.") - else: - try: - self.hardware_instance.convert_model(backend=self.backend, model=self.model, weight=self.weight, - save_dir=self.share_dir, input_shape=self.input_shape, - out_nodes=self.out_nodes, precision=self.precision) - except Exception: - self.result["status"] = "Model convert failed." - self.result["error_message"] = traceback.format_exc() - logging.error("[ERROR] Model convert failed!") - traceback.print_exc() - return self.result - try: - latency_sum = 0 - for repeat in range(min(self.repeat_times, 10)): - latency, output = self.hardware_instance.inference(converted_model=self.share_dir, - input_data=self.input_data) - latency_sum += float(latency) - self.result["latency"] = latency_sum / self.repeat_times - self.result["out_data"] = output - except Exception: - self.result["status"] = "Inference failed." - self.result["error_message"] = traceback.format_exc() - logging.error("[ERROR] Inference failed! 
") - traceback.print_exc() - return self.result - - def parse_paras(self): - """Parse the parameters in the request from the client.""" - self.backend = request.form["backend"] - self.hardware = request.form["hardware"] - self.reuse_model = request.form["reuse_model"] - self.job_id = request.form["job_id"] - self.input_shape = request.form.get("input_shape", type=str, default="") - self.out_nodes = request.form.get("out_nodes", type=str, default="") - self.repeat_times = int(request.form.get("repeat_times")) - self.precision = request.form.get("precision", type=str, default="FP32") - - def upload_files(self): - """Upload the files from the client to the service.""" - self.now_time = datetime.datetime.now().strftime('%Y%m%d%H%M%S%f') - self.result["timestamp"] = self.now_time - logging.warning("The timestamp is {}.".format(self.now_time)) - self.upload_file_path = os.path.join(self.current_path, "out", self.now_time) - self.share_dir = os.path.join(self.current_path, "out", self.job_id) - os.makedirs(self.upload_file_path) - - model_file = request.files.get("model_file") - if model_file is not None: - self.model = self.upload_file_path + "/" + secure_filename(model_file.filename) - model_file.save(self.model) - - data_file = request.files.get("data_file") - if data_file is not None: - self.input_data = self.upload_file_path + "/" + secure_filename(data_file.filename) - data_file.save(self.input_data) - - weight_file = request.files.get("weight_file") - if weight_file is not None: - self.weight = self.upload_file_path + "/" + secure_filename(weight_file.filename) - weight_file.save(self.weight) - else: - self.weight = "" - logging.warning("upload file sucess!") - - -def _clean_data_path(clean_interval, work_path): - while True: - _clean_time = time.time() - clean_interval - # _current_path = os.path.dirname(os.path.abspath(__file__)) - folder_pattern = "{}/out/*".format(work_path) - folders = glob.glob(folder_pattern) - for folder in folders: - if os.path.isdir(folder) and os.path.getctime(folder) < _clean_time: - logging.warning("remove old folder: {}".format(folder)) - try: - shutil.rmtree(folder) - except Exception: - logging.warning("failed to remove {}".format(folder)) - time.sleep(3600) - - -def _parse_args(): - parser = argparse.ArgumentParser(description="Evaluate service") - parser.add_argument("-i", "--host_ip", type=str, required=True, help="the ip of the evaluate service machine") - parser.add_argument("-p", "--port", type=int, required=False, default=8888, help="the listening port") - parser.add_argument("-w", "--work_path", type=str, required=True, help="the work dir to save the file") - parser.add_argument("-t", "--davinci_environment_type", type=str, required=False, default="ATLAS300", - help="the type the davinci hardwares") - parser.add_argument("-c", "--clean_interval", type=int, required=False, default=1 * 6 * 3600, - help="the time interval to clean the temp folder") - parser.add_argument("-u", "--ddk_user_name", type=str, required=False, default="user", - help="the user to acess ATLAS200200 DK") - parser.add_argument("-atlas_host_ip", "--atlas_host_ip", type=str, required=False, default=None, - help="the ip of ATLAS200200 DK") - args = parser.parse_args() - return args - - -def run(): - """Run the evaluate service.""" - args = _parse_args() - ip_address = args.host_ip - listen_port = args.port - clean_interval = args.clean_interval - work_path = args.work_path - optional_params = {"davinci_environment_type": args.davinci_environment_type, - "ddk_user_name": 
args.ddk_user_name, - "atlas_host_ip": args.atlas_host_ip - } - p = multiprocessing.Process(target=_clean_data_path, args=(clean_interval, work_path), daemon=True) - p.start() - Evaluate._add_params(work_path, optional_params) - api.add_resource(Evaluate, '/') - app.run(host=ip_address, port=listen_port, threaded=False) diff --git a/evaluate_service/setup.py b/evaluate_service/setup.py new file mode 100644 index 0000000..7791459 --- /dev/null +++ b/evaluate_service/setup.py @@ -0,0 +1,93 @@ +# -*- coding: utf-8 -*- + +# Copyright (C) 2020. Huawei Technologies Co., Ltd. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""Setuptools of evaluate service.""" + +import sys +import os +import shutil +import setuptools +from setuptools.command.build_py import build_py +from setuptools.command.sdist import sdist + + +if sys.version_info < (3, 6): + sys.exit("Sorry, Python < 3.6 is not supported.") + + +with open("RELEASE.md", "r") as fh: + long_desc = fh.read() + + +def _copy_security_folder(): + cur_path = os.path.dirname(os.path.abspath(__file__)) + src_path = os.path.join(cur_path, "../vega/security") + dst_path = os.path.join(cur_path, "evaluate_service/security") + if not os.path.exists(dst_path): + shutil.copytree(src_path, dst_path) + + +class custom_build_py(build_py): + """Custom build_py.""" + + def run(self): + """Copy security folder before run.""" + _copy_security_folder() + setuptools.command.build_py.build_py.run(self) + + +class custom_sdist(sdist): + """Custom sdist.""" + + def run(self): + """Copy security folder before run.""" + _copy_security_folder() + setuptools.command.sdist.sdist.run(self) + + +setuptools.setup( + name="evaluate-service", + version="1.8.0", + packages=["evaluate_service"], + include_package_data=True, + python_requires=">=3.6", + author="Huawei Noah's Ark Lab", + author_email="", + description="AutoML Toolkit", + long_description=long_desc, + long_description_content_type="text/markdown", + license="Apache License 2.0", + url="https://github.com/huawei-noah/vega/evaluate-service", + classifiers=[ + "Programming Language :: Python :: 3", + "License :: OSI Approved :: Apache Software License", + "Operating System :: POSIX :: Linux", + ], + install_requires=[ + "Flask-RESTful", + "Flask-Limiter", + "gevent", + ], + cmdclass={ + "build_py": custom_build_py, + "sdist": custom_sdist, + }, + entry_points=""" + [console_scripts] + vega-evaluate-service=evaluate_service.main:run + vega-encrypt_key=evaluate_service.security.kmc.encrypt_key:main + """, +) diff --git a/evaluate_service/tools/compare_result.py b/evaluate_service/tools/compare_result.py deleted file mode 100644 index a5d41bd..0000000 --- a/evaluate_service/tools/compare_result.py +++ /dev/null @@ -1,64 +0,0 @@ -# -*- coding: utf-8 -*- -"""The tools to compare the output file with the benchmark.""" -import struct -import numpy as np -import logging - - -def binary2list(binary_file, size, dtype): - """Convert the binary file to a list. 
- - :param binary_file: the binary file name - :type binary_file: str - :param size: the size of the output - :type size: int - :param dtype: the dtype of the output - :type dtype: str - """ - FMT_MAP = {"f": 4, "q": 8} - if dtype == "float32": - fmt = "f" - elif dtype == "int64": - fmt = "q" - else: - raise ValueError("The dtype should be float32 or int64.") - list_data = np.zeros(size) - with open(binary_file, "rb") as f: - for index in range(size): - list_data[index] = struct.unpack(fmt, f.read(FMT_MAP[fmt]))[0] - - return list_data - - -def data_compare(real_out, expect_out, atol=0.001, rtol=0.001): - """Compare the real output with the expected output. - - :param real_out: the real output - :type real_out: list - :param expect_out: the expected output, i.e. the benchmark - :type expect_out: list - :param atol: the absolute error, defaults to 0.001 - :type atol: float, optional - :param rtol: the relative error, defaults to 0.001 - :type rtol: float, optional - :return: return the error count and the error ratio - :rtype: tuple - """ - error_count = 0 - if len(real_out) != len(expect_out): - raise ValueError("The size of real_out and expect_out must be equal.") - for n in range(len(real_out)): - if abs(real_out[n] - expect_out[n]) > atol or abs(real_out[n] - expect_out[n]) / abs(expect_out[n]) > rtol: - logging.warning("pos: {}, real_out: {}, expect_out: {}, diff: {} ".format( - [n], real_out[n], expect_out[n], real_out[n] - expect_out[n])) - error_count += 1 - return error_count, error_count / len(real_out) - - -if __name__ == "__main__": - real_out_path = "./_output_0.bin" - expect_out_path = "./expect_out_1.data" - real_out = binary2list(real_out_path, 1001, "float32") - expect_out = binary2list(expect_out_path, 1001, "float32") - res = data_compare(real_out, expect_out) - logging.warning("error_count:{}, error ratio: {}".format(res[0], res[1])) diff --git a/evaluate_service/tools/data_convert.py b/evaluate_service/tools/data_convert.py deleted file mode 100644 index 9012008..0000000 --- a/evaluate_service/tools/data_convert.py +++ /dev/null @@ -1,77 +0,0 @@ -# -*- coding: utf-8 -*- -"""The tools to convert the binary data to numpy array.""" -import numpy as np -import struct - -FMT_MAP = {"f": 4, "q": 8} - - -def binary2np(binary_file, shape, dtype): - """Convert the binary to numpy array. - - :param binary_file: the binary file name - :type binary_file: .bin or .data - :param shape: the shape of the array - :type shape: list - :param dtype: the dtype of the output data - :type dtype: str - """ - dim_size = len(shape) - np_data = np.zeros(shape) - fmt = fmt_map(dtype) - with open(binary_file, "rb") as f: - if dim_size == 1: - for dim1 in range(shape[0]): - np_data[dim1] = struct.unpack(fmt, f.read(FMT_MAP[fmt]))[0] - elif dim_size == 2: - for dim1 in range(shape[0]): - for dim2 in range(shape[1]): - np_data[dim1, dim2] = struct.unpack(fmt, f.read(FMT_MAP[fmt]))[0] - elif dim_size == 3: - for dim1 in range(shape[0]): - for dim2 in range(shape[1]): - for dim3 in range(shape[2]): - np_data[dim1, dim2, dim3] = struct.unpack(fmt, f.read(FMT_MAP[fmt]))[0] - elif dim_size == 4: - np_data = binary2np_extra(binary_file, shape, dtype) - else: - raise ValueError("The dim size should be less than 5.") - return np_data - - -def binary2np_extra(binary_file, shape, dtype): - """Convert the binary to numpy array.
- - :param binary_file: the binary file name - :type binary_file: .bin or .data - :param shape: the shape of the array - :type shape: list - :param dtype: the dtype of the output data - :type dtype: str - """ - np_data = np.zeros(shape) - fmt = fmt_map(dtype) - with open(binary_file, "rb") as f: - for dim1 in range(shape[0]): - for dim2 in range(shape[1]): - for dim3 in range(shape[2]): - for dim4 in range(shape[3]): - np_data[dim1, dim2, dim3, dim4] = struct.unpack(fmt, f.read(FMT_MAP[fmt]))[0] - return np_data - - -def fmt_map(dtype): - """Map the data type to the struct format character. - - :param dtype: the dtype of the data - :type dtype: str - :return: the mapping format - :rtype: str - """ - if dtype == "float32": - fmt = "f" - elif dtype == "int64": - fmt = "q" - else: - raise ValueError("The dtype should be float32 or int64.") - return fmt diff --git a/evaluate_service/tools/inference_caffe_cpu.py b/evaluate_service/tools/inference_caffe_cpu.py deleted file mode 100644 index 8df90f2..0000000 --- a/evaluate_service/tools/inference_caffe_cpu.py +++ /dev/null @@ -1,29 +0,0 @@ -# -*- coding: utf-8 -*- -"""The tools to inference caffe model in cpu to get the benchmark output.""" -import caffe -from data_convert import binary2np - - -def inference_caffe_cpu(model_file, weight_file, input_data_path, input_shape, input_dtype, output_node_name): - """Inference of the caffe model in cpu to get the benchmark output. - - :param model_file: the caffe model, .prototxt file - :type model_file: str - :param weight_file: the caffe weight, .caffemodel - :type weight_file: str - :param input_data_path: input data file, the .bin or .data format - :type input_data_path: str - :param input_shape: the shape of the input data - :type input_shape: list - :param input_dtype: the dtype of the input data - :type input_dtype: str - :param output_node_name: the output node name in the graph - :type output_node_name: str - """ - net = caffe.Net(model_file, weight_file, caffe.TEST) - input_data = binary2np(input_data_path, input_shape, input_dtype) - net.blobs['data'].data[...] = input_data - net.forward() - res = net.blobs[output_node_name].data[0] - - res.tofile("expect_out.data") diff --git a/evaluate_service/tools/inference_onnx_cpu.py b/evaluate_service/tools/inference_onnx_cpu.py deleted file mode 100644 index 20aab2b..0000000 --- a/evaluate_service/tools/inference_onnx_cpu.py +++ /dev/null @@ -1,24 +0,0 @@ -# -*- coding: utf-8 -*- -"""The tools to inference onnx model in cpu to get the benchmark output.""" -import onnxruntime -from data_convert import binary2np - - -def inference_onnx_cpu(onnx_file, input_data_path, input_shape, input_dtype): - """Inference of the onnx in cpu to get the benchmark output.
- - :param onnx_file: the onnx file - :type onnx_file: str - :param input_data_path: input data file, the .bin or .data format - :type input_data_path: str - :param input_shape: the shape of the input - :type input_shape: list - :param input_dtype: the dtype of input - :type input_dtype: str - """ - input_data = binary2np(input_data_path, input_shape, input_dtype) - sess = onnxruntime.InferenceSession(onnx_file) - output_nodes = sess.get_outputs()[0].name - input_nodes = sess.get_inputs()[0].name - res = sess.run([output_nodes], {input_nodes: input_data}) - res.tofile("expect_out.data") diff --git a/evaluate_service/tools/inference_tf_cpu.py b/evaluate_service/tools/inference_tf_cpu.py deleted file mode 100644 index afb3085..0000000 --- a/evaluate_service/tools/inference_tf_cpu.py +++ /dev/null @@ -1,40 +0,0 @@ -# -*- coding: utf-8 -*- -"""The tools to inference tensorflow model in cpu to get the benchmark output.""" -import tensorflow as tf -from tensorflow.python.platform import gfile -from data_convert import binary2np - - -def inference_tf_cpu(pb_file, input_data_path, input_shape, input_dtype, input_node_name, output_node_name): - """Inference of the tensorflow in cpu to get the benchmark output. - - :param pb_file: the tensorflow model, .pb file - :type pb_file: str - :param input_data_path: input data file, the .bin or .data format - :type input_data_path: str - :param input_shape: the shape of the input data - :type input_shape: list - :param input_dtype: the dtype of the input data - :type input_dtype: str - :param input_node_name: the input_node_name in the graph - :type input_node_name: str - :param output_node_name: the output_node name in the graph - :type output_node_name: str - """ - sess = tf.compat.v1.Session() - with gfile.FastGFile(pb_file, 'rb') as f: - graph_def = tf.GraphDef() - graph_def.ParseFromString(f.read()) - sess.graph.as_default() - # import the graph - tf.import_graph_def(graph_def, name='') - - # Intialize - sess.run(tf.global_variables_initializer()) - - input_data = binary2np(input_data_path, input_shape, input_dtype) - op = sess.graph.get_tensor_by_name(output_node_name) - input_data_graph = sess.graph.get_tensor_by_name(input_node_name) - res = sess.run(op, feed_dict={input_data_graph: input_data}) - - res.tofile("expect_out.data") diff --git a/examples/README.en.md b/examples/README.en.md deleted file mode 100644 index 95e8409..0000000 --- a/examples/README.en.md +++ /dev/null @@ -1,18 +0,0 @@ -# Run examples - -[中文](./README.md) - -Run the following command line to run the example: - -```bash -vega -``` - -E.g: - -```bash -vega ./nas/cars/cars.yml -``` - -**Before running the example, please make sure that the path of the dataset and pre-trained model are configured correctly.** -For more details, please refer to [Example Reference](../docs/en/user/examples.md). diff --git a/examples/README.md b/examples/README.md index fe73df9..e922459 100644 --- a/examples/README.md +++ b/examples/README.md @@ -1,6 +1,21 @@ -# 运行示例 +## Run examples -[english](./README.en.md) +Run the following command line to run the example: + +```bash +vega +``` + +E.g: + +```bash +vega ./nas/cars/cars.yml +``` + +**Before running the example, please make sure that the path of the dataset and pre-trained model are configured correctly.** +For more details, please refer to [Example Reference](../docs/en/user/examples.md). 
+ +## 运行示例 执行如下命令行,运行示例: diff --git a/examples/classification/classification.yml b/examples/classification/classification.yml deleted file mode 100644 index e175605..0000000 --- a/examples/classification/classification.yml +++ /dev/null @@ -1,148 +0,0 @@ -general: - backend: tensorflow - - -pipeline: [fine_tune, nas, hpo, fully_train] - - -fine_tune: - pipe_step: - type: TrainPipeStep - model: - head: 'resnet_model/dense/' - pretrained_model_file: /cache/models/resnet_imagenet_v1_fp32_20181001 - model_desc: - type: ResNetTF - resnet_size: 50 - num_classes: 10 - trainer: - type: Trainer - epochs: 40 - mixup: True - optimizer: - type: SGD - params: - lr: 0.003 - momentum: 0.9 - weight_decay: !!float 1e-4 - lr_scheduler: - type: WarmupScheduler - params: - warmup_type: linear - warmup_iters: 5 - warmup_ratio: 0.01 - after_scheduler_config: - type: MultiStepLR - params: - milestones: [30] - gamma: 0.1 - loss: - type: CrossEntropyLoss - - dataset: - type: Cifar10 - common: - data_path: /cache/datasets/cifar10/ - batch_size: 128 - train: - transforms: - - type: Resize - size: [256, 256] - - type: RandomCrop - size: [224, 224] - - type: RandomHorizontalFlip - - type: ToTensor - - type: Normalize - mean: [0.50, 0.5, 0.5] - std: [0.50, 0.5, 0.5] - val: - transforms: - - type: Resize - size: [224, 224] - - type: ToTensor - - type: Normalize - mean: [0.50, 0.5, 0.5] - std: [0.50, 0.5, 0.5] - test: - transforms: - - type: Resize - size: [224, 224] - - type: ToTensor - - type: Normalize - mean: [0.50, 0.5, 0.5] - std: [0.50, 0.5, 0.5] - evaluator: - type: Evaluator - host_evaluator: - type: HostEvaluator - metric: - type: accuracy - -nas: - pipe_step: - type: SearchPipeStep - search_algorithm: - type: EvolutionAlgorithm - objective_keys: ['accuracy', 'flops'] - policy: - num_individual: 8 - num_generation: 4 - random_samples: 32 - search_space: - type: BackboneNasSearchSpace - model: - model_desc_file: "{local_base_path}/output/fine_tune/desc_0.json" - pretrained_model_file: "{local_base_path}/output/fine_tune/model_0/" - trainer: - ref: fine_tune.trainer - max_train_steps: 1 - epochs: 1 - dataset: - ref: fine_tune.dataset - - -hpo: - pipe_step: - type: SearchPipeStep - search_algorithm: - type: AshaHpo - policy: - total_epochs: 81 - search_space: - type: SearchSpace - hyperparameters: - - key: trainer.optimizer.params.lr - type: FLOAT_EXP - range: [0.001, 0.01] - - key: trainer.optimizer.type - type: CATEGORY - range: ['Adam', 'SGD'] - - key: trainer.optimizer.params.momentum - type: FLOAT - range: [0.5, 0.99] - condition: - - key: condition_for_sgd_momentum - child: trainer.optimizer.params.momentum - parent: trainer.optimizer.type - type: EQUAL - range: ["SGD"] - trainer: - ref: fine_tune.trainer - model: - model_desc_file: "{local_base_path}/output/fine_tune/desc_0.json" - pretrained_model_file: "{local_base_path}/output/fine_tune/model_0/" - dataset: - ref: fine_tune.dataset - - -fully_train: - pipe_step: - type: TrainPipeStep - models_folder: "{local_base_path}/output/nas/" - trainer: - ref: fine_tune.trainer - hps_file: "{local_base_path}/output/hpo/" - evaluator: - ref: fine_tune.evaluator - dataset: - ref: fine_tune.dataset diff --git a/examples/features/script_runner/bohb.yml b/examples/features/script_runner/boss.yml similarity index 67% rename from examples/features/script_runner/bohb.yml rename to examples/features/script_runner/boss.yml index 82e9bc4..31aac09 100644 --- a/examples/features/script_runner/bohb.yml +++ b/examples/features/script_runner/boss.yml @@ -10,9 +10,10 @@ hpo: 
pipe_step: type: SearchPipeStep search_algorithm: - type: BohbHpo + type: BossHpo + # tuner: hebo # rf,gp,hebo policy: - total_epochs: 5 + total_epochs: 300 search_space: type: SearchSpace hyperparameters: @@ -28,17 +29,11 @@ hpo: - key: trainer.optimizer.params.momentum type: FLOAT range: [0.8, 0.99] - condition: - - key: condition_for_sgd_momentum - child: trainer.optimizer.params.momentum - parent: trainer.optimizer.type - type: EQUAL - range: ["SGD"] trainer: type: ScriptRunner - script: "./train.py" - # script: "./train_vega.py" + script: "/my_code/train.py" + # script: "/my_code/train_vega.py" fullytrain: @@ -46,6 +41,6 @@ fullytrain: type: TrainPipeStep trainer: type: ScriptRunner - epochs: 2 - # script: "./train.py" - script: "./train_vega.py" + epochs: 100 + script: "/my_code/train.py" + # script: "/my_code/train_vega.py" diff --git a/examples/features/script_runner/train.py b/examples/features/script_runner/train.py index e8961da..12cd76b 100644 --- a/examples/features/script_runner/train.py +++ b/examples/features/script_runner/train.py @@ -1,12 +1,18 @@ # -*- coding:utf-8 -*- # Copyright (C) 2020. Huawei Technologies Co., Ltd. All rights reserved. -# This program is free software; you can redistribute it and/or modify -# it under the terms of the MIT License. -# This program is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# MIT License for more details. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. """The example of training model.""" @@ -18,6 +24,7 @@ from torch.nn import functional as F from vega.trainer.trial_agent import TrialAgent +logging.basicConfig(level=logging.INFO) logging.info("load trial") trial = TrialAgent() @@ -44,7 +51,7 @@ if otpimizer_name == "SGD": optimizer = torch.optim.SGD(resnet18.parameters(), **otpimizer_params) else: - optimizer = torch.optim.Adam(resnet18.parameters(), **otpimizer_params) + optimizer = torch.optim.Adam(resnet18.parameters(), lr=otpimizer_params["lr"]) loss_fn = torch.nn.CrossEntropyLoss().cuda() logging.info("training ...") diff --git a/examples/features/script_runner/train_vega.py b/examples/features/script_runner/train_vega.py index 701548d..40af0e1 100644 --- a/examples/features/script_runner/train_vega.py +++ b/examples/features/script_runner/train_vega.py @@ -1,12 +1,18 @@ # -*- coding:utf-8 -*- # Copyright (C) 2020. Huawei Technologies Co., Ltd. All rights reserved. -# This program is free software; you can redistribute it and/or modify -# it under the terms of the MIT License. -# This program is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# MIT License for more details. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. """The example of training model.""" @@ -19,14 +25,14 @@ logging.info("create model") vega.set_backend("pytorch", "GPU") -model = vega.network("ResNet", depth=18).cuda() +model = vega.get_network("ResNet", depth=18).cuda() logging.info("load dataset") -train_loader = vega.dataset("Cifar10", data_path="/cache/datasets/cifar10", mode="train", batch_size=256).loader -test_loader = vega.dataset("Cifar10", data_path="/cache/datasets/cifar10", mode="test", batch_size=256).loader +train_loader = vega.get_dataset("Cifar10", data_path="/cache/datasets/cifar10", mode="train", batch_size=256).loader +test_loader = vega.get_dataset("Cifar10", data_path="/cache/datasets/cifar10", mode="test", batch_size=256).loader logging.info("create trainer") -trainer = vega.trainer(model=model, id=trial.worker_id, hps=trial.hps) +trainer = vega.get_trainer(model=model, id=trial.worker_id, hps=trial.hps) trainer.config.mixup = True trainer.train_loader = train_loader trainer.valid_loader = test_loader diff --git a/examples/fully_train/fmd/fmd.yml b/examples/fully_train/fmd/fmd.yml index d629e18..f281515 100644 --- a/examples/fully_train/fmd/fmd.yml +++ b/examples/fully_train/fmd/fmd.yml @@ -1,10 +1,8 @@ general: backend: pytorch - pipeline: [fully_train] - fully_train: pipe_step: type: TrainPipeStep diff --git a/examples/fully_train/fmd/run.py b/examples/fully_train/fmd/run.py index 3ee66bd..32ba010 100644 --- a/examples/fully_train/fmd/run.py +++ b/examples/fully_train/fmd/run.py @@ -1,21 +1,26 @@ # -*- coding:utf-8 -*- # Copyright (C) 2020. Huawei Technologies Co., Ltd. All rights reserved. -# This program is free software; you can redistribute it and/or modify -# it under the terms of the MIT License. -# This program is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# MIT License for more details. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
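Note on the script_runner changes above: boss.yml removes the condition that tied `trainer.optimizer.params.momentum` to SGD, so a sampled configuration can now carry a momentum value even when Adam is selected; that is presumably why train.py switches to building Adam from the learning rate only. A minimal sketch of that selection logic, assuming the sampled hyperparameters arrive as a plain dict (the `build_optimizer` helper below is hypothetical and not part of this diff):

```python
# Hedged sketch, not part of the diff: pick the optimizer named in the sampled
# hyperparameters and drop keys the chosen optimizer does not accept.
import torch

def build_optimizer(name, params, model_parameters):
    """Hypothetical helper mirroring the selection logic in train.py."""
    if name == "SGD":
        # SGD accepts lr, momentum and weight_decay directly.
        return torch.optim.SGD(model_parameters, **params)
    # Adam has no "momentum" argument, so only the learning rate is forwarded.
    return torch.optim.Adam(model_parameters, lr=params["lr"])
```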
"""Run example.""" -from vega.tools.run_pipeline import run_pipeline - - -def _load_special_lib(config_file): - import fmd +import vega +from vega.tools.run_pipeline import main if __name__ == '__main__': - run_pipeline(load_special_lib_func=_load_special_lib) + vega.set_backend("pytorch") + import fmd + main() diff --git a/examples/nas/ghostnet_nas/ghostnet_nas.yml b/examples/nas/ghostnet_nas/ghostnet_nas.yml new file mode 100644 index 0000000..cb1d72d --- /dev/null +++ b/examples/nas/ghostnet_nas/ghostnet_nas.yml @@ -0,0 +1,115 @@ +# general: +# quota: "flops < 5 and params < 4e10" +pipeline: [nas, fully_train] + +nas: + pipe_step: + type: SearchPipeStep + search_algorithm: + type: BayesSearch + objective_keys: ['accuracy', 'params'] + num_samples: 50 + + search_space: + hyperparameters: + - key: network.block + type: CATEGORY + range: ['BasicBlock', 'Bottleneck'] + - key: network.stage1 + type: INT + range: [2,10] + - key: network.stage2 + type: INT + range: [2,10] + - key: network.stage3 + type: INT + range: [2,20] + - key: network.stage4 + type: INT + range: [2,10] + + model: + model_desc: + type: GhostNetNas + num_classes: 20 + + trainer: + type: Trainer + epochs: 1 + mixup: True + optimizer: + type: SGD + params: + lr: 0.1 + momentum: 0.9 + weight_decay: !!float 1e-4 + lr_scheduler: + type: MultiStepLR + by_epoch: True + params: + milestones: [50, 75, 90] + gamma: 0.1 + loss: + type: CrossEntropyLoss + params: + sparse: True + + dataset: + type: Cifar10 + common: + data_path: /cache/datasets/cifar10/ + batch_size: 64 + train: + transforms: + - type: Resize + size: [256, 256] + - type: RandomCrop + size: [224, 224] + - type: RandomHorizontalFlip + - type: ToTensor + - type: Normalize + mean: [0.50, 0.5, 0.5] + std: [0.50, 0.5, 0.5] + val: + transforms: + - type: Resize + size: [224, 224] + - type: ToTensor + - type: Normalize + mean: [0.50, 0.5, 0.5] + std: [0.50, 0.5, 0.5] + test: + transforms: + - type: Resize + size: [224, 224] + - type: ToTensor + - type: Normalize + mean: [0.50, 0.5, 0.5] + std: [0.50, 0.5, 0.5] + + +fully_train: + pipe_step: + type: TrainPipeStep + models_folder: "{local_base_path}/output/nas/" + trainer: + type: Trainer + epochs: 100 + mixup: True + optimizer: + type: SGD + params: + lr: 0.1 + momentum: 0.9 + weight_decay: !!float 1e-4 + lr_scheduler: + type: CosineAnnealingLR + by_epoch: True + params: + T_max: 100 + loss: + type: CrossEntropyLoss + params: + sparse: True + dataset: + ref: nas.dataset \ No newline at end of file diff --git a/examples/nas/modnas/mbv2.yml b/examples/nas/modnas/mbv2.yml index 33baca5..9a35f49 100644 --- a/examples/nas/modnas/mbv2.yml +++ b/examples/nas/modnas/mbv2.yml @@ -19,9 +19,10 @@ fully_train: type: CIFAR_MobileNetV2_GPU args: n_classes: 10 - desc_construct: - predefined: - type: MobileNetV2PredefinedConstructor + proc: augment + augment: + construct: + predefined: MobileNetV2PredefinedConstructor trainer: type: Trainer diff --git a/examples/nas/modnas/ps.yml b/examples/nas/modnas/ps.yml index 4dbc14e..c661483 100644 --- a/examples/nas/modnas/ps.yml +++ b/examples/nas/modnas/ps.yml @@ -31,11 +31,13 @@ nas: type: CIFAR_MobileNetV2_GPU args: n_classes: 10 - construct: - predefined: MobileNetV2PredefinedConstructor - elastic: MobileNetV2ElasticConstructor - desc_construct: - arch_desc: MobileNetV2ArchDescConstructor + search: + construct: + predefined: MobileNetV2PredefinedConstructor + elastic: MobileNetV2ElasticConstructor + augment: + construct: + arch_desc: MobileNetV2ArchDescConstructor trainer: type: Trainer diff --git 
a/examples/nas/simple_cnn/__init__.py b/examples/nas/simple_cnn/__init__.py deleted file mode 100644 index e69de29..0000000 diff --git a/examples/nlp/bert.yml b/examples/nlp/bert.yml deleted file mode 100644 index 4ca6dc8..0000000 --- a/examples/nlp/bert.yml +++ /dev/null @@ -1,48 +0,0 @@ -pipeline: [fully_train] - -fully_train: - pipe_step: - type: TrainPipeStep - dataset: - type: GlueDataset - common: - task_name: mrpc - batch_size: 32 - data_path: /cache/datasets/MRPC/ - vocab_file: /cache/nlp/bert-base-uncased/vocab.txt - trainer: - type: Trainer - epochs: 3 - model_statistics: False - optimizer: - type: AdamW - params: - lr: 0.00001 - loss: - type: CrossEntropyLoss - metric: - type: NlpMetrics - model: - pretrained_model_file: /cache/nlp/bert-base-uncased/pytorch_model.bin - head: model.cls - model_desc: - type: BertClassification - config: { - "attention_probs_dropout_prob": 0.1, - "hidden_act": "gelu", - "hidden_dropout_prob": 0.1, - "hidden_size": 768, - "initializer_range": 0.02, - "intermediate_size": 3072, - "layer_norm_eps": !!float 1e-12, - "max_position_embeddings": 512, - "model_type": "bert", - "num_attention_heads": 12, - "num_hidden_layers": 12, - "pad_token_id": 0, - "type_vocab_size": 2, - "vocab_size": 30522 - } - - - diff --git a/examples/nlp/bert_md.yml b/examples/nlp/bert_md.yml deleted file mode 100644 index 4a30a6a..0000000 --- a/examples/nlp/bert_md.yml +++ /dev/null @@ -1,19 +0,0 @@ -general: - backend: mindspore #pytorch | tensorflow | mindspore - device_category: NPU - dft: True - -pipeline: [fully_train] - - -fully_train: - pipe_step: - type: TrainPipeStep # distributed: HcclTrainStep - model: - model_desc: - modules: ['bert'] - bert: - type: Bert - trainer: - type: BertTrainerCallback - epochs: 40 diff --git a/examples/nlp/tiny_bert.yml b/examples/nlp/tiny_bert.yml deleted file mode 100644 index d3699be..0000000 --- a/examples/nlp/tiny_bert.yml +++ /dev/null @@ -1,76 +0,0 @@ -pipeline: [fully_train] - -fully_train: - pipe_step: - type: TrainPipeStep - dataset: - type: GlueDataset - common: - batch_size: 32 - vocab_file: /cache/nlp/bert-base-uncased/vocab.txt - train: - pregenerated: True - data_path: /cache/datasets/english_wiki_book/json/ - val: - task_name: mrpc - data_path: /cache/datasets/MRPC/ - - trainer: - type: Trainer - epochs: 50 - model_statistics: False - call_metrics_on_train: False - optimizer: - type: AdamW - params: - lr: 0.0005 - loss: - type: SingleLoss - metric: - type: NlpMetrics - model: - pretrained_model_file: /home/chenchen/workspace/nlp/bert-base-uncased/pytorch_model.bin - head: 'cls' - model_desc: - type: TinyBertDistil - header: - type: BertClassificationHeader - hidden_size: 312 - num_labels: 2 - student: - type: TinyBertForPreTraining - config: { - "attention_probs_dropout_prob": 0.1, - "hidden_act": "gelu", - "hidden_dropout_prob": 0.1, - "hidden_size": 312, - "initializer_range": 0.02, - "intermediate_size": 1200, - "max_position_embeddings": 512, - "num_attention_heads": 12, - "num_hidden_layers": 4, - "pre_trained": "", - "type_vocab_size": 2, - "vocab_size": 30522 - } - teacher: - type: TinyBertForPreTraining - config: { - "attention_probs_dropout_prob": 0.1, - "hidden_act": "gelu", - "hidden_dropout_prob": 0.1, - "hidden_size": 768, - "initializer_range": 0.02, - "intermediate_size": 3072, - "layer_norm_eps": !!float 1e-12, - "max_position_embeddings": 512, - "model_type": "bert", - "num_attention_heads": 12, - "num_hidden_layers": 12, - "pad_token_id": 0, - "type_vocab_size": 2, - "vocab_size": 30522 - } - - - diff --git 
a/examples/prediction/temporal_spatial.yml b/examples/prediction/temporal_spatial.yml deleted file mode 100644 index bc7a796..0000000 --- a/examples/prediction/temporal_spatial.yml +++ /dev/null @@ -1,77 +0,0 @@ -general: - backend: tensorflow - -pipeline: [search] - -search: - pipe_step: - type: SearchPipeStep - - search_algorithm: - type: AshaHpo - objective_keys: RMSE - policy: - total_epochs: 100 - - search_space: - type: SearchSpace - hyperparameters: - - key: dataset.batch_size - type: CATEGORY - range: [8, 16, 24, 32] - - key: trainer.optimizer.params.lr - type: FLOAT_EXP - range: [0.0001, 0.1] - - key: trainer.optimizer.type - type: CATEGORY - range: ['Adam', 'SGD','RMSProp'] - - key: trainer.optimizer.params.momentum - type: FLOAT - range: [0.0, 0.99] - - key: network.gru_layers - type: CATEGORY - range: [1, 2, 3, 4] - - key: network.gcn_layers - type: CATEGORY - range: [1, 2, 3, 4] - condition: - - key: condition_for_sgd_momentum - child: trainer.optimizer.params.momentum - parent: trainer.optimizer.type - type: EQUAL - range: ["SGD"] - # - key: network.kernel_size - # type: CATEGORY - # range: [1, 2, 3, 4, 5] - model: - model_desc: - type: GCN - - trainer: - type: Trainer - optimizer: - type: SGD - params: - lr: 0.01 - momentum: 0.9 - loss: - type: ForecastLoss - metric: - type: RMSE - evaluator: - type: Evaluator - host_evaluator: - type: HostEvaluator - metric: - type: RMSE - - dataset: - type: SpatiotemporalDataset - common: - data_path: /cache/Abilene/df_inputated.csv - n_his: 12 - n_pred: 4 - - - - diff --git a/setup.py b/setup.py index 8087133..e4a8cd8 100644 --- a/setup.py +++ b/setup.py @@ -1,16 +1,22 @@ # -*- coding: utf-8 -*- # Copyright (C) 2020. Huawei Technologies Co., Ltd. All rights reserved. -# This program is free software; you can redistribute it and/or modify -# it under the terms of the MIT License. -# This program is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# MIT License for more details. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
"""Setuptools of vega.""" -import setuptools import sys +import setuptools if sys.version_info < (3, 6): @@ -23,8 +29,8 @@ setuptools.setup( name="noah-vega", - version="1.7.1", - packages=["vega", "evaluate_service"], + version="1.8.0", + packages=["vega"], include_package_data=True, python_requires=">=3.6", author="Huawei Noah's Ark Lab", @@ -32,45 +38,36 @@ description="AutoML Toolkit", long_description=long_desc, long_description_content_type="text/markdown", - license="MIT", + license="Apache License 2.0", url="https://github.com/huawei-noah/vega", - # packages=setuptools.find_packages(), classifiers=[ "Programming Language :: Python :: 3", - "License :: OSI Approved :: MIT License", + "License :: OSI Approved :: Apache Software License", "Operating System :: POSIX :: Linux", ], install_requires=[ - "py-dag", - "pareto", "thop", "psutil", "pillow", "pyzmq", - "tf-slim", - "pandas==0.25.2", - "distributed==2.18.0", - "click==7.1.2", - "PyYAML==5.1.2", - "numpy==1.18.5", - "scipy==1.5.3", - "scikit-learn==0.21.3", - "opencv-python-headless==4.3.0.38", - "tensorboardX==1.9", - "tf-models-official==0.0.3.dev1", - "torch==1.3.0", - "torchvision==0.4.1", - "tensorflow-gpu>=1.14.0,<2.0", - # "onnx-simplifier" + "pandas", + "distributed", + "click", + "PyYAML", + "numpy", + "scipy", + "scikit-learn", + "opencv-python", + "tensorboardX", ], entry_points=""" [console_scripts] - vega=vega.tools.run_pipeline:run_pipeline - vega-kill=vega.tools.kill:_kill - vega-verify-cluster=vega.tools.verify_cluster:_verify_cluster - vega-fine-tune=vega.tools.fine_tune:_fine_tune - vega-progress=vega.tools.query_progress:print_progress - vega-process=vega.tools.query_process:print_processes - vega-evaluate-service=evaluate_service.main:run - """, + vega=vega.tools.run_pipeline:main + vega-inference=vega.tools.inference:main + vega-inference-det=vega.tools.detection_inference:main + vega-kill=vega.tools.kill:main + vega-progress=vega.tools.query_progress:main + vega-process=vega.tools.query_process:main + vega-encrypt_key=vega.security.kmc.encrypt_key:main + """, ) diff --git a/vega/__init__.py b/vega/__init__.py index 19812be..f6bbb56 100644 --- a/vega/__init__.py +++ b/vega/__init__.py @@ -1,4 +1,40 @@ -__version__ = "1.7.1" +# -*- coding:utf-8 -*- + +# Copyright (C) 2020. Huawei Technologies Co., Ltd. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +"""Vega's methods.""" + + +__all__ = [ + "set_backend", + "is_cpu_device", "is_gpu_device", "is_npu_device", + "is_ms_backend", "is_tf_backend", "is_torch_backend", + "get_devices", + "ClassFactory", "ClassType", + "FileOps", + "run", + "init_cluster_args", + "module_existed", + "TrialAgent", + "get_network", + "get_dataset", + "get_trainer", + "get_quota", +] + +__version__ = "1.8.0" import sys @@ -6,28 +42,30 @@ sys.exit('Sorry, Python < 3.6 is not supported.') -from .common.backend_register import * +from .common.backend_register import set_backend, is_cpu_device, is_gpu_device, is_npu_device, \ + is_ms_backend, is_tf_backend, is_torch_backend, get_devices from .common.class_factory import ClassFactory, ClassType +from .common.file_ops import FileOps from .core import run, init_cluster_args, module_existed from .trainer.trial_agent import TrialAgent -from .quota import * +from . import quota -def network(name, **kwargs): +def get_network(name, **kwargs): """Return network.""" return ClassFactory.get_cls(ClassType.NETWORK, name)(**kwargs) -def dataset(name, **kwargs): +def get_dataset(name, **kwargs): """Return dataset.""" return ClassFactory.get_cls(ClassType.DATASET, name)(**kwargs) -def trainer(name="Trainer", **kwargs): +def get_trainer(name="Trainer", **kwargs): """Return trainer.""" return ClassFactory.get_cls(ClassType.TRAINER, name)(**kwargs) -def quota(**kwargs): +def get_quota(**kwargs): """Return quota.""" return ClassFactory.get_cls(ClassType.QUOTA, "Quota")(**kwargs) diff --git a/vega/algorithms/__init__.py b/vega/algorithms/__init__.py index 8cf4cfd..a68ea4e 100644 --- a/vega/algorithms/__init__.py +++ b/vega/algorithms/__init__.py @@ -1,7 +1,5 @@ -from .nas import * # noqa: F401, F403 -from .hpo import * # noqa: F401, F403 -from .data_augmentation import * # noqa: F401, F403 -from .compression import * # noqa: F401, F403 -from .auto_loss import * # noqa: F401, F403 -from .fully_train import * -from .nlp import * +from . import nas +from . import hpo +from . import data_augmentation +from . import compression +from . import auto_loss diff --git a/vega/algorithms/auto_loss/ada_segment.py b/vega/algorithms/auto_loss/ada_segment.py index a707491..009ca21 100644 --- a/vega/algorithms/auto_loss/ada_segment.py +++ b/vega/algorithms/auto_loss/ada_segment.py @@ -1,17 +1,23 @@ # -*- coding: utf-8 -*- # Copyright (C) 2020. Huawei Technologies Co., Ltd. All rights reserved. -# This program is free software; you can redistribute it and/or modify -# it under the terms of the MIT License. -# This program is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# MIT License for more details. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
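For readers tracking the API rename (`vega.network` / `vega.dataset` / `vega.trainer` become `vega.get_network` / `vega.get_dataset` / `vega.get_trainer`), a short usage sketch pieced together from the new `vega/__init__.py` helpers above and the updated `train_vega.py` earlier in this diff; the data path and batch size are only the values used elsewhere in the diff, not requirements:

```python
# Minimal sketch of the renamed helpers exported by vega 1.8.0.
import vega

vega.set_backend("pytorch", "GPU")

# get_network / get_dataset / get_trainer resolve registered classes through
# ClassFactory and instantiate them with the given keyword arguments.
model = vega.get_network("ResNet", depth=18).cuda()
train_loader = vega.get_dataset("Cifar10", data_path="/cache/datasets/cifar10",
                                mode="train", batch_size=256).loader
trainer = vega.get_trainer(model=model)
trainer.train_loader = train_loader
# configure valid_loader and start training as shown in train_vega.py
```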
"""Implementation of Ada-Segment algorithm.""" +from enum import Enum from torch import nn import numpy as np import pandas as pd -from enum import Enum import torch from torch.optim import Adam @@ -81,7 +87,6 @@ def __init__(self, model_num, total_rungs, loss_num, best_loss=None): self.is_completed = False self.rung_id = 0 self.estimator = WeightController(in_features=loss_num) - # self.estimator.apply(_weight_init()) self.optimizer = Adam(self.estimator.parameters(), lr=5e-2, weight_decay=5e-4) self.loss = Xloss(total_rungs=total_rungs) @@ -111,7 +116,6 @@ def propose(self): self.weight_tensor = weighted_loss / input_tensor self.weight_tensor = self.weight_tensor.squeeze() weight = self.weight_tensor.detach().numpy().tolist() - # explore new_weight = np.random.normal(weight, 0.2) sample["dynamic_weight"] = new_weight return sample @@ -141,8 +145,6 @@ def add_score(self, config_id, rung_id, reward, cur_loss): def _init_next_rung(self): current_rung_df = self.sieve_board.loc[(self.sieve_board['rung_id'] == self.rung_id)] - - # update the WeightController if self.rung_id >= 1: last_rung_df = self.sieve_board.loc[(self.sieve_board['rung_id'] == self.rung_id - 1)] cur_performance = current_rung_df['score'].to_list() diff --git a/vega/algorithms/auto_loss/ada_segment_conf.py b/vega/algorithms/auto_loss/ada_segment_conf.py index 7af2401..ef98e1d 100644 --- a/vega/algorithms/auto_loss/ada_segment_conf.py +++ b/vega/algorithms/auto_loss/ada_segment_conf.py @@ -1,12 +1,18 @@ # -*- coding:utf-8 -*- # Copyright (C) 2020. Huawei Technologies Co., Ltd. All rights reserved. -# This program is free software; you can redistribute it and/or modify -# it under the terms of the MIT License. -# This program is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# MIT License for more details. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. """Defined Configs.""" from vega.common import ConfigSerializable diff --git a/vega/algorithms/auto_loss/adaptive_muti_loss.py b/vega/algorithms/auto_loss/adaptive_muti_loss.py index 619149b..ec9b16d 100644 --- a/vega/algorithms/auto_loss/adaptive_muti_loss.py +++ b/vega/algorithms/auto_loss/adaptive_muti_loss.py @@ -1,21 +1,27 @@ # -*- coding: utf-8 -*- # Copyright (C) 2020. Huawei Technologies Co., Ltd. All rights reserved. -# This program is free software; you can redistribute it and/or modify -# it under the terms of the MIT License. -# This program is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# MIT License for more details. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. """Defined Autoloss class.""" import os import logging -import pickle -from .ada_segment import AdaSegment -from .ada_segment_conf import AdaSegConfig -from vega.common.task_ops import TaskOps +from vega.common import FileOps, TaskOps from vega.common.class_factory import ClassFactory, ClassType from vega.algorithms.hpo.hpo_base import HPOBase +from vega.common import General +from .ada_segment import AdaSegment +from .ada_segment_conf import AdaSegConfig @ClassFactory.register(ClassType.SEARCH_ALGORITHM) @@ -54,8 +60,7 @@ def update(self, record): worker_path = TaskOps().get_local_worker_path(step_name=record.get("step_name"), worker_id=record.get("worker_id")) saved_loss = os.path.join(worker_path, "muti_loss.pkl") - with open(saved_loss, "rb") as f: - cur_loss = pickle.load(f) + cur_loss = FileOps.load_pickle(saved_loss) if not rewards: rewards = -1 diff --git a/vega/algorithms/compression/__init__.py b/vega/algorithms/compression/__init__.py index 4dc3c82..9dd93ee 100644 --- a/vega/algorithms/compression/__init__.py +++ b/vega/algorithms/compression/__init__.py @@ -1,12 +1,18 @@ # -*- coding:utf-8 -*- # Copyright (C) 2020. Huawei Technologies Co., Ltd. All rights reserved. -# This program is free software; you can redistribute it and/or modify -# it under the terms of the MIT License. -# This program is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# MIT License for more details. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
"""Lazy import compression algorithms.""" @@ -16,6 +22,5 @@ "prune_ea": ["PruneCodec", "PruneEA", "PruneSearchSpace", "PruneTrainerCallback"], "prune_ea_mobilenet": ["PruneMobilenetCodec", "PruneMobilenetTrainerCallback"], "quant_ea": ["QuantCodec", "QuantEA", "QuantTrainerCallback"], - "prune_dag": ["PruneDAGSearchSpace", "AdaptiveBatchNormalizationCallback", "SCOPDAGSearchSpace", - "KnockoffFeaturesCallback"], + "auto_prune": ["AutoPrune"], }) diff --git a/vega/algorithms/compression/auto_prune/__init__.py b/vega/algorithms/compression/auto_prune/__init__.py new file mode 100644 index 0000000..c02efc3 --- /dev/null +++ b/vega/algorithms/compression/auto_prune/__init__.py @@ -0,0 +1,3 @@ +from .auto_prune import AutoPrune + +__all__ = ["AutoPrune"] diff --git a/vega/algorithms/compression/auto_prune/auto_prune.py b/vega/algorithms/compression/auto_prune/auto_prune.py new file mode 100644 index 0000000..3dd69ba --- /dev/null +++ b/vega/algorithms/compression/auto_prune/auto_prune.py @@ -0,0 +1,138 @@ +# -*- coding:utf-8 -*- + +# Copyright (C) 2020. Huawei Technologies Co., Ltd. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +"""AutoPrune.""" +import logging + +from vega.algorithms.compression.auto_prune.pruning import named_pruned_modules, compress +from vega.algorithms.hpo.sha_base.tuner import TunerBuilder +from vega.common import ClassType, ClassFactory, ConfigSerializable, callbacks +from vega.core.pipeline.conf import PipeStepConfig +from vega.core.search_algs import SearchAlgorithm +from vega.core.search_space import SpaceSet +from vega.model_zoo import ModelZoo + + +class AutoPruneConfig(ConfigSerializable): + """DAG Block Nas Config.""" + + num_samples = 100 + tuner = 'GP' # GP | RF | RandSearch | hebo + objective_keys = 'accuracy' + space_range = [50, 99] + space_key = 'prune_d_rate' # prune_d_rate | prune_rate + each_rung_samples = num_samples + strategy = None # l1 + prune_type = 'prune' # prune | mask + hps_handler = None # progressive + + +@ClassFactory.register(ClassType.SEARCH_ALGORITHM) +class AutoPrune(SearchAlgorithm): + """Auto Prune Base Class.""" + + config = AutoPruneConfig() + + def __init__(self, search_space=None, model=None, **kwargs): + super().__init__(search_space, **kwargs) + self.model = model or ModelZoo().get_model(PipeStepConfig.model.model_desc, + PipeStepConfig.model.pretrained_model_file) + self.search_space = self.encode(self.search_space) + self.sample_count = 0 + if self.config.tuner == 'hebo': + from vega.algorithms.hpo.sha_base.hebo_adaptor import HeboAdaptor + self.tuner = HeboAdaptor(self.search_space) + else: + self.tuner = TunerBuilder(search_space=self.search_space, tuner=self.config.tuner) + + def encode(self, search_space, space_key=None, space_range=None): + """Encode searchspace.""" + space_key = space_key or self.config.space_key + space_range = space_range or self.config.space_range + search_space = search_space or SpaceSet().load([(space_key, 'INT', space_range)]) + space_set = SpaceSet() + item = 
search_space.get("hyperparameters")[0] + for name, module in named_pruned_modules(self.model): + space_set.add("{}.{}".format(name, item.get("key")), space_type=item.get("type"), + space_range=item.get("range")) + # first conv not pruned. + space_set.pop(0) + return space_set.search_space + + @classmethod + def decode(cls, sample): + """Decode desc.""" + return sample.get("worker_id"), None, sample.get("encoded_desc"), dict(objective_keys=cls.config.objective_keys) + + def search(self): + """Search a desc.""" + desc = self.do_search() + self.sample_count += 1 + desc['trainer.epochs'] = self.sample_count // self.config.each_rung_samples - 1 + return dict(worker_id=self.sample_count - 1, encoded_desc=dict(desc)) + + def do(self, show_desc=False): + """Prune once.""" + desc = self.search().get("encoded_desc") + model = compress(self.model, desc, self.config.strategy, self.config.prune_type) + if show_desc: + return model, desc + return model + + def update(self, records): + """Update records.""" + features = records.get('hps') + if 'trainer.epochs' in features: + features.pop("trainer.epochs") + labels = records.get('rewards') + labels = labels[0] if isinstance(labels, list) else labels + self.tuner.add(features, labels) + + def do_search(self): + """Search desc until match the expected ratio.""" + if self.config.hps_handler == 'progressive': + self.search_space.handler = ProgressiveHandler(self.sample_count, self.config.num_samples) + return self.tuner.propose()[0] + + @property + def is_completed(self): + """Check is completed.""" + return self.sample_count >= self.config.num_samples + + def get_best(self, show_desc=False): + """Get best score and hps.""" + if show_desc: + return self.tuner._best_score, self.tuner._best_hyperparams + return self.tuner._best_score + + @staticmethod + @callbacks("init_trainer") + def prune(trainer, logs=None): + """Define prune function to init trainer callback.""" + logging.info("model prune hps: {}".format(trainer.hps)) + trainer.model = compress(trainer.model, trainer.hps) + + +class ProgressiveHandler(object): + """Define a Handler for search space.""" + + def __init__(self, curr, max_samples): + self.rate = curr / max_samples + logging.debug("Progress search space rate: {}".format(self.rate)) + + def __call__(self, low, high): + """Call sample to change search space range.""" + low = high - (high - low) * self.rate + return low, high diff --git a/vega/algorithms/compression/auto_prune/pruning.py b/vega/algorithms/compression/auto_prune/pruning.py new file mode 100644 index 0000000..16942eb --- /dev/null +++ b/vega/algorithms/compression/auto_prune/pruning.py @@ -0,0 +1,286 @@ +# -*- coding: utf-8 -*- + +# Copyright (C) 2020. Huawei Technologies Co., Ltd. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
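The new `AutoPrune` search algorithm above samples per-layer prune rates with a tuner and applies them through the `compress` helper from the new `pruning.py` below. A hedged sketch of one search iteration, assuming a model has already been loaded by the surrounding pipeline (`my_model` and `accuracy` are placeholders, not names from this diff):

```python
# Illustrative only: one propose/compress/update round with AutoPrune, following
# the method signatures shown in auto_prune.py.
from vega.algorithms.compression.auto_prune import AutoPrune

algo = AutoPrune(model=my_model)                # my_model: a loaded torch model
pruned_model, desc = algo.do(show_desc=True)    # sample prune rates and prune channels
# ... train/evaluate pruned_model, then feed the reward back to the tuner:
algo.update({"hps": desc, "rewards": [accuracy]})
```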
+ +"""Prune model.""" +import copy + +import vega +from vega.common import ClassType, ClassFactory + +if vega.is_torch_backend(): + import torch + + +def is_conv2d(module): + """Determine Conv2d.""" + # depth-wise convolution not in pruned search space. + return isinstance(module, torch.nn.Conv2d) and not is_depth_wise_conv(module) + + +def is_depth_wise_conv(module): + """Determine Conv2d.""" + if hasattr(module, "groups"): + return module.groups != 1 and module.in_channels == module.out_channels + elif hasattr(module, "group"): + return module.group != 1 and module.in_channels == module.out_channels + + +def mask_conv2d_out_channels(module, value): + """Mask out channels of Conv2d.""" + if not value: + return + out_channels_idx = [idx for idx, value in enumerate(value) if value == 0] + if not out_channels_idx: + return + module.weight.data[out_channels_idx, :, :, :] = 0 + + +def mask_conv2d_in_channels(module, value): + """Mask in channels of conv2d.""" + if not value: + return + in_channels_idx = [idx for idx, value in enumerate(value) if value == 0] + if not in_channels_idx: + return + module.weight.data[:, in_channels_idx, :, :] = 0 + + +def mask_conv2d(module, c_in, c_out): + """Mask conv2d.""" + if not isinstance(module, torch.nn.Conv2d): + return + mask_conv2d_in_channels(module, c_in) + mask_conv2d_out_channels(module, c_out) + + +def mask_linear(module, value): + """Mask linear.""" + if not isinstance(module, torch.nn.Linear) or not value or sum(value) == len(value): + return + idx_in = [idx for idx, value in enumerate(value) if value == 0] + if not idx_in: + return + module.weight.data[:, idx_in] = 0 + + +def mask_batch_norm(module, value): + """Prune Batch Norm.""" + if not isinstance(module, torch.nn.BatchNorm2d) or not value: + return + idx = [idx for idx, value in enumerate(value) if value == 0] + if not idx: + return + weights = {**module._parameters, **module._buffers} + if 'num_batches_tracked' in weights: + weights.pop('num_batches_tracked') + for name, weight in weights.items(): + if name == 'running_mean': + module.running_mean.data[idx] = 0 + elif name == 'running_var': + module.running_var.data[idx] = 0 + elif name == 'weight': + module.weight.data[idx] = 0 + elif name == 'bias': + module.bias.data[idx] = 0 + + +def prune_conv2d_out_channels(module, value): + """Prune out channels of Conv2d.""" + if not value: + return + module.out_channels = sum(value) + if sum(value) != module.out_channels: + raise ValueError("Outchannel is wrong.") + out_channels_idx = [idx for idx, value in enumerate(value) if value == 1] + for name, weight in module._parameters.items(): + if weight is None: + continue + if name == 'weight': + module.weight.data = weight[out_channels_idx, :, :, :] + elif name == 'bias': + module.bias.data = weight[out_channels_idx] + + +def prune_conv2d_in_channels(module, value): + """Prune in channels of conv2d.""" + if not value: + return + module.in_channels = sum(value) + if sum(value) != module.in_channels: + raise ValueError("Inchannel is wrong.") + in_channels_idx = [idx for idx, value in enumerate(value) if value == 1] + for name, weight in module._parameters.items(): + if weight is None or name != 'weight': + continue + if hasattr(module, "groups") and module.groups != 1: + # group and depth-wise convolution + # todo: not working on BINARY_CODE mode, mask code must be divisible by weight + module.groups = module.in_channels // weight.shape[1] + else: + prune_weight = weight[:, in_channels_idx, :, :] + module.weight.data = prune_weight + + +def 
prune_conv2d(module, c_in, c_out): + """prune conv2d.""" + if not isinstance(module, torch.nn.Conv2d): + return + prune_conv2d_in_channels(module, c_in) + prune_conv2d_out_channels(module, c_out) + + +def prune_linear(module, value): + """Prune linear.""" + if not isinstance(module, torch.nn.Linear) or not value or sum(value) == len(value): + return + if module.in_features == len(value): + module.in_features = sum(value) + else: + module.in_features = module.in_features // len(value) * sum(value) + if sum(value) == module.in_features: + idx_in = [idx for idx, value in enumerate(value) if value == 1] + else: + idx_in = [idx for idx, value in enumerate([1] * module.in_features)] + module.weight.data = module.weight.data[:, idx_in] + + +def prune_batch_norm(module, value): + """Prune Batch Norm.""" + if not isinstance(module, torch.nn.BatchNorm2d) or not value: + return + module.num_features = sum(value) + if sum(value) != module.num_features: + raise ValueError("Features is wrong.") + idx = [idx for idx, value in enumerate(value) if value == 1] + weights = {**module._parameters, **module._buffers} + if 'num_batches_tracked' in weights: + weights.pop('num_batches_tracked') + for name, weight in weights.items(): + prune_weight = weight[idx] + if name == 'running_mean': + module.running_mean.data = prune_weight + elif name == 'running_var': + module.running_var.data = prune_weight + elif name == 'weight': + module.weight.data = prune_weight + elif name == 'bias': + module.bias.data = prune_weight + + +def prune_dag_model(model): + """Prune Dag model.""" + for name, node in model.named_nodes(): + prune_conv2d(node.module, node.c_in, node.c_out) + prune_batch_norm(node.module, node.c_out) + prune_linear(node.module, node.c_in) + if node.module_type == 'torch_tensor_view': + if node.c_in and len(node.c_in) != sum(node.c_in) and node.saved_args and len(node.saved_args) > 1: + node.saved_args = tuple([node.saved_args[0], node.saved_args[1] // len(node.c_in) * sum(node.c_in)]) + return model + + +def prune_model(model, dag): + """Prune Dag model.""" + for name, module in model.named_modules(): + node = dag.module_map.get(name) + if not node: + continue + prune_conv2d(module, node.c_in, node.c_out) + prune_batch_norm(module, node.c_out) + prune_linear(module, node.c_in) + return model + + +def mask_model(model, dag): + """Mask model.""" + for name, module in model.named_modules(): + node = dag.module_map.get(name) + if not node: + continue + mask_conv2d(module, node.c_in, node.c_out) + mask_batch_norm(module, node.c_out) + mask_linear(module, node.c_in) + return model + + +def named_pruned_modules(model): + """Get call pruned modules.""" + for name, module in model.named_modules(): + if is_conv2d(module): + yield name, module + + +def decode(model, desc, strategy=None): + """Decode desc into mask code.""" + mask_code_desc = {} + trans = MaskCodeTransformer(strategy) + for name, rate in desc.items(): + node_name = '.'.join(name.split('.')[:-1]) + arch_type = name.split('.')[-1] + if node_name not in model.module_map: + continue + node_channels = model.module_map[node_name].module.out_channels + if arch_type == 'prune_d_rate': + select_idx = round(node_channels * rate / 100 / 16) * 16 + select_idx = select_idx if select_idx > 16 else node_channels + else: + select_idx = node_channels * rate // 100 + mask_code_desc[node_name + '.out_channels'] = trans(model, select_idx, node_channels, node_name) + return mask_code_desc + + +class MaskCodeTransformer(object): + """Transform mask code.""" + + def 
__init__(self, strategy=''): + self.strategy = strategy + + def __call__(self, model, select_idx, node_channels, node_name): + """Transform.""" + if self.strategy == 'kf_scale': + beta = kf_scale_dict.get(node_name + ".kf_scale").cpu() + next_node = model.module_map[node_name].child_nodes[0] + bn_weight = 1 + if next_node.module_type == "BatchNorm2d": + bn_weight = next_node.module.weight.data.abs().cpu() + score = bn_weight * (beta - (1 - beta)).squeeze() + _, idx = score.sort() + idx = idx.numpy().tolist() + idx.reverse() + select_idx = idx[:select_idx] + return [1 if idx in select_idx else 0 for idx in range(node_channels)] + elif self.strategy == 'l1': + node = model.module_map[node_name] + weight = node.module.weight.data.cpu() + l1_norm = torch.norm(weight.view(len(weight), -1), p=1, dim=1) + _, idx = l1_norm.sort() + idx = idx.numpy().tolist() + idx.reverse() + select_idx = idx[:select_idx] + return [1 if idx in select_idx else 0 for idx in range(node_channels)] + else: + return [1 if idx < select_idx else 0 for idx in range(node_channels)] + + +def compress(model, desc, strategy=None, prune_type='prune'): + """Do compress model.""" + dag_cls = ClassFactory.get_cls(ClassType.NETWORK, "DAGFactory") + dag_model = dag_cls(model=copy.deepcopy(model)).get_model() + desc = decode(dag_model, desc, strategy) + dag_model.insight_node_relations(desc) + if prune_type == 'mask': + return mask_model(copy.deepcopy(model), dag_model) + return prune_model(copy.deepcopy(model), dag_model) diff --git a/vega/algorithms/compression/prune_dag/__init__.py b/vega/algorithms/compression/prune_dag/__init__.py deleted file mode 100644 index 6433b10..0000000 --- a/vega/algorithms/compression/prune_dag/__init__.py +++ /dev/null @@ -1,5 +0,0 @@ -from .prune_dag import PruneDAGSearchSpace, AdaptiveBatchNormalizationCallback, SCOPDAGSearchSpace -from .knockoff_callback import KnockoffFeaturesCallback - -__all__ = ["PruneDAGSearchSpace", "AdaptiveBatchNormalizationCallback", "SCOPDAGSearchSpace", - "KnockoffFeaturesCallback"] diff --git a/vega/algorithms/compression/prune_dag/dag_relations.py b/vega/algorithms/compression/prune_dag/dag_relations.py deleted file mode 100644 index 7b9574a..0000000 --- a/vega/algorithms/compression/prune_dag/dag_relations.py +++ /dev/null @@ -1,126 +0,0 @@ -# -*- coding: utf-8 -*- - -# Copyright (C) 2020. Huawei Technologies Co., Ltd. All rights reserved. -# This program is free software; you can redistribute it and/or modify -# it under the terms of the MIT License. -# This program is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# MIT License for more details. -"""Dag relations.""" -import vega - -if vega.is_torch_backend(): - import torch - from torch.nn import Conv2d, Linear -elif vega.is_ms_backend(): - from mindspore.nn import Conv2d, Dense as Linear - - -def is_conv2d(module): - """Determine Conv2d.""" - # depth-wise convolution not in pruned search space. 
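The pruning helpers above all consume the same binary mask-code convention: a list with one entry per channel, where 1 keeps the channel and 0 drops it (prune mode) or zeroes its weights (mask mode); `decode` turns a percentage rate from the search space into such a code, rounding the kept-channel count to a multiple of 16 for `prune_d_rate`. A small standalone illustration of the out-channel case, assuming plain PyTorch (the toy shapes are arbitrary):

```python
# Toy illustration of the mask-code convention used by prune_conv2d_out_channels:
# keep the output channels whose code is 1 and slice the weight accordingly.
import torch

conv = torch.nn.Conv2d(in_channels=3, out_channels=8, kernel_size=3)
code = [1, 0, 1, 1, 0, 1, 1, 0]                 # one entry per output channel
keep = [i for i, v in enumerate(code) if v == 1]

conv.weight.data = conv.weight.data[keep, :, :, :]   # weight shape: (out, in, kH, kW)
if conv.bias is not None:
    conv.bias.data = conv.bias.data[keep]
conv.out_channels = len(keep)

print(conv.weight.shape)   # torch.Size([5, 3, 3, 3])
```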
- return isinstance(module, Conv2d) and not is_depth_wise_conv(module) - - -def is_depth_wise_conv(module): - """Determine Conv2d.""" - if hasattr(module, "groups"): - return module.groups != 1 and module.in_channels == module.out_channels - elif hasattr(module, "group"): - return module.group != 1 and module.in_channels == module.out_channels - - -def is_connection_node(node): - """Determine is connection node.""" - return node.is_operator_conn_module or len(node.child_nodes) > 1 or node.module_type == 'torch_func_cat' - - -def reset_c_out_node(node): - """Determine is connection node.""" - if isinstance(node.module, Linear): - return None - else: - return node.c_out - - -def check_and_export_model(pruned_model, dummy_input): - """Check and export model to onnx file.""" - dummy_input = dummy_input or torch.ones(1, 3, 224, 224) - torch.onnx.export(pruned_model, dummy_input, "pruned.onnx") - - -def sub_blocks_relation_search(in_node, c_out=None): - """Search relations of blocks.""" - nodes_in_block = [] - c_nodes = [in_node] - while c_nodes: - node = c_nodes.pop() - nodes_in_block.append(node) - if isinstance(node.module, Conv2d): - continue - for parent_node in node.parent_nodes: - if is_connection_node(parent_node): - c_out = parent_node.c_out - else: - c_nodes.append(parent_node) - for node in nodes_in_block: - if not isinstance(node.module, Conv2d): - node.c_in = c_out - node.c_out = c_out - return nodes_in_block - - -def sub_cat_relation_search(in_node, c_out=None): - """Search relations of blocks.""" - nodes_in_block = [] - c_nodes = [in_node] - while c_nodes: - node = c_nodes.pop() - nodes_in_block.append(node) - for parent_node in node.parent_nodes: - if not is_connection_node(parent_node): - c_nodes.append(parent_node) - if vega.is_torch_backend(): - for node in nodes_in_block: - if is_conv2d(node.module): - break - if not isinstance(node.module, Conv2d): - node.c_in = c_out - node.c_out = c_out - elif vega.is_ms_backend(): - for node in nodes_in_block[1:]: - if node.child_nodes: - node.c_out = node.child_nodes[0].c_in - if not isinstance(node.module, Conv2d): - node.c_in = node.c_out - return nodes_in_block - - -def node_relations_search(model, desc): - """Search relations of dag node.""" - for name, node in model.named_nodes(): - c_out = desc.get(node.name + '.out_channels') - if c_out and not node.c_out: - node.c_out = c_out - else: - for parent_node in node.parent_nodes: - if node.module_type == 'torch_func_cat': - cat_c_outs = [] - for n in node.parent_nodes: - cat_c_outs.extend(n.c_out) - node.c_out = cat_c_outs - else: - node.c_out = min(parent_node.c_out, node.c_out) if node.c_out else parent_node.c_out - if is_connection_node(parent_node): - break - node.c_out = reset_c_out_node(node) - for child_node in node.child_nodes: - if not child_node.c_in: - child_node.c_in = node.c_out - if is_connection_node(node): - if node.module_type == 'torch_func_cat': - sub_cat_relation_search(node, node.c_out) - else: - sub_blocks_relation_search(node, node.c_out) - return model diff --git a/vega/algorithms/compression/prune_dag/kncokoff_generator.py b/vega/algorithms/compression/prune_dag/kncokoff_generator.py deleted file mode 100644 index 2f4ae2d..0000000 --- a/vega/algorithms/compression/prune_dag/kncokoff_generator.py +++ /dev/null @@ -1,140 +0,0 @@ -# -*- coding: utf-8 -*- - -# Copyright (C) 2020. Huawei Technologies Co., Ltd. All rights reserved. -# This program is free software; you can redistribute it and/or modify -# it under the terms of the MIT License. 
-# This program is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# MIT License for more details. - -"""This is a class for KFGenerator.""" -import torch -import torch.nn as nn -import torch.nn.functional as F -from torch.nn import init -import math - - -class KFGenerator(nn.Module): - """KFGenerator class.""" - - def __init__(self, weight_file_path=None): - super().__init__() - self.kf_class = ResNetGenerator() - self.kf_class.load_state_dict(torch.load(weight_file_path)['model']) - - def forward(self, data): - """Generate a new dataset.""" - with torch.no_grad(): - kf_weights = torch.empty(len(data), 128, dtype=torch.float32).normal_().cuda() - kf = self.kf_class(kf_weights) - return torch.nn.functional.interpolate(kf, size=data.shape[3]) - - -def _upsample(x): - h, w = x.size()[2:] - return F.interpolate(x, size=(h * 2, w * 2), mode='bilinear', align_corners=False) - - -class Block(nn.Module): - """Generator Block.""" - - def __init__(self, in_ch, out_ch, h_ch=None, ksize=3, pad=1, - activation=F.relu, upsample=False, num_classes=0): - super(Block, self).__init__() - - self.activation = activation - self.upsample = upsample - self.learnable_sc = in_ch != out_ch or upsample - if h_ch is None: - h_ch = out_ch - self.num_classes = num_classes - - # Register layrs - self.c1 = nn.Conv2d(in_ch, h_ch, ksize, 1, pad) - self.c2 = nn.Conv2d(h_ch, out_ch, ksize, 1, pad) - - self.b1 = nn.BatchNorm2d(in_ch) - self.b2 = nn.BatchNorm2d(h_ch) - if self.learnable_sc: - self.c_sc = nn.Conv2d(in_ch, out_ch, 1) - - def _initialize(self): - init.xavier_uniform_(self.c1.weight.tensor, gain=math.sqrt(2)) - init.xavier_uniform_(self.c2.weight.tensor, gain=math.sqrt(2)) - if self.learnable_sc: - init.xavier_uniform_(self.c_sc.weight.tensor, gain=1) - - def forward(self, x, y=None, z=None, **kwargs): - """Call forward function.""" - return self.shortcut(x) + self.residual(x, y, z) - - def shortcut(self, x, **kwargs): - """Call short cut.""" - if self.learnable_sc: - if self.upsample: - h = _upsample(x) - h = self.c_sc(h) - return h - else: - return x - - def residual(self, x, y=None, z=None, **kwargs): - """Do residual.""" - if y is not None: - h = self.b1(x, y, **kwargs) - else: - h = self.b1(x) - h = self.activation(h) - if self.upsample: - h = _upsample(h) - h = self.c1(h) - if y is not None: - h = self.b2(h, y, **kwargs) - else: - h = self.b2(h) - return self.c2(self.activation(h)) - - -class ResNetGenerator(nn.Module): - """Generator generates 64x64.""" - - def __init__(self, num_features=64, dim_z=128, bottom_width=4, - activation=F.relu, num_classes=0, distribution='normal'): - super(ResNetGenerator, self).__init__() - self.num_features = num_features - self.dim_z = dim_z - self.bottom_width = bottom_width - self.activation = activation - self.num_classes = num_classes - self.distribution = distribution - - self.l1 = nn.Linear(dim_z, 16 * num_features * bottom_width ** 2) - - self.block2 = Block(num_features * 16, num_features * 8, - activation=activation, upsample=True, - num_classes=num_classes) - self.block3 = Block(num_features * 8, num_features * 4, - activation=activation, upsample=True, - num_classes=num_classes) - self.block4 = Block(num_features * 4, num_features * 2, - activation=activation, upsample=True, - num_classes=num_classes) - self.block5 = Block(num_features * 2, num_features, - activation=activation, upsample=True, - num_classes=num_classes) - self.b6 = 
nn.BatchNorm2d(num_features) - self.conv6 = nn.Conv2d(num_features, 3, 1, 1) - - def _initialize(self): - init.xavier_uniform_(self.l1.weight.tensor) - init.xavier_uniform_(self.conv7.weight.tensor) - - def forward(self, z, y=None, **kwargs): - """Call forward function.""" - h = self.l1(z).view(z.size(0), -1, self.bottom_width, self.bottom_width) - for i in range(2, 6): - h = getattr(self, 'block{}'.format(i))(h, y, **kwargs) - h = self.activation(self.b6(h)) - return torch.tanh(self.conv6(h)) diff --git a/vega/algorithms/compression/prune_dag/knockoff_callback.py b/vega/algorithms/compression/prune_dag/knockoff_callback.py deleted file mode 100644 index f871ee6..0000000 --- a/vega/algorithms/compression/prune_dag/knockoff_callback.py +++ /dev/null @@ -1,133 +0,0 @@ -# -*- coding:utf-8 -*- - -# Copyright (C) 2020. Huawei Technologies Co., Ltd. All rights reserved. -# This program is free software; you can redistribute it and/or modify -# it under the terms of the MIT License. -# This program is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# MIT License for more details. -"""This is KnockoffFeaturesCallback.""" -import logging -import torch -from torch.nn.parameter import Parameter - -import vega -from vega.common import ClassFactory, ClassType -from vega.modules.module import Module -from vega.trainer.callbacks.callback import Callback -from .kncokoff_generator import KFGenerator -from vega.common.general import General - - -@ClassFactory.register(ClassType.CALLBACK) -class KnockoffFeaturesCallback(Callback): - """Knock Off features call back.""" - - def __init__(self): - """Initialize ModelCheckpoint callback.""" - super(KnockoffFeaturesCallback, self).__init__() - self.priority = 210 - - def init_trainer(self, logs=None): - """Be called before train.""" - if not vega.is_torch_backend(): - return - logging.info("Start Kf scale training.") - first_conv = True - for name, module in self.trainer.model.named_modules(): - if not isinstance(module, torch.nn.Conv2d): - continue - if first_conv and module.kernel_size == (7, 7): - first_conv = False - else: - change_module(self.trainer.model, name, KfConv2d(module)) - - setattr(self.trainer.model, "generator", KFGenerator(self.trainer.config.generator_model_file)) - for name, params in self.trainer.model.named_parameters(): - if "kf_scale" in name: - params.requires_grad = True - else: - params.requires_grad = False - self.trainer.model.cuda() - - def before_train(self, logs=None): - """Run before train.""" - if isinstance(self.trainer.model, torch.nn.DataParallel): - self.trainer.model.module.generator.eval() - else: - self.trainer.model.generator.eval() - - def generate_input_hook(module, inputs): - """Define hook function to generate dataset.""" - data = inputs[0] - if isinstance(module, torch.nn.DataParallel): - input_list = [] - kf_input = module.module.generator(data) - ngpu = General.devices_per_trainer - num_pgpu = data.shape[0] // ngpu - for igpu in range(ngpu): - input_list.append(torch.cat([data[igpu * num_pgpu: (igpu + 1) * num_pgpu], - kf_input[igpu * num_pgpu:(igpu + 1) * num_pgpu]], dim=0)) - return torch.cat(input_list, dim=0) - return torch.cat((data, module.generator(data)), dim=0) - - def split_kf_output_hook(module, inputs, result): - """Define hook function to split output.""" - if isinstance(module, torch.nn.DataParallel): - output_list = [] - ngpu = General.devices_per_trainer - num_pgpu = 
result.shape[0] // ngpu - for igpu in range(ngpu): - output_list.append(result[igpu * num_pgpu * 2: igpu * num_pgpu * 2 + num_pgpu]) - return torch.cat(output_list, dim=0) - return result[: result.size(0) // 2, :] - - self.trainer.model.register_forward_pre_hook(generate_input_hook) - self.trainer.model.register_forward_hook(split_kf_output_hook) - - def after_train_step(self, batch_index, logs=None): - """Clamp kf scale.""" - for module in self.trainer.model.modules(): - if isinstance(module, KfConv2d): - module.kf_scale.data.clamp_(min=0, max=1) - - def after_train(self, logs=None): - """Save kf scale model.""" - self._save_model() - - def _save_model(self): - if vega.is_torch_backend(): - state_dict = {k: v for k, v in self.trainer.model.state_dict().items() if "kf_scale" in k} - torch.save(state_dict, self.trainer.weights_file) - - -def change_module(model, name, entity): - """Chane modules.""" - if not entity: - return - tokens = name.split('.') - attr_name = tokens[-1] - parent_names = tokens[:-1] - for s in parent_names: - model = getattr(model, s) - setattr(model, attr_name, entity) - - -@ClassFactory.register(ClassType.NETWORK) -class KfConv2d(Module): - """Knock off Conv2d.""" - - def __init__(self, org_cong): - super(KfConv2d, self).__init__() - self.conv = org_cong - self.kf_scale = Parameter(torch.ones(1, org_cong.out_channels, 1, 1).cuda()) - self.kf_scale.data.fill_(0.5) - - def forward(self, x): - """Call forward functions.""" - x = self.conv(x) - if self.training: - num_ori = int(x.shape[0] // 2) - x = torch.cat([self.kf_scale * x[:num_ori] + (1 - self.kf_scale) * x[num_ori:], x[num_ori:]], dim=0) - return x diff --git a/vega/algorithms/compression/prune_dag/prune_dag.py b/vega/algorithms/compression/prune_dag/prune_dag.py deleted file mode 100644 index da47252..0000000 --- a/vega/algorithms/compression/prune_dag/prune_dag.py +++ /dev/null @@ -1,139 +0,0 @@ -# -*- coding:utf-8 -*- - -# Copyright (C) 2020. Huawei Technologies Co., Ltd. All rights reserved. -# This program is free software; you can redistribute it and/or modify -# it under the terms of the MIT License. -# This program is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# MIT License for more details. 
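For reference, a minimal PyTorch sketch of the knockoff-scale mixing idea that the removed KfConv2d above implements; the class ScaledKnockoffConv2d and its names are illustrative and are not part of the deleted module.

import torch
import torch.nn as nn


class ScaledKnockoffConv2d(nn.Module):
    """Illustrative wrapper: mix real and knockoff feature maps with a learnable per-channel scale in [0, 1]."""

    def __init__(self, conv: nn.Conv2d):
        super().__init__()
        self.conv = conv
        # One scale per output channel, initialized to 0.5 as in the removed KfConv2d.
        self.kf_scale = nn.Parameter(torch.full((1, conv.out_channels, 1, 1), 0.5))

    def forward(self, x):
        x = self.conv(x)
        if self.training:
            half = x.shape[0] // 2
            real, knockoff = x[:half], x[half:]
            # Blend real features with their knockoffs; keep the knockoff half unchanged.
            x = torch.cat([self.kf_scale * real + (1 - self.kf_scale) * knockoff, knockoff], dim=0)
        return x

As in after_train_step above, the scale would typically be clamped to [0, 1] after each optimizer step.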
-"""This is Operator SearchSpace.""" -import copy -import logging -import os -import vega -from vega.common import ClassFactory, ClassType -from vega.common.general import TaskConfig -from vega.trainer.callbacks import Callback -from vega.core.search_space import SearchSpace -from vega.core.pipeline.conf import PipeStepConfig -from .prune_events import prune_dag_model -from .dag_relations import node_relations_search, is_conv2d -from vega.model_zoo import ModelZoo -from vega.common.parameter_sharing import ParameterSharing - - -@ClassFactory.register(ClassType.SEARCHSPACE) -class PruneDAGSearchSpace(SearchSpace): - """Prune SearchSpace.""" - - @classmethod - def get_space(self, desc): - """Get model and input.""" - self.model = ModelZoo().get_model(PipeStepConfig.model.model_desc, PipeStepConfig.model.pretrained_model_file) - arch_params_key = '{}.out_channels' - search_space = [dict(key=arch_params_key.format(name), type="HALF", range=[module.out_channels]) - for name, module in self.model.named_modules() if is_conv2d(module)] - return {"hyperparameters": search_space} - - @classmethod - def to_desc(self, desc): - """Decode to model desc.""" - pruned_model = copy.deepcopy(self.model) - node_relations_search(pruned_model, desc) - prune_dag_model(pruned_model) - PipeStepConfig.model.pretrained_model_file = ParameterSharing().push(pruned_model, 'pruned_weights') - return pruned_model.to_desc() - - -@ClassFactory.register(ClassType.SEARCHSPACE) -class SCOPDAGSearchSpace(SearchSpace): - """SCOP DAG SearchSpace.""" - - @classmethod - def get_space(self, desc): - """Get model and input.""" - self.model = ModelZoo().get_model(PipeStepConfig.model.model_desc, PipeStepConfig.model.pretrained_model_file) - if not desc.get("hyperparameters"): - raise ValueError("hyperparameters should be config in SCOPDAGSearchSpace.") - search_space = [] - for item in desc.get("hyperparameters"): - arch_params_key = "{}." + item.get("key") - arch_type = item.get("type") - arch_type_range = item.get("range") - search_space.extend([dict(key=arch_params_key.format(name), type=arch_type, range=arch_type_range) - for name, module in self.model.named_modules() if is_conv2d(module)]) - # first conv not pruned. 
- search_space.pop(0) - return {"hyperparameters": search_space} - - @classmethod - def to_desc(self, desc): - """Decode to model desc.""" - pruned_model = copy.deepcopy(self.model) - desc = self._decode_fn(pruned_model, desc) - node_relations_search(pruned_model, desc) - prune_dag_model(pruned_model) - PipeStepConfig.model.pretrained_model_file = ParameterSharing().push(pruned_model, 'pruned_weights') - return pruned_model.to_desc() - - @classmethod - def _decode_fn(self, model, desc): - mask_code_desc = {} - kf_scale_dict = self._load_kf_scale() - if kf_scale_dict: - logging.info("Start prune with kf scale.") - for name, rate in desc.items(): - node_name = '.'.join(name.split('.')[:-1]) - arch_type = name.split('.')[-1] - if node_name not in model.module_map: - continue - node_channels = model.module_map[node_name].module.out_channels - if arch_type == 'prune_d_rate': - select_idx = round(node_channels * rate / 100 / 16) * 16 - select_idx = select_idx if select_idx > 16 else node_channels - else: - select_idx = node_channels * rate // 100 - if kf_scale_dict: - beta = kf_scale_dict.get(node_name + ".kf_scale").cpu() - next_node = model.module_map[node_name].child_nodes[0] - bn_weight = 1 - if next_node.module_type == "BatchNorm2d": - bn_weight = next_node.module.weight.data.abs().cpu() - score = bn_weight * (beta - (1 - beta)).squeeze() - _, idx = score.sort() - pruned_idx = idx[select_idx:].numpy().tolist() - idx_code = [1 if idx in pruned_idx else 0 for idx in range(node_channels)] - else: - idx_code = [1 if idx < select_idx else 0 for idx in range(node_channels)] - mask_code_desc[node_name + '.out_channels'] = idx_code - return mask_code_desc - - @classmethod - def _load_kf_scale(cls): - if not PipeStepConfig.model.kf_sacle_file: - return - import torch - file_path = PipeStepConfig.model.kf_sacle_file - file_path = file_path.replace("{local_base_path}", os.path.join(TaskConfig.local_base_path, TaskConfig.task_id)) - return torch.load(file_path) - - -@ClassFactory.register(ClassType.CALLBACK) -class AdaptiveBatchNormalizationCallback(Callback): - """Adaptive Batch Normalization.""" - - def before_train(self, logs=None): - """Freeze Conv2D and BatchNorm.""" - if not vega.is_torch_backend(): - return - import torch - for name, module in self.trainer.model.named_modules(): - if isinstance(module, torch.nn.Conv2d): - for name, parameter in module.named_parameters(): - parameter.requires_grad_(False) - elif isinstance(module, torch.nn.BatchNorm2d): - module.weight.requires_grad = False - module.bias.requires_grad = False - learnable_params = [param for param in self.trainer.model.parameters() if param.requires_grad] - logging.info("Adaptive BatchNormalization learnable params size: {}".format(len(learnable_params))) diff --git a/vega/algorithms/compression/prune_dag/prune_events.py b/vega/algorithms/compression/prune_dag/prune_events.py deleted file mode 100644 index e91b13c..0000000 --- a/vega/algorithms/compression/prune_dag/prune_events.py +++ /dev/null @@ -1,102 +0,0 @@ -# -*- coding: utf-8 -*- - -# Copyright (C) 2020. Huawei Technologies Co., Ltd. All rights reserved. -# This program is free software; you can redistribute it and/or modify -# it under the terms of the MIT License. -# This program is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# MIT License for more details. 
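As a simplified illustration of the mask-code construction in _decode_fn above: channels are scored by bn_weight * (beta - (1 - beta)), which equals bn_weight * (2 * beta - 1), and the highest-scoring channels are kept in a 0/1 mask. The helper below is hypothetical and omits the prune_d_rate rounding handled by the real decode step.

import torch


def channel_keep_mask(scores: torch.Tensor, num_keep: int) -> list:
    """Hypothetical helper: 0/1 mask that keeps the num_keep highest-scoring channels."""
    order = scores.argsort()               # channel indices, smallest score first
    kept = set(order[-num_keep:].tolist())  # indices of the num_keep largest scores
    return [1 if i in kept else 0 for i in range(scores.numel())]


# Example with made-up values for four channels.
bn_weight = torch.tensor([0.9, 0.1, 0.5, 0.7])
beta = torch.tensor([0.8, 0.2, 0.6, 0.4])
print(channel_keep_mask(bn_weight * (2 * beta - 1), num_keep=2))  # [1, 0, 1, 0]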
- -"""Prune DAG model.""" -import vega - -if vega.is_torch_backend(): - import torch - - -def prune_conv2d_out_channels(module, value): - """Prune out channels of Conv2d.""" - assert sum(value) == module.out_channels - out_channels_idx = [idx for idx, value in enumerate(value) if value == 1] - for name, weight in module._parameters.items(): - if weight is None: - continue - if name == 'weight': - module.weight.data = weight[out_channels_idx, :, :, :] - elif name == 'bias': - module.bias.data = weight[out_channels_idx] - - -def prune_conv2d_in_channels(module, value): - """Prune in channels of conv2d.""" - assert sum(value) == module.in_channels - in_channels_idx = [idx for idx, value in enumerate(value) if value == 1] - for name, weight in module._parameters.items(): - if weight is None or name != 'weight': - continue - if hasattr(module, "groups") and module.groups != 1: - # group and depth-wise convolution - # todo: not working on BINARY_CODE mode, mask code must be divisible by weight - module.groups = module.in_channels // weight.shape[1] - else: - prune_weight = weight[:, in_channels_idx, :, :] - module.weight.data = prune_weight - - -def prune_linear(module, value): - """Prune linear.""" - if sum(value) == module.in_features: - idx_in = [idx for idx, value in enumerate(value) if value == 1] - else: - idx_in = [idx for idx, value in enumerate([1] * module.in_features)] - module.weight.data = module.weight.data[:, idx_in] - - -def prune_batch_norm(module, value): - """Prune Batch Norm.""" - assert sum(value) == module.num_features - idx = [idx for idx, value in enumerate(value) if value == 1] - weights = {**module._parameters, **module._buffers} - if 'num_batches_tracked' in weights: - weights.pop('num_batches_tracked') - for name, weight in weights.items(): - prune_weight = weight[idx] - if name == 'running_mean': - module.running_mean.data = prune_weight - elif name == 'running_var': - module.running_var.data = prune_weight - elif name == 'weight': - module.weight.data = prune_weight - elif name == 'bias': - module.bias.data = prune_weight - - -def prune_dag_model(model): - """Prune Dag model.""" - for name, node in model.named_nodes(): - if isinstance(node.module, torch.nn.Conv2d): - if node.c_in: - node.module.in_channels = sum(node.c_in) - prune_conv2d_in_channels(node.module, node.c_in) - if node.c_out: - node.module.out_channels = sum(node.c_out) - prune_conv2d_out_channels(node.module, node.c_out) - elif isinstance(node.module, torch.nn.BatchNorm2d): - if node.c_in: - node.module.num_features = sum(node.c_in) - node.c_out = node.c_in - prune_batch_norm(node.module, node.c_in) - elif isinstance(node.module, torch.nn.Linear): - if node.c_in: - if sum(node.c_in) == len(node.c_in): - continue - if node.module.in_features == len(node.c_in): - node.module.in_features = sum(node.c_in) - else: - node.module.in_features = node.module.in_features // len(node.c_in) * sum(node.c_in) - prune_linear(node.module, node.c_in) - elif node.module_type == 'torch_tensor_view': - if node.c_in and len(node.c_in) != sum(node.c_in) and node.saved_args and len(node.saved_args) > 1: - node.saved_args = tuple([node.saved_args[0], node.saved_args[1] // len(node.c_in) * sum(node.c_in)]) - return model diff --git a/vega/algorithms/compression/prune_ea/conf.py b/vega/algorithms/compression/prune_ea/conf.py index 6c1d9cf..889f9f4 100644 --- a/vega/algorithms/compression/prune_ea/conf.py +++ b/vega/algorithms/compression/prune_ea/conf.py @@ -1,14 +1,20 @@ # -*- coding:utf-8 -*- # Copyright (C) 2020. 
Huawei Technologies Co., Ltd. All rights reserved. -# This program is free software; you can redistribute it and/or modify -# it under the terms of the MIT License. -# This program is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# MIT License for more details. - -"""Defined Configs.""" +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""Defined Prune-EA configs.""" from vega.core.search_algs import EAConfig from vega.common import ConfigSerializable diff --git a/vega/algorithms/compression/prune_ea/prune_codec.py b/vega/algorithms/compression/prune_ea/prune_codec.py index c136de3..6955ca5 100644 --- a/vega/algorithms/compression/prune_ea/prune_codec.py +++ b/vega/algorithms/compression/prune_ea/prune_codec.py @@ -1,12 +1,18 @@ # -*- coding: utf-8 -*- # Copyright (C) 2020. Huawei Technologies Co., Ltd. All rights reserved. -# This program is free software; you can redistribute it and/or modify -# it under the terms of the MIT License. -# This program is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# MIT License for more details. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. """Codec of Prune EA.""" import copy diff --git a/vega/algorithms/compression/prune_ea/prune_ea.py b/vega/algorithms/compression/prune_ea/prune_ea.py index f3b3600..5cd6878 100644 --- a/vega/algorithms/compression/prune_ea/prune_ea.py +++ b/vega/algorithms/compression/prune_ea/prune_ea.py @@ -1,21 +1,27 @@ # -*- coding: utf-8 -*- # Copyright (C) 2020. Huawei Technologies Co., Ltd. All rights reserved. -# This program is free software; you can redistribute it and/or modify -# it under the terms of the MIT License. -# This program is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# MIT License for more details. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. """Evolution Algorithm used to prune model.""" import logging import random import numpy as np -from .conf import PruneConfig from vega.common import ClassFactory, ClassType from vega.report import ReportServer from vega.core.search_algs import SearchAlgorithm +from .conf import PruneConfig @ClassFactory.register(ClassType.SEARCH_ALGORITHM) @@ -80,7 +86,6 @@ def search(self): if self.random_count < self.random_samples: self.random_count += 1 desc = self._random_sample() - # desc.update({"trainer.codec": dict(desc)}) return self.random_count, desc records = ReportServer().get_pareto_front_records(self.step_name, self.num_individual) codes = [record.desc.get('backbone').get('encoding') for record in records] @@ -93,10 +98,8 @@ def search(self): else: encoding1, encoding2 = random.sample(codes, 2) choice = random.randint(0, 1) - # mutate if choice == 0: encoding_new = self.mutatation(encoding1) - # crossover else: encoding_new, _ = self.crossover(encoding1, encoding2) self.ea_count += 1 diff --git a/vega/algorithms/compression/prune_ea/prune_search_space.py b/vega/algorithms/compression/prune_ea/prune_search_space.py index 907bcb6..0bc27e7 100644 --- a/vega/algorithms/compression/prune_ea/prune_search_space.py +++ b/vega/algorithms/compression/prune_ea/prune_search_space.py @@ -1,12 +1,18 @@ # -*- coding:utf-8 -*- # Copyright (C) 2020. Huawei Technologies Co., Ltd. All rights reserved. -# This program is free software; you can redistribute it and/or modify -# it under the terms of the MIT License. -# This program is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# MIT License for more details. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. """Check and Define Prune Model SearchSpace.""" diff --git a/vega/algorithms/compression/prune_ea/prune_trainer_callback.py b/vega/algorithms/compression/prune_ea/prune_trainer_callback.py index 64baf28..5c7075c 100644 --- a/vega/algorithms/compression/prune_ea/prune_trainer_callback.py +++ b/vega/algorithms/compression/prune_ea/prune_trainer_callback.py @@ -1,12 +1,18 @@ # -*- coding: utf-8 -*- # Copyright (C) 2020. Huawei Technologies Co., Ltd. All rights reserved. -# This program is free software; you can redistribute it and/or modify -# it under the terms of the MIT License. -# This program is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# MIT License for more details. 
+# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. """Trainer for searching pruned model.""" import logging diff --git a/vega/algorithms/compression/prune_ea_mobilenet/conf.py b/vega/algorithms/compression/prune_ea_mobilenet/conf.py index 6c1d9cf..adb0b11 100644 --- a/vega/algorithms/compression/prune_ea_mobilenet/conf.py +++ b/vega/algorithms/compression/prune_ea_mobilenet/conf.py @@ -1,14 +1,20 @@ # -*- coding:utf-8 -*- # Copyright (C) 2020. Huawei Technologies Co., Ltd. All rights reserved. -# This program is free software; you can redistribute it and/or modify -# it under the terms of the MIT License. -# This program is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# MIT License for more details. - -"""Defined Configs.""" +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""Prune EA MobileNet Configs.""" from vega.core.search_algs import EAConfig from vega.common import ConfigSerializable diff --git a/vega/algorithms/compression/prune_ea_mobilenet/prune_mobilenet_codec.py b/vega/algorithms/compression/prune_ea_mobilenet/prune_mobilenet_codec.py index 177698b..485dbc4 100644 --- a/vega/algorithms/compression/prune_ea_mobilenet/prune_mobilenet_codec.py +++ b/vega/algorithms/compression/prune_ea_mobilenet/prune_mobilenet_codec.py @@ -1,12 +1,18 @@ # -*- coding: utf-8 -*- # Copyright (C) 2020. Huawei Technologies Co., Ltd. All rights reserved. -# This program is free software; you can redistribute it and/or modify -# it under the terms of the MIT License. -# This program is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# MIT License for more details. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
"""Codec of Prune EA.""" import copy diff --git a/vega/algorithms/compression/prune_ea_mobilenet/prune_trainer_callback.py b/vega/algorithms/compression/prune_ea_mobilenet/prune_trainer_callback.py index 4b3cb96..0215919 100644 --- a/vega/algorithms/compression/prune_ea_mobilenet/prune_trainer_callback.py +++ b/vega/algorithms/compression/prune_ea_mobilenet/prune_trainer_callback.py @@ -1,12 +1,18 @@ # -*- coding: utf-8 -*- # Copyright (C) 2020. Huawei Technologies Co., Ltd. All rights reserved. -# This program is free software; you can redistribute it and/or modify -# it under the terms of the MIT License. -# This program is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# MIT License for more details. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. """Trainer for searching pruned model.""" import copy diff --git a/vega/algorithms/compression/quant_ea/conf.py b/vega/algorithms/compression/quant_ea/conf.py index 24a951c..6f36ab5 100644 --- a/vega/algorithms/compression/quant_ea/conf.py +++ b/vega/algorithms/compression/quant_ea/conf.py @@ -1,12 +1,18 @@ # -*- coding:utf-8 -*- # Copyright (C) 2020. Huawei Technologies Co., Ltd. All rights reserved. -# This program is free software; you can redistribute it and/or modify -# it under the terms of the MIT License. -# This program is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# MIT License for more details. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. """Defined Configs.""" diff --git a/vega/algorithms/compression/quant_ea/quant_codec.py b/vega/algorithms/compression/quant_ea/quant_codec.py index 6bd89ee..02cd36e 100644 --- a/vega/algorithms/compression/quant_ea/quant_codec.py +++ b/vega/algorithms/compression/quant_ea/quant_codec.py @@ -1,12 +1,18 @@ # -*- coding: utf-8 -*- # Copyright (C) 2020. Huawei Technologies Co., Ltd. All rights reserved. -# This program is free software; you can redistribute it and/or modify -# it under the terms of the MIT License. -# This program is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# MIT License for more details. 
+# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. """Codec for searching quantization model.""" import copy diff --git a/vega/algorithms/compression/quant_ea/quant_ea.py b/vega/algorithms/compression/quant_ea/quant_ea.py index 154fe57..c41dc32 100644 --- a/vega/algorithms/compression/quant_ea/quant_ea.py +++ b/vega/algorithms/compression/quant_ea/quant_ea.py @@ -1,21 +1,27 @@ # -*- coding: utf-8 -*- # Copyright (C) 2020. Huawei Technologies Co., Ltd. All rights reserved. -# This program is free software; you can redistribute it and/or modify -# it under the terms of the MIT License. -# This program is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# MIT License for more details. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. """SEARCH_ALGORITHM for searching quantization model.""" import logging import random import numpy as np -from .conf import QuantConfig from vega.common import ClassFactory, ClassType from vega.report import ReportServer from vega.core.search_algs import SearchAlgorithm +from .conf import QuantConfig @ClassFactory.register(ClassType.SEARCH_ALGORITHM) diff --git a/vega/algorithms/compression/quant_ea/quant_trainer_callback.py b/vega/algorithms/compression/quant_ea/quant_trainer_callback.py index afb2267..545a4ad 100644 --- a/vega/algorithms/compression/quant_ea/quant_trainer_callback.py +++ b/vega/algorithms/compression/quant_ea/quant_trainer_callback.py @@ -1,12 +1,18 @@ # -*- coding: utf-8 -*- # Copyright (C) 2020. Huawei Technologies Co., Ltd. All rights reserved. -# This program is free software; you can redistribute it and/or modify -# it under the terms of the MIT License. -# This program is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# MIT License for more details. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
"""TrainWorker for searching quantization model.""" import logging diff --git a/vega/algorithms/data_augmentation/__init__.py b/vega/algorithms/data_augmentation/__init__.py index 467ca6a..7e4b7d0 100644 --- a/vega/algorithms/data_augmentation/__init__.py +++ b/vega/algorithms/data_augmentation/__init__.py @@ -1,12 +1,18 @@ # -*- coding:utf-8 -*- # Copyright (C) 2020. Huawei Technologies Co., Ltd. All rights reserved. -# This program is free software; you can redistribute it and/or modify -# it under the terms of the MIT License. -# This program is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# MIT License for more details. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. """Lazy import data augmentation algorithms.""" diff --git a/vega/algorithms/data_augmentation/common/pba.py b/vega/algorithms/data_augmentation/common/pba.py index 61917ae..cf470b6 100644 --- a/vega/algorithms/data_augmentation/common/pba.py +++ b/vega/algorithms/data_augmentation/common/pba.py @@ -1,12 +1,18 @@ # -*- coding: utf-8 -*- # Copyright (C) 2020. Huawei Technologies Co., Ltd. All rights reserved. -# This program is free software; you can redistribute it and/or modify -# it under the terms of the MIT License. -# This program is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# MIT License for more details. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. """ Population Based Augmentation Algorithm. @@ -30,8 +36,8 @@ import shutil import numpy as np import pandas as pd -from .status_type import StatusType from vega.common import FileOps +from .status_type import StatusType class PBA(object): diff --git a/vega/algorithms/data_augmentation/common/status_type.py b/vega/algorithms/data_augmentation/common/status_type.py index f21a058..84b1bae 100644 --- a/vega/algorithms/data_augmentation/common/status_type.py +++ b/vega/algorithms/data_augmentation/common/status_type.py @@ -1,12 +1,18 @@ # -*- coding:utf-8 -*- # Copyright (C) 2020. Huawei Technologies Co., Ltd. All rights reserved. -# This program is free software; you can redistribute it and/or modify -# it under the terms of the MIT License. 
-# This program is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# MIT License for more details. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. """StatusType.""" from enum import Enum diff --git a/vega/algorithms/data_augmentation/cyclesr/cyclesr_trainer_callback.py b/vega/algorithms/data_augmentation/cyclesr/cyclesr_trainer_callback.py index 8a4f187..bc92f92 100644 --- a/vega/algorithms/data_augmentation/cyclesr/cyclesr_trainer_callback.py +++ b/vega/algorithms/data_augmentation/cyclesr/cyclesr_trainer_callback.py @@ -1,12 +1,18 @@ # -*- coding: utf-8 -*- # Copyright (C) 2020. Huawei Technologies Co., Ltd. All rights reserved. -# This program is free software; you can redistribute it and/or modify -# it under the terms of the MIT License. -# This program is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# MIT License for more details. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. """This is the class for cyclesr trainworker.""" import datetime @@ -14,10 +20,10 @@ import itertools import os import time -import numpy as np -import torch import json +import torch from tensorboardX import SummaryWriter +import numpy as np import vega from vega.datasets import Adapter from vega.datasets.common.dataset import Dataset @@ -32,9 +38,8 @@ try: import horovod.torch as hvd -except Exception: - # logging.warning("horovod not been installed, {}".format(str(e))) - pass +except Exception as e: + logging.debug("horovod not been installed, {}".format(str(e))) # data-processing module from .utils import find_best_PSNR @@ -137,27 +142,24 @@ def _train(self, trainloader, writer, epoch, model, print_freq=10): model.set_mode('train') step = epoch * num_batches + batch_idx data_time.update(time.time() - end) - ####################################################################### model.optimize_CycleSR(data, epoch) # caclute psnr during training losses = model.get_current_losses() for name, loss in losses.items(): - writer.add_scalar("loss" + name, loss, step) # store the loss in tensorboardX + writer.add_scalar("loss" + name, loss, step) batchsize = data['X'].size(0) loss_sr.update(losses['SR'], batchsize) loss_ga.update(losses['G'], batchsize) loss_cycA.update(losses['rec_X'], batchsize) - # logging.info("HR: {}. 
SR: {}".format(model.HR.data)) if epoch < 6: psnr = self.batch_psnr(model.HR.data, model.G_SR.data) else: psnr = self.batch_psnr(model.HR.data, model.SR.data) PSNRes.update(psnr, batchsize) - writer.add_scalar("training_psnr", psnr, step) # store the psnr + writer.add_scalar("training_psnr", psnr, step) batch_time.update(time.time() - end) - # print result if (batch_idx + 1) % print_freq == 0: if not vega.is_gpu_device() or (vega.is_gpu_device() and self.trainer.is_chief): logging.info('[epoch {0},iter {1}/{2}]\t' @@ -214,11 +216,11 @@ def _evalGAN(self, model, imgs, epoch, writer): real_Y = img['Y'].cuda() HR = img['HR'].cuda() fake_Y = model.netG(real_X) # G(X) - rec_X = model.netF(fake_Y) # F(G(X)) + rec_X = model.netF(fake_Y) # F(G(X)) fake_X = model.netF(real_Y) # F(Y) - rec_Y = model.netG(fake_X) # G(F(Y)) + rec_Y = model.netG(fake_X) # G(F(Y)) - G_SR = model.netSR(fake_Y) # SR(G(X)) + G_SR = model.netSR(fake_Y) # SR(G(X)) writer.add_image("G_SR" + str(i), TensorNorm((G_SR[0])), epoch) writer.add_image("HR" + str(i), TensorNorm((HR[0])), epoch) writer.add_image("Real_bicubic" + str(i), TensorNorm((real_X[0])), epoch) @@ -319,7 +321,6 @@ def _train_loop(self): self._train(train_dataloader, writer, epoch, self.model, print_freq=self.cfg.print_freq) train_time += round(time.time() - start_train_time) # validation - ############################################################################### if epoch % self.cfg.eval_epoch == 0: logging.info("==> Validng") self._evalGAN(self.model, val_gan_imgs, epoch, writer) @@ -359,9 +360,7 @@ def _save_checkpoint(self, epoch, best=False): self.worker_path, "model_{}.pth".format(name)) if vega.is_gpu_device() and torch.cuda.is_available(): - # torch.save(net.module.cpu().state_dict(), save_path) torch.save(net.module.state_dict(), save_path) - # net.cuda() if best: torch.save(net.module.state_dict(), best_file) elif vega.is_npu_device(): diff --git a/vega/algorithms/data_augmentation/cyclesr/utils.py b/vega/algorithms/data_augmentation/cyclesr/utils.py index 9d168ac..f1b94b7 100644 --- a/vega/algorithms/data_augmentation/cyclesr/utils.py +++ b/vega/algorithms/data_augmentation/cyclesr/utils.py @@ -1,12 +1,18 @@ # -*- coding: utf-8 -*- # Copyright (C) 2020. Huawei Technologies Co., Ltd. All rights reserved. -# This program is free software; you can redistribute it and/or modify -# it under the terms of the MIT License. -# This program is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# MIT License for more details. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
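For context, a minimal sketch of the per-batch PSNR that the CycleSR training loop above logs; psnr_db and max_val are illustrative assumptions and this standalone helper is distinct from both the trainer's batch_psnr and find_best_PSNR in this utils module.

import torch


def psnr_db(img_a: torch.Tensor, img_b: torch.Tensor, max_val: float = 255.0) -> float:
    """Hypothetical helper: peak signal-to-noise ratio (dB) between two same-shaped image tensors."""
    mse = torch.mean((img_a.float() - img_b.float()) ** 2)
    if mse == 0:
        return float("inf")
    return (10.0 * torch.log10(max_val ** 2 / mse)).item()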
"""This module contains simple helper functions.""" from __future__ import absolute_import @@ -162,5 +168,4 @@ def find_best_PSNR(HR, SR, crop_size): del psnr max_psnr = PSNR_list.max() del PSNR_list - # index = (flatten_index//PSNR_list.shape[1],flatten_index%PSNR_list.shape[1]) return max_psnr.cpu().item() diff --git a/vega/algorithms/data_augmentation/pba_conf.py b/vega/algorithms/data_augmentation/pba_conf.py index 7d42e1e..ce9836d 100644 --- a/vega/algorithms/data_augmentation/pba_conf.py +++ b/vega/algorithms/data_augmentation/pba_conf.py @@ -1,12 +1,18 @@ # -*- coding:utf-8 -*- # Copyright (C) 2020. Huawei Technologies Co., Ltd. All rights reserved. -# This program is free software; you can redistribute it and/or modify -# it under the terms of the MIT License. -# This program is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# MIT License for more details. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. """Defined Configs.""" from vega.common import ConfigSerializable diff --git a/vega/algorithms/data_augmentation/pba_hpo.py b/vega/algorithms/data_augmentation/pba_hpo.py index c646586..f7c5df0 100644 --- a/vega/algorithms/data_augmentation/pba_hpo.py +++ b/vega/algorithms/data_augmentation/pba_hpo.py @@ -1,12 +1,18 @@ # -*- coding:utf-8 -*- # Copyright (C) 2020. Huawei Technologies Co., Ltd. All rights reserved. -# This program is free software; you can redistribute it and/or modify -# it under the terms of the MIT License. -# This program is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# MIT License for more details. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. """Defined PBAHpo class.""" import os diff --git a/vega/algorithms/data_augmentation/pba_trainer_callback.py b/vega/algorithms/data_augmentation/pba_trainer_callback.py index c5f0ec4..c563aec 100644 --- a/vega/algorithms/data_augmentation/pba_trainer_callback.py +++ b/vega/algorithms/data_augmentation/pba_trainer_callback.py @@ -1,10 +1,16 @@ # Copyright (C) 2020. Huawei Technologies Co., Ltd. All rights reserved. -# This program is free software; you can redistribute it and/or modify -# it under the terms of the MIT License. 
-# This program is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# MIT License for more details. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. """The trainer program for pba.""" import logging diff --git a/vega/algorithms/fully_train/__init__.py b/vega/algorithms/fully_train/__init__.py deleted file mode 100644 index 3ccc792..0000000 --- a/vega/algorithms/fully_train/__init__.py +++ /dev/null @@ -1,6 +0,0 @@ -from vega.common.class_factory import ClassFactory - - -ClassFactory.lazy_register("vega.algorithms.fully_train", { - "resnet.resnet_trainer_callback": ["trainer:ResnetTrainer"] -}) diff --git a/vega/algorithms/fully_train/resnet/__init__.py b/vega/algorithms/fully_train/resnet/__init__.py deleted file mode 100644 index 45716d4..0000000 --- a/vega/algorithms/fully_train/resnet/__init__.py +++ /dev/null @@ -1 +0,0 @@ -from .resnet_trainer_callback import ResnetTrainer diff --git a/vega/algorithms/fully_train/resnet/resnet_trainer_callback.py b/vega/algorithms/fully_train/resnet/resnet_trainer_callback.py deleted file mode 100644 index 6c549ba..0000000 --- a/vega/algorithms/fully_train/resnet/resnet_trainer_callback.py +++ /dev/null @@ -1,166 +0,0 @@ -# -*- coding: utf-8 -*- - -# Copyright (C) 2020. Huawei Technologies Co., Ltd. All rights reserved. -# This program is free software; you can redistribute it and/or modify -# it under the terms of the MIT License. -# This program is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# MIT License for more details. 
- -"""Resnet Trainer.""" - -import os -from mindspore import context -from mindspore import Tensor -from mindspore.train import Model as MsModel -from mindspore.train.callback import ModelCheckpoint, CheckpointConfig, LossMonitor -from mindspore.parallel import set_algo_parameters -import vega -from vega.trainer.trainer_base import TrainerBase -from vega.common import ClassFactory, ClassType -import logging -from mindspore.communication.management import init as hccl_init -from mindspore.context import ParallelMode -from .src.resnet import resnet50 as resnet -from .src.dataset import create_dataset2 as create_dataset -from .src.CrossEntropySmooth import CrossEntropySmooth -from .src.lr_generator import get_lr -from mindspore.nn.optim import Momentum -import mindspore.nn as nn -import mindspore.common.initializer as weight_init -from vega.datasets.conf.dataset import DatasetConfig -from vega.trainer.callbacks.ms_callbacks import EvalCallBack -from vega.common.general import General - - -def init_weight(net): - """Initialize weight.""" - for _, cell in net.cells_and_names(): - if isinstance(cell, nn.Conv2d): - cell.weight.set_data(weight_init.initializer(weight_init.XavierUniform(), - cell.weight.shape, - cell.weight.dtype)) - if isinstance(cell, nn.Dense): - cell.weight.set_data(weight_init.initializer(weight_init.TruncatedNormal(), - cell.weight.shape, - cell.weight.dtype)) - - -def init_group_prams(net): - """Initialize group_prams.""" - decayed_params = [] - no_decayed_params = [] - for param in net.trainable_params(): - if 'beta' not in param.name and 'gamma' not in param.name and 'bias' not in param.name: - decayed_params.append(param) - else: - no_decayed_params.append(param) - - group_params = [{'params': decayed_params, 'weight_decay': 0.0001}, - {'params': no_decayed_params}, - {'order_params': net.trainable_params()}] - return group_params - - -@ClassFactory.register(ClassType.TRAINER) -class ResnetTrainer(TrainerBase): - """Trainer mindspore class.""" - - def build(self): - """Build the trainer by assembling the necessary components.""" - logging.debug("Trainer Config: {}".format(self.config)) - self._init_hps() - self.do_validation = False - self.use_syncbn = self.config.syncbn - if self.use_syncbn and vega.is_torch_backend(): - import apex - self.model = apex.parallel.convert_syncbn_model(self.model) - if not self.train_loader: - self.train_loader = self._init_dataloader(mode='train') - if not self.valid_loader: - self.valid_loader = self._init_dataloader(mode='val') - self.batch_num_train = len(self.train_loader) - self.batch_num_valid = len(self.valid_loader) - logging.debug("Trainer Config: {}".format(self.config)) - config = DatasetConfig().to_dict() - self.train_config = config['_class_data'].train - self.valid_config = config['_class_data'].val - self.loss = CrossEntropySmooth(sparse=self.config.loss.params.sparse, - reduction=self.config.loss.params.reduction, - smooth_factor=self.config.loss.params.smooth_factor, - num_classes=self.train_config.n_class) - self.metric_name = self.config.metric.type - - self.train_metrics = None - self.valid_metrics = self._init_metrics() - self.ms_metrics = self.valid_metrics() if isinstance(self.valid_metrics(), dict) else { - self.metric_name: self.valid_metrics()} - - self.net = resnet(class_num=self.train_config.n_class) - init_weight(net=self.net) - from mindspore.train.loss_scale_manager import FixedLossScaleManager - self.loss_scale = FixedLossScaleManager(self.config.loss_scale, drop_overflow_update=False) - - def init_env(self): 
- """Construct the trainer of Resnet.""" - super().init_env() - self._init_ms_context() - self._init_distributed_setting() - - def _train_epoch(self): - """Construct the trainer of Resnet.""" - try: - dataset = create_dataset(dataset_path=self.train_config.data_path + '/train', do_train=True, - repeat_num=1, batch_size=self.train_config.batch_size, target='Ascend', - distribute=True) - step_size = dataset.get_dataset_size() - - lr = Tensor( - get_lr(lr_init=self.config.lr_scheduler.params.lr_init, lr_end=self.config.lr_scheduler.params.lr_end, - lr_max=self.config.lr_scheduler.params.lr_max, - warmup_epochs=0, total_epochs=self.config.epochs, steps_per_epoch=step_size, - lr_decay_mode=self.config.lr_scheduler.params.lr_decay_mode)) - group_params = init_group_prams(self.net) - opt = Momentum(group_params, lr, self.config.optimizer.params.momentum, loss_scale=self.config.loss_scale) - - self.ms_model = MsModel(network=self.net, - loss_fn=self.loss, - optimizer=opt, - loss_scale_manager=self.loss_scale, - amp_level="O2", keep_batchnorm_fp32=False, - acc_level="O0", - metrics=self.ms_metrics) - config_ck = CheckpointConfig(save_checkpoint_steps=self.config.save_steps, keep_checkpoint_max=1) - save_path = self.get_local_worker_path(self.step_name, self.worker_id) - ckpoint_cb = ModelCheckpoint(config=config_ck, directory=save_path) - loss_cb = LossMonitor() - self.valid_loader = create_dataset(dataset_path=self.valid_config.data_path + '/val', do_train=False, - batch_size=self.valid_config.batch_size, - target='Ascend') - eval_cb = EvalCallBack(self.ms_model, self.valid_loader, self.dataset_sink_mode, self) - callback_list = [ckpoint_cb, loss_cb, eval_cb] - - self.ms_model.train(epoch=self.epochs, - train_dataset=dataset, - callbacks=callback_list, - dataset_sink_mode=False) - except RuntimeError as e: - logging.warning(f"failed to train the model, skip it, message: {str(e)}") - - def _init_distributed_setting(self): - """Construct the trainer of Resnet.""" - context.set_auto_parallel_context(parallel_mode=ParallelMode.DATA_PARALLEL, gradients_mean=True) - set_algo_parameters(elementwise_op_strategy_follow=True) - context.set_auto_parallel_context(all_reduce_fusion_config=self.config.all_reduce_fusion_config) - - def _init_ms_context(self): - mode = General.ms_execute_mode - logging.info(f"Run train/val in mode: {mode}.") - if vega.is_npu_device(): - context.set_context(mode=mode, device_target="Ascend", device_id=int(os.environ["DEVICE_ID"])) - else: - context.set_context(mode=mode, device_target="CPU") - - self.dataset_sink_mode = General.dataset_sink_mode - logging.info(f"Dataset_sink_mode:{self.dataset_sink_mode}.") diff --git a/vega/algorithms/fully_train/resnet/src/CrossEntropySmooth.py b/vega/algorithms/fully_train/resnet/src/CrossEntropySmooth.py deleted file mode 100644 index 372ea92..0000000 --- a/vega/algorithms/fully_train/resnet/src/CrossEntropySmooth.py +++ /dev/null @@ -1,35 +0,0 @@ -# -*- coding:utf-8 -*- - -# Copyright (C) 2020. Huawei Technologies Co., Ltd. All rights reserved. -# This program is free software; you can redistribute it and/or modify -# it under the terms of the MIT License. -# This program is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# MIT License for more details. 
-"""Define loss function for network.""" -import mindspore.nn as nn -from mindspore import Tensor -from mindspore.common import dtype as mstype -from mindspore.nn.loss.loss import _Loss as LossBase -from mindspore.ops import functional as F -from mindspore.ops import operations as P - - -class CrossEntropySmooth(LossBase): - """CrossEntropy.""" - - def __init__(self, sparse=True, reduction='mean', smooth_factor=0., num_classes=1000): - super(CrossEntropySmooth, self).__init__() - self.onehot = P.OneHot() - self.sparse = sparse - self.on_value = Tensor(1.0 - smooth_factor, mstype.float32) - self.off_value = Tensor(1.0 * smooth_factor / (num_classes - 1), mstype.float32) - self.ce = nn.SoftmaxCrossEntropyWithLogits(reduction=reduction) - - def construct(self, logit, label): - """Construct the trainer of Resnet.""" - if self.sparse: - label = self.onehot(label, F.shape(logit)[1], self.on_value, self.off_value) - loss = self.ce(logit, label) - return loss diff --git a/vega/algorithms/fully_train/resnet/src/dataset.py b/vega/algorithms/fully_train/resnet/src/dataset.py deleted file mode 100644 index 6620146..0000000 --- a/vega/algorithms/fully_train/resnet/src/dataset.py +++ /dev/null @@ -1,96 +0,0 @@ -# -*- coding:utf-8 -*- - -# Copyright (C) 2020. Huawei Technologies Co., Ltd. All rights reserved. -# This program is free software; you can redistribute it and/or modify -# it under the terms of the MIT License. -# This program is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# MIT License for more details. -"""Create train or eval dataset.""" -import os -import mindspore.common.dtype as mstype -import mindspore.dataset as ds -import mindspore.dataset.vision.c_transforms as C -import mindspore.dataset.transforms.c_transforms as C2 -from mindspore.communication.management import init, get_rank, get_group_size - - -def create_dataset2(dataset_path, do_train, repeat_num=1, batch_size=32, target="Ascend", distribute=False, - enable_cache=False, cache_session_id=None): - """ - Create a train or eval imagenet2012 dataset for resnet50. - - Args: - dataset_path(string): the path of dataset. - do_train(bool): whether dataset is used for train or eval. - repeat_num(int): the repeat times of dataset. Default: 1 - batch_size(int): the batch size of dataset. Default: 32 - target(str): the device target. Default: Ascend - distribute(bool): data for distribute or not. Default: False - enable_cache(bool): whether tensor caching service is used for eval. Default: False - cache_session_id(int): If enable_cache, cache session_id need to be provided. 
Default: None - - Returns: - dataset - """ - if target == "Ascend": - device_num = int(os.environ.get('RANK_SIZE')) - rank_id = int(os.environ.get('RANK_ID')) - else: - if distribute: - init() - rank_id = get_rank() - device_num = get_group_size() - else: - device_num = 1 - - if device_num == 1: - data_set = ds.ImageFolderDataset(dataset_path, num_parallel_workers=12, shuffle=True) - else: - data_set = ds.ImageFolderDataset(dataset_path, num_parallel_workers=12, shuffle=True, - num_shards=device_num, shard_id=rank_id) - - image_size = 224 - mean = [0.485 * 255, 0.456 * 255, 0.406 * 255] - std = [0.229 * 255, 0.224 * 255, 0.225 * 255] - - # define map operations - if do_train: - trans = [ - C.RandomCropDecodeResize(image_size, scale=(0.08, 1.0), ratio=(0.75, 1.333)), - C.RandomHorizontalFlip(prob=0.5), - C.Normalize(mean=mean, std=std), - C.HWC2CHW() - ] - else: - trans = [ - C.Decode(), - C.Resize(256), - C.CenterCrop(image_size), - C.Normalize(mean=mean, std=std), - C.HWC2CHW() - ] - - type_cast_op = C2.TypeCast(mstype.int32) - - data_set = data_set.map(operations=trans, input_columns="image", num_parallel_workers=12) - # only enable cache for eval - if do_train: - enable_cache = False - if enable_cache: - if not cache_session_id: - raise ValueError("A cache session_id must be provided to use cache.") - eval_cache = ds.DatasetCache(session_id=int(cache_session_id), size=0) - data_set = data_set.map(operations=type_cast_op, input_columns="label", num_parallel_workers=12, - cache=eval_cache) - else: - data_set = data_set.map(operations=type_cast_op, input_columns="label", num_parallel_workers=12) - - # apply batch operations - data_set = data_set.batch(batch_size, drop_remainder=True) - - # apply dataset repeat operation - data_set = data_set.repeat(repeat_num) - - return data_set diff --git a/vega/algorithms/fully_train/resnet/src/lr_generator.py b/vega/algorithms/fully_train/resnet/src/lr_generator.py deleted file mode 100644 index 7e2924c..0000000 --- a/vega/algorithms/fully_train/resnet/src/lr_generator.py +++ /dev/null @@ -1,235 +0,0 @@ -# -*- coding:utf-8 -*- - -# Copyright (C) 2020. Huawei Technologies Co., Ltd. All rights reserved. -# This program is free software; you can redistribute it and/or modify -# it under the terms of the MIT License. -# This program is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# MIT License for more details. -"""Learning rate generator.""" -import math -import numpy as np - - -def _generate_steps_lr(lr_init, lr_max, total_steps, warmup_steps): - """ - Apply three steps decay to generate learning rate array. - - Args: - lr_init(float): init learning rate. - lr_max(float): max learning rate. - total_steps(int): all steps in training. - warmup_steps(int): all steps in warmup epochs. - - Returns: - np.array, learning rate array. - """ - decay_epoch_index = [0.3 * total_steps, 0.6 * total_steps, 0.8 * total_steps] - lr_each_step = [] - for i in range(total_steps): - if i < warmup_steps: - lr = lr_init + (lr_max - lr_init) * i / warmup_steps - else: - if i < decay_epoch_index[0]: - lr = lr_max - elif i < decay_epoch_index[1]: - lr = lr_max * 0.1 - elif i < decay_epoch_index[2]: - lr = lr_max * 0.01 - else: - lr = lr_max * 0.001 - lr_each_step.append(lr) - return lr_each_step - - -def _generate_poly_lr(lr_init, lr_end, lr_max, total_steps, warmup_steps): - """ - Apply polynomial decay to generate learning rate array. 
- - Args: - lr_init(float): init learning rate. - lr_end(float): end learning rate - lr_max(float): max learning rate. - total_steps(int): all steps in training. - warmup_steps(int): all steps in warmup epochs. - - Returns: - np.array, learning rate array. - """ - lr_each_step = [] - if warmup_steps != 0: - inc_each_step = (float(lr_max) - float(lr_init)) / float(warmup_steps) - else: - inc_each_step = 0 - for i in range(total_steps): - if i < warmup_steps: - lr = float(lr_init) + inc_each_step * float(i) - else: - base = (1.0 - (float(i) - float(warmup_steps)) / (float(total_steps) - float(warmup_steps))) - lr = float(lr_max) * base * base - if lr < 0.0: - lr = 0.0 - lr_each_step.append(lr) - return lr_each_step - - -def _generate_cosine_lr(lr_init, lr_end, lr_max, total_steps, warmup_steps): - """ - Apply cosine decay to generate learning rate array. - - Args: - lr_init(float): init learning rate. - lr_end(float): end learning rate - lr_max(float): max learning rate. - total_steps(int): all steps in training. - warmup_steps(int): all steps in warmup epochs. - - Returns: - np.array, learning rate array. - """ - decay_steps = total_steps - warmup_steps - lr_each_step = [] - for i in range(total_steps): - if i < warmup_steps: - lr_inc = (float(lr_max) - float(lr_init)) / float(warmup_steps) - lr = float(lr_init) + lr_inc * (i + 1) - else: - linear_decay = (total_steps - i) / decay_steps - cosine_decay = 0.5 * (1 + math.cos(math.pi * 2 * 0.47 * i / decay_steps)) - decayed = linear_decay * cosine_decay + 0.00001 - lr = lr_max * decayed - lr_each_step.append(lr) - return lr_each_step - - -def _generate_liner_lr(lr_init, lr_end, lr_max, total_steps, warmup_steps): - """ - Apply liner decay to generate learning rate array. - - Args: - lr_init(float): init learning rate. - lr_end(float): end learning rate - lr_max(float): max learning rate. - total_steps(int): all steps in training. - warmup_steps(int): all steps in warmup epochs. - - Returns: - np.array, learning rate array. - """ - lr_each_step = [] - for i in range(total_steps): - if i < warmup_steps: - lr = lr_init + (lr_max - lr_init) * i / warmup_steps - else: - lr = lr_max - (lr_max - lr_end) * (i - warmup_steps) / (total_steps - warmup_steps) - lr_each_step.append(lr) - return lr_each_step - - -def get_lr(lr_init, lr_end, lr_max, warmup_epochs, total_epochs, steps_per_epoch, lr_decay_mode): - """ - Generate learning rate array. 
- - Args: - lr_init(float): init learning rate - lr_end(float): end learning rate - lr_max(float): max learning rate - warmup_epochs(int): number of warmup epochs - total_epochs(int): total epoch of training - steps_per_epoch(int): steps of one epoch - lr_decay_mode(string): learning rate decay mode, including steps, poly, cosine or liner(default) - - Returns: - np.array, learning rate array - """ - lr_each_step = [] - total_steps = steps_per_epoch * total_epochs - warmup_steps = steps_per_epoch * warmup_epochs - - if lr_decay_mode == 'steps': - lr_each_step = _generate_steps_lr(lr_init, lr_max, total_steps, warmup_steps) - elif lr_decay_mode == 'poly': - lr_each_step = _generate_poly_lr(lr_init, lr_end, lr_max, total_steps, warmup_steps) - elif lr_decay_mode == 'cosine': - lr_each_step = _generate_cosine_lr(lr_init, lr_end, lr_max, total_steps, warmup_steps) - else: - lr_each_step = _generate_liner_lr(lr_init, lr_end, lr_max, total_steps, warmup_steps) - - lr_each_step = np.array(lr_each_step).astype(np.float32) - return lr_each_step - - -def linear_warmup_lr(current_step, warmup_steps, base_lr, init_lr): - """Construct the trainer of Resnet.""" - lr_inc = (float(base_lr) - float(init_lr)) / float(warmup_steps) - lr = float(init_lr) + lr_inc * current_step - return lr - - -def warmup_cosine_annealing_lr(lr, steps_per_epoch, warmup_epochs, max_epoch=120, global_step=0): - """ - Generate learning rate array with cosine. - - Args: - lr(float): base learning rate - steps_per_epoch(int): steps size of one epoch - warmup_epochs(int): number of warmup epochs - max_epoch(int): total epochs of training - global_step(int): the current start index of lr array - Returns: - np.array, learning rate array - """ - base_lr = lr - warmup_init_lr = 0 - total_steps = int(max_epoch * steps_per_epoch) - warmup_steps = int(warmup_epochs * steps_per_epoch) - decay_steps = total_steps - warmup_steps - - lr_each_step = [] - for i in range(total_steps): - if i < warmup_steps: - lr = linear_warmup_lr(i + 1, warmup_steps, base_lr, warmup_init_lr) - else: - linear_decay = (total_steps - i) / decay_steps - cosine_decay = 0.5 * (1 + math.cos(math.pi * 2 * 0.47 * i / decay_steps)) - decayed = linear_decay * cosine_decay + 0.00001 - lr = base_lr * decayed - lr_each_step.append(lr) - - lr_each_step = np.array(lr_each_step).astype(np.float32) - learning_rate = lr_each_step[global_step:] - return learning_rate - - -def get_thor_lr(global_step, lr_init, decay, total_epochs, steps_per_epoch, decay_epochs=100): - """Get model_lr.""" - lr_each_step = [] - total_steps = steps_per_epoch * total_epochs - for i in range(total_steps): - epoch = (i + 1) / steps_per_epoch - base = (1.0 - float(epoch) / total_epochs) ** decay - lr_local = lr_init * base - if epoch >= decay_epochs: - lr_local = lr_local * 0.5 - if epoch >= decay_epochs + 1: - lr_local = lr_local * 0.5 - lr_each_step.append(lr_local) - current_step = global_step - lr_each_step = np.array(lr_each_step).astype(np.float32) - learning_rate = lr_each_step[current_step:] - return learning_rate - - -def get_thor_damping(global_step, damping_init, decay_rate, total_epochs, steps_per_epoch): - """Get model_damping.""" - damping_each_step = [] - total_steps = steps_per_epoch * total_epochs - for step in range(total_steps): - epoch = (step + 1) / steps_per_epoch - damping_here = damping_init * (decay_rate ** (epoch / 10)) - damping_each_step.append(damping_here) - current_step = global_step - damping_each_step = np.array(damping_each_step).astype(np.float32) - damping_now = 
damping_each_step[current_step:] - return damping_now diff --git a/vega/algorithms/fully_train/resnet/src/metric.py b/vega/algorithms/fully_train/resnet/src/metric.py deleted file mode 100644 index a968a20..0000000 --- a/vega/algorithms/fully_train/resnet/src/metric.py +++ /dev/null @@ -1,120 +0,0 @@ -# Copyright 2021 Huawei Technologies Co., Ltd -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# ============================================================================ -"""evaluation metric.""" - -from mindspore.communication.management import GlobalComm -from mindspore.ops import operations as P -import mindspore.nn as nn -import mindspore.common.dtype as mstype - - -class ClassifyCorrectCell(nn.Cell): - r""" - Cell that returns correct count of the prediction in classification network. - - Args: - network (Cell): The network Cell. - - Inputs: - - **data** (Tensor) - Tensor of shape :math:`(N, \ldots)`. - - **label** (Tensor) - Tensor of shape :math:`(N, \ldots)`. - - Outputs: - Tuple, containing a scalar correct count of the prediction - - Examples: - >>> # For a defined network Net without loss function - >>> net = Net() - >>> eval_net = nn.ClassifyCorrectCell(net) - """ - - def __init__(self, network): - super(ClassifyCorrectCell, self).__init__(auto_prefix=False) - self._network = network - self.argmax = P.Argmax() - self.equal = P.Equal() - self.cast = P.Cast() - self.reduce_sum = P.ReduceSum() - self.allreduce = P.AllReduce(P.ReduceOp.SUM, GlobalComm.WORLD_COMM_GROUP) - - def construct(self, data, label): - """Construct the trainer of Resnet.""" - outputs = self._network(data) - y_pred = self.argmax(outputs) - y_pred = self.cast(y_pred, mstype.int32) - y_correct = self.equal(y_pred, label) - y_correct = self.cast(y_correct, mstype.float32) - y_correct = self.reduce_sum(y_correct) - total_correct = self.allreduce(y_correct) - return (total_correct,) - - -class DistAccuracy(nn.Metric): - r""" - Calculates the accuracy for classification data in distributed mode. - - Args: - eval_type (str): Metric to calculate the accuracy over a dataset, for classification (single-label). - - Examples: - >>> y_correct = Tensor(np.array([20])) - >>> metric = nn.DistAccuracy(batch_size=3, device_num=8) - >>> metric.clear() - >>> metric.update(y_correct) - >>> accuracy = metric.eval() - """ - - def __init__(self, batch_size, device_num): - super(DistAccuracy, self).__init__() - self.clear() - self.batch_size = batch_size - self.device_num = device_num - - def clear(self): - """Clear the internal evaluation result.""" - self._correct_num = 0 - self._total_num = 0 - - def update(self, *inputs): - """ - Update the internal evaluation result :math:`y_{pred}` and :math:`y`. - - Args: - inputs: Input `y_correct`. `y_correct` is a `scalar Tensor`. - `y_correct` is the right prediction count that gathered from all devices - it's a scalar in float type - - Raises: - ValueError: If the number of the input is not 1. 
- """ - if len(inputs) != 1: - raise ValueError('Distribute accuracy needs 1 input (y_correct), but got {}'.format(len(inputs))) - y_correct = self._convert_data(inputs[0]) - self._correct_num += y_correct - self._total_num += self.batch_size * self.device_num - - def eval(self): - """ - Compute the accuracy. - - Returns: - Float, the computed result. - - Raises: - RuntimeError: If the sample size is 0. - """ - if self._total_num == 0: - raise RuntimeError('Accuracy can not be calculated, because the number of samples is 0.') - return self._correct_num / self._total_num diff --git a/vega/algorithms/fully_train/resnet/src/momentum.py b/vega/algorithms/fully_train/resnet/src/momentum.py deleted file mode 100644 index 7e4b499..0000000 --- a/vega/algorithms/fully_train/resnet/src/momentum.py +++ /dev/null @@ -1,130 +0,0 @@ -# Copyright 2021 Huawei Technologies Co., Ltd -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# ============================================================================ -"""Momentum.""" -from mindspore.ops import functional as F, composite as C, operations as P -from mindspore.common.parameter import Parameter -from mindspore.common.tensor import Tensor -import mindspore.common.dtype as mstype -from mindspore._checkparam import Validator -from mindspore.nn.optim.optimizer import Optimizer - -_momentum_opt = C.MultitypeFuncGraph("momentum_opt") - - -@_momentum_opt.register("Function", "Tensor", "Tensor", "Tensor", "Tensor", "Tensor", "Tensor", "Tensor") -def _tensor_run_opt_ext(opt, weight_decay, scale, momentum, learning_rate, gradient, weight, moment): - """Apply momentum optimizer to the weight parameter using Tensor.""" - success = F.depend(True, opt(weight_decay, scale, weight, moment, learning_rate, gradient, momentum)) - return success - - -class Momentum(Optimizer): - r""" - Implements the Momentum algorithm. - - Args: - params (Union[list[Parameter], list[dict]]): When the `params` is a list of `Parameter` which will be updated, - the element in `params` must be class `Parameter`. When the `params` is a list of `dict`, the "params", - "lr", "weight_decay" and "order_params" are the keys can be parsed. - - - params: Required. The value must be a list of `Parameter`. - - - lr: Optional. If "lr" in the keys, the value of corresponding learning rate will be used. - If not, the `learning_rate` in the API will be used. - - - weight_decay: Optional. If "weight_decay" in the keys, the value of corresponding weight decay - will be used. If not, the `weight_decay` in the API will be used. - - - order_params: Optional. If "order_params" in the keys, the value must be the order of parameters and - the order will be followed in optimizer. There are no other keys in the `dict` and the parameters which - in the value of 'order_params' must be in one of group parameters. - - learning_rate (Union[float, Tensor, Iterable, LearningRateSchedule]): A value or a graph for the learning rate. 
- When the learning_rate is an Iterable or a Tensor in a 1D dimension, use dynamic learning rate, then - the i-th step will take the i-th value as the learning rate. When the learning_rate is LearningRateSchedule, - use dynamic learning rate, the i-th learning rate will be calculated during the process of training - according to the formula of LearningRateSchedule. When the learning_rate is a float or a Tensor in a zero - dimension, use fixed learning rate. Other cases are not supported. The float learning rate must be - equal to or greater than 0. If the type of `learning_rate` is int, it will be converted to float. - momentum (float): Hyperparameter of type float, means momentum for the moving average. - It must be at least 0.0. - weight_decay (int, float): Weight decay (L2 penalty). It must be equal to or greater than 0.0. Default: 0.0. - loss_scale (int, float): A floating point value for the loss scale. It must be greater than 0.0. Default: 1.0. - use_nesterov (bool): Enable Nesterov momentum. Default: False. - - Inputs: - - **gradients** (tuple[Tensor]) - The gradients of `params`, the shape is the same as `params`. - - Outputs: - tuple[bool], all elements are True. - - Raises: - ValueError: If the momentum is less than 0.0. - TypeError: If the momentum is not a float or use_nesterov is not a bool. - - Supported Platforms: - ``GPU`` - - Examples: - >>> net = Net() - >>> #1) All parameters use the same learning rate and weight decay - >>> optim = Momentum(params=net.trainable_params(), learning_rate=0.1, momentum=0.9) - >>> - >>> #2) Use parameter groups and set different values - >>> conv_params = list(filter(lambda x: 'conv' in x.name, net.trainable_params())) - >>> no_conv_params = list(filter(lambda x: 'conv' not in x.name, net.trainable_params())) - >>> group_params = [{'params': conv_params, 'weight_decay': 0.01}, - ... {'params': no_conv_params, 'lr': 0.01}, - ... {'order_params': net.trainable_params()}] - >>> optim = Momentum(group_params, learning_rate=0.1, momentum=0.9, weight_decay=0.0) - >>> # The conv_params's parameters will use a learning rate of default value 0.1 and a weight decay of 0.01. - >>> # The no_conv_params's parameters will use a learning rate of 0.01 and a weight decay of default value 0.0. - >>> # The final parameters order in which the optimizer will be followed is the value of 'order_params'. - >>> - >>> loss = nn.SoftmaxCrossEntropyWithLogits() - >>> model = Model(net, loss_fn=loss, optimizer=optim, metrics=None) - """ - - def __init__(self, params, learning_rate, momentum, weight_decay=0.0, loss_scale=1.0, use_nesterov=False): - super(Momentum, self).__init__(learning_rate, params, weight_decay, loss_scale) - Validator.check_value_type("momentum", momentum, [float], self.cls_name) - if isinstance(momentum, float) and momentum < 0.0: - raise ValueError("momentum should be at least 0.0, but got momentum {}".format(momentum)) - self.momentum = Parameter(Tensor(momentum, mstype.float32), name="momentum") - self.params = self.parameters - self.use_nesterov = Validator.check_bool(use_nesterov) - self.moments = self.params.clone(prefix="moments", init='zeros') - self.hyper_map = C.HyperMap() - # Use FusedWeightScaleApplyMomentum to avoid extra kernel launch. 
- self.opt = P.FusedWeightScaleApplyMomentum() - - def construct(self, gradients): - """Construct the trainer of Resnet.""" - params = self.params - moments = self.moments - weight_decay = Tensor(0.0, mstype.float32) - scale = Tensor(1.0, mstype.float32) - if self.exec_weight_decay: - weight_decay = self.weight_decay_tensor - if self.need_scale: - scale = self.reciprocal_scale - lr = self.get_lr() - if self.is_group_lr: - success = self.hyper_map(F.partial(_momentum_opt, self.opt, weight_decay, scale, self.momentum), - lr, gradients, params, moments) - else: - success = self.hyper_map(F.partial(_momentum_opt, self.opt, weight_decay, scale, self.momentum, lr), - gradients, params, moments) - return success diff --git a/vega/algorithms/fully_train/resnet/src/resnet.py b/vega/algorithms/fully_train/resnet/src/resnet.py deleted file mode 100644 index 4edb4cc..0000000 --- a/vega/algorithms/fully_train/resnet/src/resnet.py +++ /dev/null @@ -1,610 +0,0 @@ -# Copyright 2020-2021 Huawei Technologies Co., Ltd -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# ============================================================================ -"""ResNet.""" -import math -import numpy as np -from scipy.stats import truncnorm -import mindspore.nn as nn -import mindspore.common.dtype as mstype -from mindspore.ops import operations as P -from mindspore.ops import functional as F -from mindspore.common.tensor import Tensor - - -def _conv_variance_scaling_initializer(in_channel, out_channel, kernel_size): - """Construct the trainer of Resnet.""" - fan_in = in_channel * kernel_size * kernel_size - scale = 1.0 - scale /= max(1., fan_in) - stddev = (scale ** 0.5) / .87962566103423978 - mu, sigma = 0, stddev - weight = truncnorm(-2, 2, loc=mu, scale=sigma).rvs(out_channel * in_channel * kernel_size * kernel_size) - weight = np.reshape(weight, (out_channel, in_channel, kernel_size, kernel_size)) - return Tensor(weight, dtype=mstype.float32) - - -def _weight_variable(shape, factor=0.01): - """Construct the trainer of Resnet.""" - init_value = np.random.randn(*shape).astype(np.float32) * factor - return Tensor(init_value) - - -def calculate_gain(nonlinearity, param=None): - """Calculate gain.""" - linear_fns = ['linear', 'conv1d', 'conv2d', 'conv3d', 'conv_transpose1d', 'conv_transpose2d', 'conv_transpose3d'] - res = 0 - if nonlinearity in linear_fns or nonlinearity == 'sigmoid': - res = 1 - elif nonlinearity == 'tanh': - res = 5.0 / 3 - elif nonlinearity == 'relu': - res = math.sqrt(2.0) - elif nonlinearity == 'leaky_relu': - if param is None: - negative_slope = 0.01 - elif not isinstance(param, bool) and isinstance(param, int) or isinstance(param, float): - # True/False are instances of int, hence check above - negative_slope = param - else: - raise ValueError("negative_slope {} not a valid number".format(param)) - res = math.sqrt(2.0 / (1 + negative_slope ** 2)) - else: - raise ValueError("Unsupported nonlinearity {}".format(nonlinearity)) - return res - - -def _calculate_fan_in_and_fan_out(tensor): - """Calculate 
fan_in_and_fan_out.""" - dimensions = len(tensor) - if dimensions < 2: - raise ValueError("Fan in and fan out can not be computed for tensor with fewer than 2 dimensions") - if dimensions == 2: # Linear - fan_in = tensor[1] - fan_out = tensor[0] - else: - num_input_fmaps = tensor[1] - num_output_fmaps = tensor[0] - receptive_field_size = 1 - if dimensions > 2: - receptive_field_size = tensor[2] * tensor[3] - fan_in = num_input_fmaps * receptive_field_size - fan_out = num_output_fmaps * receptive_field_size - return fan_in, fan_out - - -def _calculate_correct_fan(tensor, mode): - """Calculate correct_fan.""" - mode = mode.lower() - valid_modes = ['fan_in', 'fan_out'] - if mode not in valid_modes: - raise ValueError("Mode {} not supported, please use one of {}".format(mode, valid_modes)) - fan_in, fan_out = _calculate_fan_in_and_fan_out(tensor) - return fan_in if mode == 'fan_in' else fan_out - - -def kaiming_normal(inputs_shape, a=0, mode='fan_in', nonlinearity='leaky_relu'): - """Construct the trainer of Resnet.""" - fan = _calculate_correct_fan(inputs_shape, mode) - gain = calculate_gain(nonlinearity, a) - std = gain / math.sqrt(fan) - return np.random.normal(0, std, size=inputs_shape).astype(np.float32) - - -def kaiming_uniform(inputs_shape, a=0., mode='fan_in', nonlinearity='leaky_relu'): - """Construct the trainer of Resnet.""" - fan = _calculate_correct_fan(inputs_shape, mode) - gain = calculate_gain(nonlinearity, a) - std = gain / math.sqrt(fan) - bound = math.sqrt(3.0) * std # Calculate uniform bounds from standard deviation - return np.random.uniform(-bound, bound, size=inputs_shape).astype(np.float32) - - -def _conv3x3(in_channel, out_channel, stride=1, use_se=False, res_base=False): - """Construct the trainer of Resnet.""" - if use_se: - weight = _conv_variance_scaling_initializer(in_channel, out_channel, kernel_size=3) - else: - weight_shape = (out_channel, in_channel, 3, 3) - weight = Tensor(kaiming_normal(weight_shape, mode="fan_out", nonlinearity='relu')) - if res_base: - return nn.Conv2d(in_channel, out_channel, kernel_size=3, stride=stride, - padding=1, pad_mode='pad', weight_init=weight) - return nn.Conv2d(in_channel, out_channel, kernel_size=3, stride=stride, - padding=0, pad_mode='same', weight_init=weight) - - -def _conv1x1(in_channel, out_channel, stride=1, use_se=False, res_base=False): - """Construct the trainer of Resnet.""" - if use_se: - weight = _conv_variance_scaling_initializer(in_channel, out_channel, kernel_size=1) - else: - weight_shape = (out_channel, in_channel, 1, 1) - weight = Tensor(kaiming_normal(weight_shape, mode="fan_out", nonlinearity='relu')) - if res_base: - return nn.Conv2d(in_channel, out_channel, kernel_size=1, stride=stride, - padding=0, pad_mode='pad', weight_init=weight) - return nn.Conv2d(in_channel, out_channel, kernel_size=1, stride=stride, - padding=0, pad_mode='same', weight_init=weight) - - -def _conv7x7(in_channel, out_channel, stride=1, use_se=False, res_base=False): - """Construct the trainer of Resnet.""" - if use_se: - weight = _conv_variance_scaling_initializer(in_channel, out_channel, kernel_size=7) - else: - weight_shape = (out_channel, in_channel, 7, 7) - weight = Tensor(kaiming_normal(weight_shape, mode="fan_out", nonlinearity='relu')) - if res_base: - return nn.Conv2d(in_channel, out_channel, - kernel_size=7, stride=stride, padding=3, pad_mode='pad', weight_init=weight) - return nn.Conv2d(in_channel, out_channel, - kernel_size=7, stride=stride, padding=0, pad_mode='same', weight_init=weight) - - -def _bn(channel, 
res_base=False): - """Construct the trainer of Resnet.""" - if res_base: - return nn.BatchNorm2d(channel, eps=1e-5, momentum=0.1, - gamma_init=1, beta_init=0, moving_mean_init=0, moving_var_init=1) - return nn.BatchNorm2d(channel, eps=1e-4, momentum=0.9, - gamma_init=1, beta_init=0, moving_mean_init=0, moving_var_init=1) - - -def _bn_last(channel): - """Construct the trainer of Resnet.""" - return nn.BatchNorm2d(channel, eps=1e-4, momentum=0.9, - gamma_init=0, beta_init=0, moving_mean_init=0, moving_var_init=1) - - -def _fc(in_channel, out_channel, use_se=False): - """Construct the trainer of Resnet.""" - if use_se: - weight = np.random.normal(loc=0, scale=0.01, size=out_channel * in_channel) - weight = Tensor(np.reshape(weight, (out_channel, in_channel)), dtype=mstype.float32) - else: - weight_shape = (out_channel, in_channel) - weight = Tensor(kaiming_uniform(weight_shape, a=math.sqrt(5))) - return nn.Dense(in_channel, out_channel, has_bias=True, weight_init=weight, bias_init=0) - - -class ResidualBlock(nn.Cell): - """ - ResNet V1 residual block definition. - - Args: - in_channel (int): Input channel. - out_channel (int): Output channel. - stride (int): Stride size for the first convolutional layer. Default: 1. - use_se (bool): Enable SE-ResNet50 net. Default: False. - se_block(bool): Use se block in SE-ResNet50 net. Default: False. - - Returns: - Tensor, output tensor. - - Examples: - >>> ResidualBlock(3, 256, stride=2) - """ - - expansion = 4 - - def __init__(self, - in_channel, - out_channel, - stride=1, - use_se=False, se_block=False): - super(ResidualBlock, self).__init__() - self.stride = stride - self.use_se = use_se - self.se_block = se_block - channel = out_channel // self.expansion - self.conv1 = _conv1x1(in_channel, channel, stride=1, use_se=self.use_se) - self.bn1 = _bn(channel) - if self.use_se and self.stride != 1: - self.e2 = nn.SequentialCell([_conv3x3(channel, channel, stride=1, use_se=True), _bn(channel), - nn.ReLU(), nn.MaxPool2d(kernel_size=2, stride=2, pad_mode='same')]) - else: - self.conv2 = _conv3x3(channel, channel, stride=stride, use_se=self.use_se) - self.bn2 = _bn(channel) - - self.conv3 = _conv1x1(channel, out_channel, stride=1, use_se=self.use_se) - self.bn3 = _bn_last(out_channel) - if self.se_block: - self.se_global_pool = P.ReduceMean(keep_dims=False) - self.se_dense_0 = _fc(out_channel, int(out_channel / 4), use_se=self.use_se) - self.se_dense_1 = _fc(int(out_channel / 4), out_channel, use_se=self.use_se) - self.se_sigmoid = nn.Sigmoid() - self.se_mul = P.Mul() - self.relu = nn.ReLU() - - self.down_sample = False - - if stride != 1 or in_channel != out_channel: - self.down_sample = True - self.down_sample_layer = None - - if self.down_sample: - if self.use_se: - if stride == 1: - self.down_sample_layer = nn.SequentialCell([_conv1x1(in_channel, out_channel, - stride, use_se=self.use_se), _bn(out_channel)]) - else: - self.down_sample_layer = nn.SequentialCell([nn.MaxPool2d(kernel_size=2, stride=2, pad_mode='same'), - _conv1x1(in_channel, out_channel, 1, - use_se=self.use_se), _bn(out_channel)]) - else: - self.down_sample_layer = nn.SequentialCell([_conv1x1(in_channel, out_channel, stride, - use_se=self.use_se), _bn(out_channel)]) - - def construct(self, x): - """Construct the trainer of Resnet.""" - identity = x - - out = self.conv1(x) - out = self.bn1(out) - out = self.relu(out) - if self.use_se and self.stride != 1: - out = self.e2(out) - else: - out = self.conv2(out) - out = self.bn2(out) - out = self.relu(out) - out = self.conv3(out) - out = 
self.bn3(out) - if self.se_block: - out_se = out - out = self.se_global_pool(out, (2, 3)) - out = self.se_dense_0(out) - out = self.relu(out) - out = self.se_dense_1(out) - out = self.se_sigmoid(out) - out = F.reshape(out, F.shape(out) + (1, 1)) - out = self.se_mul(out, out_se) - - if self.down_sample: - identity = self.down_sample_layer(identity) - - out = out + identity - out = self.relu(out) - - return out - - -class ResidualBlockBase(nn.Cell): - """ - ResNet V1 residual block definition. - - Args: - in_channel (int): Input channel. - out_channel (int): Output channel. - stride (int): Stride size for the first convolutional layer. Default: 1. - use_se (bool): Enable SE-ResNet50 net. Default: False. - se_block(bool): Use se block in SE-ResNet50 net. Default: False. - res_base (bool): Enable parameter setting of resnet18. Default: True. - - Returns: - Tensor, output tensor. - - Examples: - >>> ResidualBlockBase(3, 256, stride=2) - """ - - def __init__(self, - in_channel, - out_channel, - stride=1, - use_se=False, - se_block=False, - res_base=True): - super(ResidualBlockBase, self).__init__() - self.res_base = res_base - self.conv1 = _conv3x3(in_channel, out_channel, stride=stride, res_base=self.res_base) - self.bn1d = _bn(out_channel) - self.conv2 = _conv3x3(out_channel, out_channel, stride=1, res_base=self.res_base) - self.bn2d = _bn(out_channel) - self.relu = nn.ReLU() - - self.down_sample = False - if stride != 1 or in_channel != out_channel: - self.down_sample = True - - self.down_sample_layer = None - if self.down_sample: - self.down_sample_layer = nn.SequentialCell([_conv1x1(in_channel, out_channel, stride, - use_se=use_se, res_base=self.res_base), - _bn(out_channel, res_base)]) - - def construct(self, x): - """Construct the trainer of Resnet.""" - identity = x - - out = self.conv1(x) - out = self.bn1d(out) - out = self.relu(out) - - out = self.conv2(out) - out = self.bn2d(out) - - if self.down_sample: - identity = self.down_sample_layer(identity) - - out = out + identity - out = self.relu(out) - - return out - - -class ResNet(nn.Cell): - """ - ResNet architecture. - - Args: - block (Cell): Block for network. - layer_nums (list): Numbers of block in different layers. - in_channels (list): Input channel in each layer. - out_channels (list): Output channel in each layer. - strides (list): Stride size in each layer. - num_classes (int): The number of classes that the training images are belonging to. - use_se (bool): Enable SE-ResNet50 net. Default: False. - se_block(bool): Use se block in SE-ResNet50 net in layer 3 and layer 4. Default: False. - res_base (bool): Enable parameter setting of resnet18. Default: False. - - Returns: - Tensor, output tensor. 
- - Examples: - >>> ResNet(ResidualBlock, - >>> [3, 4, 6, 3], - >>> [64, 256, 512, 1024], - >>> [256, 512, 1024, 2048], - >>> [1, 2, 2, 2], - >>> 10) - """ - - def __init__(self, - block, - layer_nums, - in_channels, - out_channels, - strides, - num_classes, - use_se=False, - res_base=False): - super(ResNet, self).__init__() - - if not len(layer_nums) == len(in_channels) == len(out_channels) == 4: - raise ValueError("the length of layer_num, in_channels, out_channels list must be 4!") - self.use_se = use_se - self.res_base = res_base - self.se_block = False - if self.use_se: - self.se_block = True - - if self.use_se: - self.conv1_0 = _conv3x3(3, 32, stride=2, use_se=self.use_se) - self.bn1_0 = _bn(32) - self.conv1_1 = _conv3x3(32, 32, stride=1, use_se=self.use_se) - self.bn1_1 = _bn(32) - self.conv1_2 = _conv3x3(32, 64, stride=1, use_se=self.use_se) - else: - self.conv1 = _conv7x7(3, 64, stride=2, res_base=self.res_base) - self.bn1 = _bn(64, self.res_base) - self.relu = P.ReLU() - - if self.res_base: - self.pad = nn.Pad(paddings=((0, 0), (0, 0), (1, 1), (1, 1))) - self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, pad_mode="valid") - else: - self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, pad_mode="same") - - self.layer1 = self._make_layer(block, - layer_nums[0], - in_channel=in_channels[0], - out_channel=out_channels[0], - stride=strides[0], - use_se=self.use_se) - self.layer2 = self._make_layer(block, - layer_nums[1], - in_channel=in_channels[1], - out_channel=out_channels[1], - stride=strides[1], - use_se=self.use_se) - self.layer3 = self._make_layer(block, - layer_nums[2], - in_channel=in_channels[2], - out_channel=out_channels[2], - stride=strides[2], - use_se=self.use_se, - se_block=self.se_block) - self.layer4 = self._make_layer(block, - layer_nums[3], - in_channel=in_channels[3], - out_channel=out_channels[3], - stride=strides[3], - use_se=self.use_se, - se_block=self.se_block) - - self.mean = P.ReduceMean(keep_dims=True) - self.flatten = nn.Flatten() - self.end_point = _fc(out_channels[3], num_classes, use_se=self.use_se) - - def _make_layer(self, block, layer_num, in_channel, out_channel, stride, use_se=False, se_block=False): - """ - Make stage network of ResNet. - - Args: - block (Cell): Resnet block. - layer_num (int): Layer number. - in_channel (int): Input channel. - out_channel (int): Output channel. - stride (int): Stride size for the first convolutional layer. - se_block(bool): Use se block in SE-ResNet50 net. Default: False. - Returns: - SequentialCell, the output layer. 
- - Examples: - >>> _make_layer(ResidualBlock, 3, 128, 256, 2) - """ - layers = [] - - resnet_block = block(in_channel, out_channel, stride=stride, use_se=use_se) - layers.append(resnet_block) - if se_block: - for _ in range(1, layer_num - 1): - resnet_block = block(out_channel, out_channel, stride=1, use_se=use_se) - layers.append(resnet_block) - resnet_block = block(out_channel, out_channel, stride=1, use_se=use_se, se_block=se_block) - layers.append(resnet_block) - else: - for _ in range(1, layer_num): - resnet_block = block(out_channel, out_channel, stride=1, use_se=use_se) - layers.append(resnet_block) - return nn.SequentialCell(layers) - - def construct(self, x): - """Construct the trainer of Resnet.""" - if self.use_se: - x = self.conv1_0(x) - x = self.bn1_0(x) - x = self.relu(x) - x = self.conv1_1(x) - x = self.bn1_1(x) - x = self.relu(x) - x = self.conv1_2(x) - else: - x = self.conv1(x) - x = self.bn1(x) - x = self.relu(x) - if self.res_base: - x = self.pad(x) - c1 = self.maxpool(x) - - c2 = self.layer1(c1) - c3 = self.layer2(c2) - c4 = self.layer3(c3) - c5 = self.layer4(c4) - - out = self.mean(c5, (2, 3)) - out = self.flatten(out) - out = self.end_point(out) - - return out - - -def resnet18(class_num=10): - """ - Get ResNet18 neural network. - - Args: - class_num (int): Class number. - - Returns: - Cell, cell instance of ResNet18 neural network. - - Examples: - >>> net = resnet18(10) - """ - return ResNet(ResidualBlockBase, - [2, 2, 2, 2], - [64, 64, 128, 256], - [64, 128, 256, 512], - [1, 2, 2, 2], - class_num, - res_base=True) - - -def resnet34(class_num=10): - """ - Get ResNet34 neural network. - - Args: - class_num (int): Class number. - - Returns: - Cell, cell instance of ResNet34 neural network. - - Examples: - >>> net = resnet18(10) - """ - return ResNet(ResidualBlockBase, - [3, 4, 6, 3], - [64, 64, 128, 256], - [64, 128, 256, 512], - [1, 2, 2, 2], - class_num, - res_base=True) - - -def resnet50(class_num=10): - """ - Get ResNet50 neural network. - - Args: - class_num (int): Class number. - - Returns: - Cell, cell instance of ResNet50 neural network. - - Examples: - >>> net = resnet50(10) - """ - return ResNet(ResidualBlock, - [3, 4, 6, 3], - [64, 256, 512, 1024], - [256, 512, 1024, 2048], - [1, 2, 2, 2], - class_num) - - -def se_resnet50(class_num=1001): - """ - Get SE-ResNet50 neural network. - - Args: - class_num (int): Class number. - - Returns: - Cell, cell instance of SE-ResNet50 neural network. - - Examples: - >>> net = se-resnet50(1001) - """ - return ResNet(ResidualBlock, - [3, 4, 6, 3], - [64, 256, 512, 1024], - [256, 512, 1024, 2048], - [1, 2, 2, 2], - class_num, - use_se=True) - - -def resnet101(class_num=1001): - """ - Get ResNet101 neural network. - - Args: - class_num (int): Class number. - - Returns: - Cell, cell instance of ResNet101 neural network. - - Examples: - >>> net = resnet101(1001) - """ - return ResNet(ResidualBlock, - [3, 4, 23, 3], - [64, 256, 512, 1024], - [256, 512, 1024, 2048], - [1, 2, 2, 2], - class_num) diff --git a/vega/algorithms/hpo/__init__.py b/vega/algorithms/hpo/__init__.py index c323339..50f82fe 100644 --- a/vega/algorithms/hpo/__init__.py +++ b/vega/algorithms/hpo/__init__.py @@ -1,12 +1,18 @@ # -*- coding:utf-8 -*- # Copyright (C) 2020. Huawei Technologies Co., Ltd. All rights reserved. -# This program is free software; you can redistribute it and/or modify -# it under the terms of the MIT License. 
-# This program is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# MIT License for more details. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. """Lazy import hpo algorithms.""" @@ -17,6 +23,7 @@ "bohb_hpo": ["BohbHpo"], "boss_hpo": ["BossHpo"], "random_hpo": ["RandomSearch"], + "grid_hpo": ["GridSearch"], "evolution_search": ["EvolutionAlgorithm"], "pbt_hpo": ["PBTHpo"], "pbt_trainer_callback": ["PbtTrainerCallback"], diff --git a/vega/algorithms/hpo/asha_conf.py b/vega/algorithms/hpo/asha_conf.py index a185fb5..39adc32 100644 --- a/vega/algorithms/hpo/asha_conf.py +++ b/vega/algorithms/hpo/asha_conf.py @@ -1,12 +1,18 @@ # -*- coding:utf-8 -*- # Copyright (C) 2020. Huawei Technologies Co., Ltd. All rights reserved. -# This program is free software; you can redistribute it and/or modify -# it under the terms of the MIT License. -# This program is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# MIT License for more details. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. """Defined Configs.""" from vega.common import ConfigSerializable diff --git a/vega/algorithms/hpo/asha_hpo.py b/vega/algorithms/hpo/asha_hpo.py index 88f72a3..f66e28c 100644 --- a/vega/algorithms/hpo/asha_hpo.py +++ b/vega/algorithms/hpo/asha_hpo.py @@ -1,12 +1,18 @@ # -*- coding:utf-8 -*- # Copyright (C) 2020. Huawei Technologies Co., Ltd. All rights reserved. -# This program is free software; you can redistribute it and/or modify -# it under the terms of the MIT License. -# This program is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# MIT License for more details. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
"""Defined AshaHpo class.""" diff --git a/vega/algorithms/hpo/bayes.py b/vega/algorithms/hpo/bayes.py index 1bc8b83..f20c65e 100644 --- a/vega/algorithms/hpo/bayes.py +++ b/vega/algorithms/hpo/bayes.py @@ -1,12 +1,18 @@ # -*- coding:utf-8 -*- # Copyright (C) 2020. Huawei Technologies Co., Ltd. All rights reserved. -# This program is free software; you can redistribute it and/or modify -# it under the terms of the MIT License. -# This program is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# MIT License for more details. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. """Defined Bayes Search class.""" from vega.common import ClassFactory, ClassType diff --git a/vega/algorithms/hpo/bayes_conf.py b/vega/algorithms/hpo/bayes_conf.py index 461edad..f4af8c3 100644 --- a/vega/algorithms/hpo/bayes_conf.py +++ b/vega/algorithms/hpo/bayes_conf.py @@ -1,12 +1,18 @@ # -*- coding:utf-8 -*- # Copyright (C) 2020. Huawei Technologies Co., Ltd. All rights reserved. -# This program is free software; you can redistribute it and/or modify -# it under the terms of the MIT License. -# This program is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# MIT License for more details. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. """Defined Configs.""" from vega.common import ConfigSerializable diff --git a/vega/algorithms/hpo/bohb_conf.py b/vega/algorithms/hpo/bohb_conf.py index b44ee8c..02aef03 100644 --- a/vega/algorithms/hpo/bohb_conf.py +++ b/vega/algorithms/hpo/bohb_conf.py @@ -1,12 +1,18 @@ # -*- coding:utf-8 -*- # Copyright (C) 2020. Huawei Technologies Co., Ltd. All rights reserved. -# This program is free software; you can redistribute it and/or modify -# it under the terms of the MIT License. -# This program is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# MIT License for more details. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. """Defined Configs.""" from vega.common import ConfigSerializable diff --git a/vega/algorithms/hpo/bohb_hpo.py b/vega/algorithms/hpo/bohb_hpo.py index 22193c2..2e544f9 100644 --- a/vega/algorithms/hpo/bohb_hpo.py +++ b/vega/algorithms/hpo/bohb_hpo.py @@ -1,12 +1,18 @@ # -*- coding:utf-8 -*- # Copyright (C) 2020. Huawei Technologies Co., Ltd. All rights reserved. -# This program is free software; you can redistribute it and/or modify -# it under the terms of the MIT License. -# This program is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# MIT License for more details. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. """Defined BohbHpo class.""" from math import pow @@ -110,8 +116,8 @@ def get_iter_epoch_list(self, num_samples): iter_list.append(int( count_list[i] - (pow(eta, iter) - 1) / (eta - 1))) iter_list.sort(reverse=True) - for i in range(len(iter_list)): - temp_ep = int(min_epochs * pow(eta, i)) + for j in range(len(iter_list)): + temp_ep = int(min_epochs * pow(eta, j)) min_ep_list.append(temp_ep) iter_list_hl.append(iter_list) min_ep_list_hl.append(min_ep_list) diff --git a/vega/algorithms/hpo/boss_conf.py b/vega/algorithms/hpo/boss_conf.py index 7d404cb..10b27d5 100644 --- a/vega/algorithms/hpo/boss_conf.py +++ b/vega/algorithms/hpo/boss_conf.py @@ -1,12 +1,18 @@ # -*- coding:utf-8 -*- # Copyright (C) 2020. Huawei Technologies Co., Ltd. All rights reserved. -# This program is free software; you can redistribute it and/or modify -# it under the terms of the MIT License. -# This program is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# MIT License for more details. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
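# Illustrative sketch (not part of the patch): the renamed inner loop in the
# bohb_hpo.get_iter_epoch_list hunk above gives rung j a budget of
# int(min_epochs * eta ** j) epochs. With the assumed values eta = 3 and
# min_epochs = 1 the per-rung budgets come out as:
eta, min_epochs = 3, 1
min_ep_list = [int(min_epochs * pow(eta, j)) for j in range(4)]
assert min_ep_list == [1, 3, 9, 27]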
"""Defined Configs.""" from vega.common import ConfigSerializable @@ -39,6 +45,7 @@ class BossConfig(ConfigSerializable): policy = BossPolicyConfig objective_keys = 'accuracy' + tuner = "RF" # TPE | GP | RF @classmethod def rules(cls): diff --git a/vega/algorithms/hpo/boss_hpo.py b/vega/algorithms/hpo/boss_hpo.py index f3cb797..3e339a6 100644 --- a/vega/algorithms/hpo/boss_hpo.py +++ b/vega/algorithms/hpo/boss_hpo.py @@ -1,12 +1,18 @@ # -*- coding:utf-8 -*- # Copyright (C) 2020. Huawei Technologies Co., Ltd. All rights reserved. -# This program is free software; you can redistribute it and/or modify -# it under the terms of the MIT License. -# This program is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# MIT License for more details. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. """Defined BohbHpo class.""" from math import log, pow, sqrt @@ -34,7 +40,7 @@ def __init__(self, search_space=None, **kwargs): total_epochs = self.config.policy.total_epochs num_samples, max_epochs = self.design_parameter(total_epochs, repeat_times) self._max_samples = num_samples - self.hpo = BOSS(self.search_space, num_samples, max_epochs, repeat_times) + self.hpo = BOSS(self.search_space, num_samples, max_epochs, repeat_times, tuner=self.config.tuner) def design_parameter(self, total_epochs, repeat_times): """Design parameters based on total_epochs. @@ -56,7 +62,7 @@ def design_parameter(self, total_epochs, repeat_times): cn = int(sqrt(log(current_samples * 3 / 2))) min_epochs = current_epochs if cn != 1: - for i in range(cn - 1): + for _ in range(cn - 1): min_epochs *= eta while(current_samples > 0): valid_epochs = max(min_epochs, current_epochs) @@ -108,8 +114,8 @@ def get_iter_epoch_list(self, num_samples, repeat_times): iter_list.append(int( count_list[i] - (pow(eta, iter) - 1) / (eta - 1))) iter_list.sort(reverse=True) - for i in range(len(iter_list)): - temp_ep = int(min_epochs * pow(eta, i)) + for j in range(len(iter_list)): + temp_ep = int(min_epochs * pow(eta, j)) min_ep_list.append(temp_ep) iter_list_hl.append(iter_list) min_ep_list_hl.append(min_ep_list) diff --git a/vega/algorithms/hpo/ea/ga.py b/vega/algorithms/hpo/ea/ga.py index 2de12f1..0231569 100644 --- a/vega/algorithms/hpo/ea/ga.py +++ b/vega/algorithms/hpo/ea/ga.py @@ -1,12 +1,18 @@ # -*- coding: utf-8 -*- # Copyright (C) 2020. Huawei Technologies Co., Ltd. All rights reserved. -# This program is free software; you can redistribute it and/or modify -# it under the terms of the MIT License. -# This program is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# MIT License for more details. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. """Evolution.""" @@ -28,12 +34,6 @@ def __init__(self, search_space=None, random_samples=32, prob_crossover=0.6, pro self.random_samples = random_samples self.prob_crossover = prob_crossover self.prob_mutatation = prob_mutatation - # self.scores = { - # id: { - # "config": config, - # "score": score, - # }, - # } self.scores = {} self.sample_count = 0 diff --git a/vega/algorithms/hpo/evolution_conf.py b/vega/algorithms/hpo/evolution_conf.py index 0903a03..80b3661 100644 --- a/vega/algorithms/hpo/evolution_conf.py +++ b/vega/algorithms/hpo/evolution_conf.py @@ -1,12 +1,18 @@ # -*- coding:utf-8 -*- # Copyright (C) 2020. Huawei Technologies Co., Ltd. All rights reserved. -# This program is free software; you can redistribute it and/or modify -# it under the terms of the MIT License. -# This program is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# MIT License for more details. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. """Defined Configs.""" from vega.common import ConfigSerializable diff --git a/vega/algorithms/hpo/evolution_search.py b/vega/algorithms/hpo/evolution_search.py index adcf654..ba16b76 100644 --- a/vega/algorithms/hpo/evolution_search.py +++ b/vega/algorithms/hpo/evolution_search.py @@ -1,12 +1,18 @@ # -*- coding: utf-8 -*- # Copyright (C) 2020. Huawei Technologies Co., Ltd. All rights reserved. -# This program is free software; you can redistribute it and/or modify -# it under the terms of the MIT License. -# This program is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# MIT License for more details. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
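# Assumed usage sketch for the tuner field that the BossConfig hunk above adds
# ("RF" stays the default; "TPE" and "GP" are the other values named in the
# inline comment). BossHpo forwards it as BOSS(..., tuner=self.config.tuner).
from vega.algorithms.hpo.boss_conf import BossConfig

BossConfig.tuner = "TPE"  # class-level override; how this is set from a YAML config is assumed, not shown in the patch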
"""EvolutionAlgorithm.""" from collections import OrderedDict @@ -15,8 +21,8 @@ import numpy as np from vega.core.search_algs import SearchAlgorithm from vega.common import ClassFactory, ClassType -from .evolution_conf import EvolutionConfig from vega.report import ReportServer +from .evolution_conf import EvolutionConfig @ClassFactory.register(ClassType.SEARCH_ALGORITHM) diff --git a/vega/algorithms/hpo/grid_hpo.py b/vega/algorithms/hpo/grid_hpo.py new file mode 100644 index 0000000..9e91974 --- /dev/null +++ b/vega/algorithms/hpo/grid_hpo.py @@ -0,0 +1,37 @@ +# -*- coding:utf-8 -*- + +# Copyright (C) 2020. Huawei Technologies Co., Ltd. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""Defined GridSearch class.""" +from vega.common import ClassFactory, ClassType +from .random_hpo import RandomSearch + + +@ClassFactory.register(ClassType.SEARCH_ALGORITHM) +class GridSearch(RandomSearch): + """An Hpo of GridSearch.""" + + def __init__(self, search_space=None, **kwargs): + """Init GridSearch.""" + super(GridSearch, self).__init__(search_space, **kwargs) + self.sample_count = 0 + self.params = self.search_space.get_sample_space(gridding=True) + self.max_sample = len(self.params) + + def search(self): + """Search function, Not Implemented Yet.""" + param = self.search_space.decode(self.params[self.sample_count]) + self.sample_count += 1 + return {"worker_id": self.sample_count, "encoded_desc": param} diff --git a/vega/algorithms/hpo/hpo_base.py b/vega/algorithms/hpo/hpo_base.py index ca07b87..87f5113 100644 --- a/vega/algorithms/hpo/hpo_base.py +++ b/vega/algorithms/hpo/hpo_base.py @@ -1,12 +1,18 @@ # -*- coding:utf-8 -*- # Copyright (C) 2020. Huawei Technologies Co., Ltd. All rights reserved. -# This program is free software; you can redistribute it and/or modify -# it under the terms of the MIT License. -# This program is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# MIT License for more details. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. """Defined AshaHpo class.""" diff --git a/vega/algorithms/hpo/pbt_conf.py b/vega/algorithms/hpo/pbt_conf.py index dac007c..64ad05c 100644 --- a/vega/algorithms/hpo/pbt_conf.py +++ b/vega/algorithms/hpo/pbt_conf.py @@ -1,12 +1,18 @@ # -*- coding:utf-8 -*- # Copyright (C) 2020. Huawei Technologies Co., Ltd. All rights reserved. 
-# This program is free software; you can redistribute it and/or modify -# it under the terms of the MIT License. -# This program is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# MIT License for more details. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. """Defined Configs.""" from vega.common import ConfigSerializable diff --git a/vega/algorithms/hpo/pbt_hpo.py b/vega/algorithms/hpo/pbt_hpo.py index f3204fe..df5ae4b 100644 --- a/vega/algorithms/hpo/pbt_hpo.py +++ b/vega/algorithms/hpo/pbt_hpo.py @@ -1,24 +1,30 @@ # -*- coding:utf-8 -*- # Copyright (C) 2020. Huawei Technologies Co., Ltd. All rights reserved. -# This program is free software; you can redistribute it and/or modify -# it under the terms of the MIT License. -# This program is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# MIT License for more details. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. """Defined PBTHpo class.""" import os import copy import shutil +import logging +import numpy as np from vega.algorithms.hpo.sha_base.pbt import PBT from vega.common import ClassFactory, ClassType from vega.common import FileOps from vega.algorithms.hpo.hpo_base import HPOBase from .pbt_conf import PBTConfig -import numpy as np -import logging @ClassFactory.register(ClassType.SEARCH_ALGORITHM) diff --git a/vega/algorithms/hpo/pbt_trainer_callback.py b/vega/algorithms/hpo/pbt_trainer_callback.py index 7f54133..b09d1a3 100644 --- a/vega/algorithms/hpo/pbt_trainer_callback.py +++ b/vega/algorithms/hpo/pbt_trainer_callback.py @@ -1,10 +1,16 @@ # Copyright (C) 2020. Huawei Technologies Co., Ltd. All rights reserved. -# This program is free software; you can redistribute it and/or modify -# it under the terms of the MIT License. -# This program is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# MIT License for more details. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. """The trainer callback for pbt.""" import logging diff --git a/vega/algorithms/hpo/random_conf.py b/vega/algorithms/hpo/random_conf.py index f69663a..6251041 100644 --- a/vega/algorithms/hpo/random_conf.py +++ b/vega/algorithms/hpo/random_conf.py @@ -1,12 +1,18 @@ # -*- coding:utf-8 -*- # Copyright (C) 2020. Huawei Technologies Co., Ltd. All rights reserved. -# This program is free software; you can redistribute it and/or modify -# it under the terms of the MIT License. -# This program is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# MIT License for more details. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. """Defined Configs.""" from vega.common import ConfigSerializable diff --git a/vega/algorithms/hpo/random_hpo.py b/vega/algorithms/hpo/random_hpo.py index ed23dfc..dda2c46 100644 --- a/vega/algorithms/hpo/random_hpo.py +++ b/vega/algorithms/hpo/random_hpo.py @@ -1,12 +1,18 @@ # -*- coding:utf-8 -*- # Copyright (C) 2020. Huawei Technologies Co., Ltd. All rights reserved. -# This program is free software; you can redistribute it and/or modify -# it under the terms of the MIT License. -# This program is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# MIT License for more details. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. """Defined RandomSearch class.""" from vega.common import ClassFactory, ClassType diff --git a/vega/algorithms/hpo/sha_base/asha.py b/vega/algorithms/hpo/sha_base/asha.py index 8239d30..db32dc1 100644 --- a/vega/algorithms/hpo/sha_base/asha.py +++ b/vega/algorithms/hpo/sha_base/asha.py @@ -1,12 +1,18 @@ # -*- coding: utf-8 -*- # Copyright (C) 2020. Huawei Technologies Co., Ltd. All rights reserved. -# This program is free software; you can redistribute it and/or modify -# it under the terms of the MIT License. 
-# This program is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# MIT License for more details. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. """ Asynchronous Successive Halving Algorithm. @@ -41,12 +47,12 @@ """ import math import logging +import random from math import log import numpy as np -import random +from vega.common.pareto_front import get_pareto from .sha_base import ShaBase from .status_type import StatusType -from vega.common.pareto_front import get_pareto logger = logging.getLogger(__name__) @@ -230,7 +236,7 @@ def _check_completed(self): return False max_rung_id = self.sieve_board['rung_id'].max() - if max_rung_id == self.total_rungs: + if max_rung_id >= self.total_rungs - 1: return True candidate_ids = self._get_top_k_config_ids(max_rung_id) diff --git a/vega/algorithms/hpo/sha_base/bo.py b/vega/algorithms/hpo/sha_base/bo.py index b3b3080..bd09c50 100644 --- a/vega/algorithms/hpo/sha_base/bo.py +++ b/vega/algorithms/hpo/sha_base/bo.py @@ -1,12 +1,18 @@ # -*- coding: utf-8 -*- # Copyright (C) 2020. Huawei Technologies Co., Ltd. All rights reserved. -# This program is free software; you can redistribute it and/or modify -# it under the terms of the MIT License. -# This program is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# MIT License for more details. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. """Bayesian Optimization framework.""" import random diff --git a/vega/algorithms/hpo/sha_base/bohb.py b/vega/algorithms/hpo/sha_base/bohb.py index 6e9c075..18c57ee 100644 --- a/vega/algorithms/hpo/sha_base/bohb.py +++ b/vega/algorithms/hpo/sha_base/bohb.py @@ -1,12 +1,18 @@ # -*- coding: utf-8 -*- # Copyright (C) 2020. Huawei Technologies Co., Ltd. All rights reserved. -# This program is free software; you can redistribute it and/or modify -# it under the terms of the MIT License. -# This program is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# MIT License for more details. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
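The _check_completed change above reads as an off-by-one fix: if rung_id is zero-based, as the sieve_board usage suggests, the largest rung index a config can ever reach is total_rungs - 1, so the old equality test could never fire. A tiny illustration of the boundary, under that assumption:

# Assuming zero-based rung ids.
total_rungs = 3
max_rung_id = 2                          # highest rung index actually reachable
print(max_rung_id == total_rungs)        # False: the old check never triggers
print(max_rung_id >= total_rungs - 1)    # True: the new check detects the final rung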
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. """ @@ -38,13 +44,13 @@ """ import math -from .sha_base import ShaBase import logging +from vega.common.class_factory import ClassFactory, ClassType from .asha import ASHA from .tuner import TunerBuilder from ..ea.ga import GeneticAlgorithm +from .sha_base import ShaBase from .status_type import StatusType -from vega.common.class_factory import ClassFactory, ClassType logger = logging.getLogger(__name__) @@ -134,8 +140,8 @@ def _get_total_iters(self, num_samples, max_epochs, repeat_times, min_epochs=1, iter_list.append(int( count_list[i] - (math.pow(eta, iter) - 1) / (eta - 1))) iter_list.sort(reverse=True) - for i in range(len(iter_list)): - temp_ep = int(min_epochs * math.pow(eta, i)) + for j in range(len(iter_list)): + temp_ep = int(min_epochs * math.pow(eta, j)) if temp_ep > max_epochs: temp_ep = max_epochs min_ep_list.append(temp_ep) diff --git a/vega/algorithms/hpo/sha_base/boss.py b/vega/algorithms/hpo/sha_base/boss.py index 25ab7c6..50e60bc 100644 --- a/vega/algorithms/hpo/sha_base/boss.py +++ b/vega/algorithms/hpo/sha_base/boss.py @@ -1,12 +1,18 @@ # -*- coding: utf-8 -*- # Copyright (C) 2020. Huawei Technologies Co., Ltd. All rights reserved. -# This program is free software; you can redistribute it and/or modify -# it under the terms of the MIT License. -# This program is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# MIT License for more details. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
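The i -> j rename in _get_total_iters looks like a shadowing fix: Python for-loops share one scope, so reusing i for a second loop index would overwrite the index of any enclosing loop that still reads count_list[i] afterwards. Even if the rename here is only stylistic, the pitfall it avoids is real; a compact illustration:

# Nested for-loops share one scope, so an inner `for i ...` overwrites the outer i.
count_list = [9, 3, 1]
for i in range(len(count_list)):
    for i in range(2):        # shadowing: the outer i is clobbered here
        pass
    print(count_list[i])      # always reads count_list[1], not the outer index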
""" @@ -28,11 +34,12 @@ import operator import math -from .sha_base import ShaBase import random +from vega.core.search_space.search_space import SearchSpace +from vega.common.class_factory import ClassFactory, ClassType +from .sha_base import ShaBase from .ssa import SSA from .tuner import TunerBuilder -from vega.core.search_space.search_space import SearchSpace class BOSS(ShaBase): @@ -50,13 +57,16 @@ class BOSS(ShaBase): """ def __init__(self, search_space, num_samples, max_epochs, repeat_times, min_epochs=1, - eta=3): + eta=3, tuner="RF"): """Init BOSS.""" super().__init__(search_space, num_samples, max_epochs, min_epochs, eta) # init all the configs self.repeat_times = repeat_times - self.tuner = TunerBuilder(search_space, tuner='GP') + if tuner == "hebo": + self.tuner = ClassFactory.get_cls(ClassType.SEARCH_ALGORITHM, "HeboAdaptor")(search_space) + else: + self.tuner = TunerBuilder(search_space, tuner=tuner) self.iter_list, self.min_epoch_list = self._get_total_iters( num_samples, max_epochs, self.repeat_times, min_epochs, eta) self.config_dict = {} @@ -64,7 +74,10 @@ def __init__(self, search_space, num_samples, max_epochs, repeat_times, min_epoc # init the empty ssa config list, all ssa object need to be set_config_list self.ssa_list = self._get_ssa_list(self.iter_list, self.min_epoch_list, self.repeat_times, max_epochs) # init the first ssa with first config list - self.config_dict[0] = self.get_hyperparameters(self.iter_list[0]) + if tuner == "hebo": + self.config_dict[0] = self.tuner.propose(self.iter_list[0]) + else: + self.config_dict[0] = self.get_hyperparameters(self.iter_list[0]) self.ssa_list[0].set_config_list(self.config_dict[0], start_id=0) return @@ -106,8 +119,8 @@ def _get_total_iters(self, num_samples, max_epochs, repeat_times, min_epochs=1, iter_list.append(int( count_list[i] - (math.pow(eta, iter) - 1) / (eta - 1))) iter_list.sort(reverse=True) - for i in range(len(iter_list)): - temp_ep = int(min_epochs * math.pow(eta, i)) + for j in range(len(iter_list)): + temp_ep = int(min_epochs * math.pow(eta, j)) if temp_ep > max_epochs: temp_ep = max_epochs min_ep_list.append(temp_ep) diff --git a/vega/algorithms/hpo/sha_base/hebo_adaptor.py b/vega/algorithms/hpo/sha_base/hebo_adaptor.py index 72362b3..b768427 100644 --- a/vega/algorithms/hpo/sha_base/hebo_adaptor.py +++ b/vega/algorithms/hpo/sha_base/hebo_adaptor.py @@ -1,19 +1,29 @@ # -*- coding:utf-8 -*- # Copyright (C) 2020. Huawei Technologies Co., Ltd. All rights reserved. -# This program is free software; you can redistribute it and/or modify -# it under the terms of the MIT License. -# This program is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# MIT License for more details. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
"""Hebo adaptor.""" +import logging import numpy as np -from vega.common.class_factory import ClassFactory, ClassType from hebo.design_space.design_space import DesignSpace from hebo.optimizers.hebo import HEBO +from vega.common.class_factory import ClassFactory, ClassType + + +logging.disable(logging.NOTSET) @ClassFactory.register(ClassType.SEARCH_ALGORITHM) @@ -23,8 +33,9 @@ class HeboAdaptor(object): def __init__(self, search_space=None, **kwargs): """Init BohbHpo.""" space = self._to_hebo_serch_space(search_space) - self.hebo = HEBO(space) + self.hebo = HEBO(space, model_name='gp') self.suggest_template = None + self.search_space = search_space def _to_hebo_serch_space(self, search_space): space = [] @@ -69,11 +80,12 @@ def propose(self, num=1): out = [] for index in list(recs.values())[0].keys(): rec = {key: value[index] for key, value in recs.items()} + rec = self.search_space.verify_constraints(rec) out.append(rec) return out def add(self, config, score): """Add a score.""" rec = self.suggest_template.append(config, ignore_index=True) - score = np.array([[score]]) + score = -1 * np.array([[score]]) return self.hebo.observe(rec, score) diff --git a/vega/algorithms/hpo/sha_base/hyperband.py b/vega/algorithms/hpo/sha_base/hyperband.py index 1f9baef..449ae39 100644 --- a/vega/algorithms/hpo/sha_base/hyperband.py +++ b/vega/algorithms/hpo/sha_base/hyperband.py @@ -1,12 +1,18 @@ # -*- coding: utf-8 -*- # Copyright (C) 2020. Huawei Technologies Co., Ltd. All rights reserved. -# This program is free software; you can redistribute it and/or modify -# it under the terms of the MIT License. -# This program is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# MIT License for more details. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. """ Hyperband: A Novel Bandit-Based Approach to Hyperparameter Optimization. @@ -34,9 +40,9 @@ import operator import math import random +from vega.core.search_space import SearchSpace from .sha_base import ShaBase from .sha import SHA -from vega.core.search_space import SearchSpace class HyperBand(ShaBase): diff --git a/vega/algorithms/hpo/sha_base/pbt.py b/vega/algorithms/hpo/sha_base/pbt.py index 28378e8..b046a9d 100644 --- a/vega/algorithms/hpo/sha_base/pbt.py +++ b/vega/algorithms/hpo/sha_base/pbt.py @@ -1,12 +1,18 @@ # -*- coding: utf-8 -*- # Copyright (C) 2020. Huawei Technologies Co., Ltd. All rights reserved. -# This program is free software; you can redistribute it and/or modify -# it under the terms of the MIT License. -# This program is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# MIT License for more details. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. """ Population Based Training Algorithm. @@ -28,11 +34,11 @@ """ import operator import shutil +import copy +from enum import Enum import numpy as np import pandas as pd from vega.common import FileOps -import copy -from enum import Enum class StatusType(Enum): diff --git a/vega/algorithms/hpo/sha_base/sha.py b/vega/algorithms/hpo/sha_base/sha.py index d84e46a..346a24c 100644 --- a/vega/algorithms/hpo/sha_base/sha.py +++ b/vega/algorithms/hpo/sha_base/sha.py @@ -1,12 +1,18 @@ # -*- coding: utf-8 -*- # Copyright (C) 2020. Huawei Technologies Co., Ltd. All rights reserved. -# This program is free software; you can redistribute it and/or modify -# it under the terms of the MIT License. -# This program is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# MIT License for more details. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. """ Successive Halving Algorithm. @@ -32,10 +38,10 @@ """ import operator import math +import random from math import log from .sha_base import ShaBase from .status_type import StatusType -import random class SHA(ShaBase): diff --git a/vega/algorithms/hpo/sha_base/sha_base.py b/vega/algorithms/hpo/sha_base/sha_base.py index d712358..f213cf3 100644 --- a/vega/algorithms/hpo/sha_base/sha_base.py +++ b/vega/algorithms/hpo/sha_base/sha_base.py @@ -1,12 +1,18 @@ # -*- coding:utf-8 -*- # Copyright (C) 2020. Huawei Technologies Co., Ltd. All rights reserved. -# This program is free software; you can redistribute it and/or modify -# it under the terms of the MIT License. -# This program is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# MIT License for more details. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
"""ShaBase class.""" import numpy as np diff --git a/vega/algorithms/hpo/sha_base/ssa.py b/vega/algorithms/hpo/sha_base/ssa.py index 8c3b72a..9473a86 100644 --- a/vega/algorithms/hpo/sha_base/ssa.py +++ b/vega/algorithms/hpo/sha_base/ssa.py @@ -1,12 +1,18 @@ # -*- coding: utf-8 -*- # Copyright (C) 2020. Huawei Technologies Co., Ltd. All rights reserved. -# This program is free software; you can redistribute it and/or modify -# it under the terms of the MIT License. -# This program is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# MIT License for more details. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. r""" @@ -31,9 +37,9 @@ import operator import math from math import log +import random from .sha_base import ShaBase from .status_type import StatusType -import random class SSA(ShaBase): @@ -182,7 +188,11 @@ def _get_top_k_config_ids(self, k): leader_score = leader_score_df[leader_score_df['rung_id'] == rung_max] if current_score.empty or leader_score.empty: rung_max = rung_max - 1 + current_score = 0 + leader_score = 0 else: + current_score = current_score['score'].values + leader_score = leader_score['score'].values break sub_score = current_score - leader_score score_list.append((current_id, sub_score)) diff --git a/vega/algorithms/hpo/sha_base/status_type.py b/vega/algorithms/hpo/sha_base/status_type.py index 5df6246..e75e17a 100644 --- a/vega/algorithms/hpo/sha_base/status_type.py +++ b/vega/algorithms/hpo/sha_base/status_type.py @@ -1,12 +1,18 @@ # -*- coding:utf-8 -*- # Copyright (C) 2020. Huawei Technologies Co., Ltd. All rights reserved. -# This program is free software; you can redistribute it and/or modify -# it under the terms of the MIT License. -# This program is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# MIT License for more details. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. """StatusType.""" from enum import Enum diff --git a/vega/algorithms/hpo/sha_base/tuner/ParzenEstimator.py b/vega/algorithms/hpo/sha_base/tuner/ParzenEstimator.py index eeef239..eeecceb 100644 --- a/vega/algorithms/hpo/sha_base/tuner/ParzenEstimator.py +++ b/vega/algorithms/hpo/sha_base/tuner/ParzenEstimator.py @@ -1,12 +1,18 @@ # -*- coding:utf-8 -*- # Copyright (C) 2020. Huawei Technologies Co., Ltd. All rights reserved. 
-# This program is free software; you can redistribute it and/or modify -# it under the terms of the MIT License. -# This program is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# MIT License for more details. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. """Class of ParzenEstimator.""" import numpy as np diff --git a/vega/algorithms/hpo/sha_base/tuner/acquire_function.py b/vega/algorithms/hpo/sha_base/tuner/acquire_function.py index 0e25c72..13058b5 100644 --- a/vega/algorithms/hpo/sha_base/tuner/acquire_function.py +++ b/vega/algorithms/hpo/sha_base/tuner/acquire_function.py @@ -1,16 +1,22 @@ # -*- coding:utf-8 -*- # Copyright (C) 2020. Huawei Technologies Co., Ltd. All rights reserved. -# This program is free software; you can redistribute it and/or modify -# it under the terms of the MIT License. -# This program is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# MIT License for more details. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. """Acquire functions.""" -import numpy as np import random +import numpy as np from scipy.stats import norm, multivariate_normal diff --git a/vega/algorithms/hpo/sha_base/tuner/double_gaussian.py b/vega/algorithms/hpo/sha_base/tuner/double_gaussian.py index 53eb618..51c18e2 100644 --- a/vega/algorithms/hpo/sha_base/tuner/double_gaussian.py +++ b/vega/algorithms/hpo/sha_base/tuner/double_gaussian.py @@ -1,12 +1,18 @@ # -*- coding:utf-8 -*- # Copyright (C) 2020. Huawei Technologies Co., Ltd. All rights reserved. -# This program is free software; you can redistribute it and/or modify -# it under the terms of the MIT License. -# This program is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# MIT License for more details. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+# See the License for the specific language governing permissions and +# limitations under the License. """Class of DoubleMultiGaussian.""" from sklearn.mixture import GaussianMixture diff --git a/vega/algorithms/hpo/sha_base/tuner/rfr.py b/vega/algorithms/hpo/sha_base/tuner/rfr.py index a658a68..ef6c44a 100644 --- a/vega/algorithms/hpo/sha_base/tuner/rfr.py +++ b/vega/algorithms/hpo/sha_base/tuner/rfr.py @@ -1,12 +1,18 @@ # -*- coding: utf-8 -*- # Copyright (C) 2020. Huawei Technologies Co., Ltd. All rights reserved. -# This program is free software; you can redistribute it and/or modify -# it under the terms of the MIT License. -# This program is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# MIT License for more details. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. """RandomForestWithStdRegressor.""" import numpy as np diff --git a/vega/algorithms/hpo/sha_base/tuner/tuner_builder.py b/vega/algorithms/hpo/sha_base/tuner/tuner_builder.py index 7017b29..92f88fa 100644 --- a/vega/algorithms/hpo/sha_base/tuner/tuner_builder.py +++ b/vega/algorithms/hpo/sha_base/tuner/tuner_builder.py @@ -1,16 +1,22 @@ # -*- coding: utf-8 -*- # Copyright (C) 2020. Huawei Technologies Co., Ltd. All rights reserved. -# This program is free software; you can redistribute it and/or modify -# it under the terms of the MIT License. -# This program is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# MIT License for more details. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. """Base Tuner.""" -import numpy as np import logging +import numpy as np from .tuner_model import TunerModel from .acquire_function import expected_improvement, thompson_sampling diff --git a/vega/algorithms/hpo/sha_base/tuner/tuner_model.py b/vega/algorithms/hpo/sha_base/tuner/tuner_model.py index b549c77..2b7dd1a 100644 --- a/vega/algorithms/hpo/sha_base/tuner/tuner_model.py +++ b/vega/algorithms/hpo/sha_base/tuner/tuner_model.py @@ -1,12 +1,18 @@ # -*- coding:utf-8 -*- # Copyright (C) 2020. Huawei Technologies Co., Ltd. All rights reserved. -# This program is free software; you can redistribute it and/or modify -# it under the terms of the MIT License. 
-# This program is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# MIT License for more details. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. """Class of Tuner model.""" import warnings @@ -15,8 +21,8 @@ from sklearn.gaussian_process.kernels import (Matern) from sklearn.pipeline import make_pipeline from sklearn.preprocessing import StandardScaler -from .rfr import RandomForestWithStdRegressor from vega.algorithms.hpo.sha_base.tuner.ParzenEstimator import ParzenEstimator +from .rfr import RandomForestWithStdRegressor class TunerModel(object): diff --git a/vega/algorithms/nas/__init__.py b/vega/algorithms/nas/__init__.py index 1fe2243..b992f85 100644 --- a/vega/algorithms/nas/__init__.py +++ b/vega/algorithms/nas/__init__.py @@ -1,12 +1,18 @@ # -*- coding:utf-8 -*- # Copyright (C) 2020. Huawei Technologies Co., Ltd. All rights reserved. -# This program is free software; you can redistribute it and/or modify -# it under the terms of the MIT License. -# This program is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# MIT License for more details. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. """Lazy import nas algorithms.""" @@ -31,5 +37,5 @@ "sr_ea": ["SRCodec", "SRMutate", "SRRandom"], "mfasc": ["search_algorithm:MFASC"], "opt_nas": ["OperatorSearchSpace", "OperatorReplaceCallback"], - "dag_mutate": ["DAGMutateSearchSpace"] + "dag_block_nas": ["DAGBlockNas"], }) diff --git a/vega/algorithms/nas/adelaide_ea/adelaide_ea_codec.py b/vega/algorithms/nas/adelaide_ea/adelaide_ea_codec.py index 986b6a7..bb39978 100644 --- a/vega/algorithms/nas/adelaide_ea/adelaide_ea_codec.py +++ b/vega/algorithms/nas/adelaide_ea/adelaide_ea_codec.py @@ -1,12 +1,18 @@ # -*- coding:utf-8 -*- # Copyright (C) 2020. Huawei Technologies Co., Ltd. All rights reserved. -# This program is free software; you can redistribute it and/or modify -# it under the terms of the MIT License. -# This program is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# MIT License for more details. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. """Encode and decode the model config.""" import numpy as np diff --git a/vega/algorithms/nas/adelaide_ea/adelaide_mutate.py b/vega/algorithms/nas/adelaide_ea/adelaide_mutate.py index 153dc6c..8ecec02 100644 --- a/vega/algorithms/nas/adelaide_ea/adelaide_mutate.py +++ b/vega/algorithms/nas/adelaide_ea/adelaide_mutate.py @@ -1,23 +1,27 @@ # -*- coding:utf-8 -*- # Copyright (C) 2020. Huawei Technologies Co., Ltd. All rights reserved. -# This program is free software; you can redistribute it and/or modify -# it under the terms of the MIT License. -# This program is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# MIT License for more details. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. """Random search algorithm for AdelaideEA.""" import logging import random from copy import deepcopy - -from .conf import AdelaideConfig from vega.common import ClassFactory, ClassType -from vega.common import FileOps from vega.report import ReportServer from vega.core.search_algs import SearchAlgorithm +from .conf import AdelaideConfig @ClassFactory.register(ClassType.SEARCH_ALGORITHM) @@ -34,26 +38,6 @@ def __init__(self, search_space=None): super(AdelaideMutate, self).__init__(search_space) self.max_sample = self.config.max_sample self.sample_count = 0 - self._copy_needed_file() - - def _copy_needed_file(self): - if self.config.pareto_front_file is None: - raise FileNotFoundError( - "Config item paretor_front_file not found in config file.") - init_pareto_front_file = self.config.pareto_front_file.replace( - "{local_base_path}", self.local_base_path) - self.pareto_front_file = FileOps.join_path( - self.local_output_path, self.step_name, "pareto_front.csv") - FileOps.make_base_dir(self.pareto_front_file) - FileOps.copy_file(init_pareto_front_file, self.pareto_front_file) - if self.config.random_file is None: - raise FileNotFoundError( - "Config item random_file not found in config file.") - init_random_file = self.config.random_file.replace( - "{local_base_path}", self.local_base_path) - self.random_file = FileOps.join_path( - self.local_output_path, self.step_name, "random.csv") - FileOps.copy_file(init_random_file, self.random_file) @property def is_completed(self): diff --git a/vega/algorithms/nas/adelaide_ea/adelaide_random.py b/vega/algorithms/nas/adelaide_ea/adelaide_random.py index ec0a2f8..095ae53 100644 --- a/vega/algorithms/nas/adelaide_ea/adelaide_random.py +++ b/vega/algorithms/nas/adelaide_ea/adelaide_random.py @@ -1,19 +1,25 @@ # -*- coding:utf-8 -*- # 
Copyright (C) 2020. Huawei Technologies Co., Ltd. All rights reserved. -# This program is free software; you can redistribute it and/or modify -# it under the terms of the MIT License. -# This program is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# MIT License for more details. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. """Random search algorithm for Adelaide EA.""" import random from copy import deepcopy -from .conf import AdelaideConfig from vega.common import ClassFactory, ClassType from vega.core.search_algs import SearchAlgorithm +from .conf import AdelaideConfig @ClassFactory.register(ClassType.SEARCH_ALGORITHM) diff --git a/vega/algorithms/nas/adelaide_ea/adelaide_trainer_callback.py b/vega/algorithms/nas/adelaide_ea/adelaide_trainer_callback.py index a311bd7..fc36534 100644 --- a/vega/algorithms/nas/adelaide_ea/adelaide_trainer_callback.py +++ b/vega/algorithms/nas/adelaide_ea/adelaide_trainer_callback.py @@ -1,12 +1,18 @@ # -*- coding:utf-8 -*- # Copyright (C) 2020. Huawei Technologies Co., Ltd. All rights reserved. -# This program is free software; you can redistribute it and/or modify -# it under the terms of the MIT License. -# This program is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# MIT License for more details. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. """The trainer program for Adelaide_EA.""" import logging diff --git a/vega/algorithms/nas/adelaide_ea/conf.py b/vega/algorithms/nas/adelaide_ea/conf.py index debd05d..87a8ee3 100644 --- a/vega/algorithms/nas/adelaide_ea/conf.py +++ b/vega/algorithms/nas/adelaide_ea/conf.py @@ -1,12 +1,18 @@ # -*- coding:utf-8 -*- # Copyright (C) 2020. Huawei Technologies Co., Ltd. All rights reserved. -# This program is free software; you can redistribute it and/or modify -# it under the terms of the MIT License. -# This program is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# MIT License for more details. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. """Defined Configs.""" from vega.common import ConfigSerializable @@ -17,7 +23,6 @@ class AdelaideConfig(ConfigSerializable): codec = 'AdelaideCodec' max_sample = 10 - pareto_front_file = "{local_base_path}/output/random/pareto_front.csv" random_file = "{local_base_path}/output/random/random.csv" objective_keys = ['IoUMetric', 'flops'] @@ -26,7 +31,6 @@ def rules(cls): """Return rules for checking.""" rules_AdelaideConfig = {"codec": {"type": str}, "max_sample": {"type": int}, - "pareto_front_file": {"type": str}, "random_file": {"type": str}, "objective_keys": {"type": (list, str)} } diff --git a/vega/algorithms/nas/auto_lane/auto_lane_nas_algorithm.py b/vega/algorithms/nas/auto_lane/auto_lane_nas_algorithm.py index fd4b3ba..8fcddfe 100644 --- a/vega/algorithms/nas/auto_lane/auto_lane_nas_algorithm.py +++ b/vega/algorithms/nas/auto_lane/auto_lane_nas_algorithm.py @@ -1,27 +1,33 @@ # -*- coding:utf-8 -*- # Copyright (C) 2020. Huawei Technologies Co., Ltd. All rights reserved. -# This program is free software; you can redistribute it and/or modify -# it under the terms of the MIT License. -# This program is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# MIT License for more details. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. """AutoLane algorithm.""" import os import random import logging +import pandas as pd from vega.core.search_algs import SearchAlgorithm from vega.common import FileOps -from .utils.resnet_variant_det_codec import ResNetVariantDetCodec -from .utils.resnext_variant_det_codec import ResNeXtVariantDetCodec -import pandas as pd -from .conf import AutoLaneConfig from vega.report import ReportServer from vega.common import ClassType, ClassFactory from vega.common.config import Config from vega.common import update_dict +from .utils.resnet_variant_det_codec import ResNetVariantDetCodec +from .utils.resnext_variant_det_codec import ResNeXtVariantDetCodec +from .conf import AutoLaneConfig @ClassFactory.register(ClassType.SEARCH_ALGORITHM) diff --git a/vega/algorithms/nas/auto_lane/auto_lane_nas_codec.py b/vega/algorithms/nas/auto_lane/auto_lane_nas_codec.py index 16623de..1bfae67 100644 --- a/vega/algorithms/nas/auto_lane/auto_lane_nas_codec.py +++ b/vega/algorithms/nas/auto_lane/auto_lane_nas_codec.py @@ -1,21 +1,26 @@ # -*- coding:utf-8 -*- # Copyright (C) 2020. Huawei Technologies Co., Ltd. All rights reserved. -# This program is free software; you can redistribute it and/or modify -# it under the terms of the MIT License. 
-# This program is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# MIT License for more details. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. """Define BackboneNasCodec.""" - -from .utils.str2dict import str2dict -from .utils.resnet_variant_det_codec import ResNetVariantDetCodec -from .utils.resnext_variant_det_codec import ResNeXtVariantDetCodec from vega.common import ClassType, ClassFactory from vega.core.search_algs.codec import Codec from vega.common import Config +from .utils.str2dict import str2dict +from .utils.resnet_variant_det_codec import ResNetVariantDetCodec +from .utils.resnext_variant_det_codec import ResNeXtVariantDetCodec @ClassFactory.register(ClassType.CODEC) @@ -74,8 +79,7 @@ def decode(self, sample): CodecSpec = decoder_map.get(backbone_code[0], None) if CodecSpec is None: raise NotImplementedError(f'Only {decoder_map} is support in auto_lane algorithm') - generator = CodecSpec(**CodecSpec.arch_decoder(backbone_code)) - backbone_desc = str2dict(generator.config) + backbone_desc = CodecSpec(**CodecSpec.arch_decoder(backbone_code)).config neck_desc = dict( arch_code=ffm_code, type='FeatureFusionModule', diff --git a/vega/algorithms/nas/auto_lane/auto_lane_trainer_callback.py b/vega/algorithms/nas/auto_lane/auto_lane_trainer_callback.py index 9f19003..447402f 100644 --- a/vega/algorithms/nas/auto_lane/auto_lane_trainer_callback.py +++ b/vega/algorithms/nas/auto_lane/auto_lane_trainer_callback.py @@ -1,12 +1,18 @@ # -*- coding:utf-8 -*- # Copyright (C) 2020. Huawei Technologies Co., Ltd. All rights reserved. -# This program is free software; you can redistribute it and/or modify -# it under the terms of the MIT License. -# This program is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# MIT License for more details. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
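The decode() simplification above reads together with the later change that makes the codecs' config property return a dict instead of a dict2str-formatted string, so the str2dict round-trip is no longer needed. A minimal sketch of that design with a hypothetical stand-in codec:

class CodecSpecSketch:
    """Hypothetical stand-in for ResNetVariantDetCodec / ResNeXtVariantDetCodec."""

    def __init__(self, arch):
        self.arch = arch

    @property
    def config(self):
        # Returning a plain dict lets callers consume the description directly.
        return {'type': 'ResNetVariantDet', 'arch': self.arch}

backbone_desc = CodecSpecSketch(arch='111-2111-211111-211').config  # arch code is illustrative only
print(backbone_desc['type'])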
"""The trainer program for Auto Lane.""" @@ -28,7 +34,6 @@ def logger_patch(self): """Patch the default logger.""" worker_path = self.trainer.get_local_worker_path() worker_spec_log_file = FileOps.join_path(worker_path, 'current_worker.log') - logger = logging.getLogger(__name__) for hdlr in logger.handlers: logger.removeHandler(hdlr) for hdlr in logging.root.handlers: @@ -82,10 +87,6 @@ def train_step(self, batch): 'loc_loss': loss_loc.item(), 'train_batch_output': None} - # def before_valid(self, logs=None): - # """Be called before a batch validation.""" - # epochs = self.params['epochs'] - def valid_step(self, batch): """Be called on each batch validing.""" self.trainer.model.eval() diff --git a/vega/algorithms/nas/auto_lane/conf.py b/vega/algorithms/nas/auto_lane/conf.py index 0bac3ee..3b17944 100644 --- a/vega/algorithms/nas/auto_lane/conf.py +++ b/vega/algorithms/nas/auto_lane/conf.py @@ -1,12 +1,18 @@ # -*- coding:utf-8 -*- # Copyright (C) 2020. Huawei Technologies Co., Ltd. All rights reserved. -# This program is free software; you can redistribute it and/or modify -# it under the terms of the MIT License. -# This program is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# MIT License for more details. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. """Defined Configs.""" from vega.common import ConfigSerializable diff --git a/vega/algorithms/nas/auto_lane/utils/backbone_codec.py b/vega/algorithms/nas/auto_lane/utils/backbone_codec.py index 44dc2f9..1717539 100644 --- a/vega/algorithms/nas/auto_lane/utils/backbone_codec.py +++ b/vega/algorithms/nas/auto_lane/utils/backbone_codec.py @@ -1,12 +1,18 @@ # -*- coding:utf-8 -*- # Copyright (C) 2020. Huawei Technologies Co., Ltd. All rights reserved. -# This program is free software; you can redistribute it and/or modify -# it under the terms of the MIT License. -# This program is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# MIT License for more details. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
"""The base class of backbone codec.""" diff --git a/vega/algorithms/nas/auto_lane/utils/listdict.py b/vega/algorithms/nas/auto_lane/utils/listdict.py index 39fa1cb..03b2ac8 100644 --- a/vega/algorithms/nas/auto_lane/utils/listdict.py +++ b/vega/algorithms/nas/auto_lane/utils/listdict.py @@ -1,12 +1,18 @@ # -*- coding:utf-8 -*- # Copyright (C) 2020. Huawei Technologies Co., Ltd. All rights reserved. -# This program is free software; you can redistribute it and/or modify -# it under the terms of the MIT License. -# This program is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# MIT License for more details. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. """Class of list dict.""" diff --git a/vega/algorithms/nas/auto_lane/utils/resnet_variant_codec.py b/vega/algorithms/nas/auto_lane/utils/resnet_variant_codec.py index 968b23e..2d18fc3 100644 --- a/vega/algorithms/nas/auto_lane/utils/resnet_variant_codec.py +++ b/vega/algorithms/nas/auto_lane/utils/resnet_variant_codec.py @@ -1,21 +1,27 @@ # -*- coding:utf-8 -*- # Copyright (C) 2020. Huawei Technologies Co., Ltd. All rights reserved. -# This program is free software; you can redistribute it and/or modify -# it under the terms of the MIT License. -# This program is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# MIT License for more details. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. """The ResNet_Variant for encode.""" -from .str2dict import dict2str -from .listdict import ListDict -from collections import OrderedDict -from .backbone_codec import Backbone import random -import numpy as np import re +import logging +from collections import OrderedDict +import numpy as np +from .listdict import ListDict +from .backbone_codec import Backbone class ResNet_Variant(Backbone): @@ -52,7 +58,6 @@ def __init__(self, **kwargs): """Contruct ResNet_Variant encoder.""" super(ResNet_Variant, self).__init__(*args, **kwargs) - # set sampled params self.arch = arch self.base_channel = int(base_channel) self.depth = self.base_depth = base_depth @@ -60,7 +65,6 @@ def __init__(self, if block not in ['BasicBlock', 'Bottleneck']: raise Exception('Invalid block name. 
(should be BasicBlock or Bottleneck)') expansion = 1 if block == 'BasicBlock' else 4 - # other params if self.train_from_scratch: self.zero_init_residual = False self.frozen_stages = -1 @@ -73,7 +77,6 @@ def __init__(self, self.dilations = self._base_dilations[:self.num_stages] self.out_indices = self._base_out_indices[:] if self.with_neck else (2,) self.out_strides = [2 ** (i + 2) for i in range(self.num_stages)] if self.with_neck else [16] - # out channels num_scale = 0 self.out_channels = [] for stage in range(self.num_stages): @@ -145,9 +148,9 @@ def sample(cls, method='random', base_depth=50, base_arch=None, - sampled_archs=[], + sampled_archs=None, flops_constraint=None, - EA_setting=dict(num_mutate=3), + EA_setting=None, fore_part=None, max_sample_num=100000, **kwargs @@ -172,13 +175,16 @@ def sample(cls, :type max_sample_num: int :return: model dict """ + if sampled_archs is None: + sampled_archs = [] + if EA_setting is None: + EA_setting = dict(num_mutate=3) if flops_constraint is None: low_flops, high_flops = 0, float('inf') else: low_flops, high_flops = flops_constraint sample_num = 0 discard = ListDict() - # params = cls.quest_param(fore_part=fore_part, **kwargs) params = {} while sample_num < max_sample_num: sample_num += 1 @@ -191,11 +197,9 @@ def sample(cls, params.update(cls.arch_decoder(arch_code=base_arch)) else: raise ValueError('Unrecognized sample method {}.') - # construct config net = cls(**params, base_depth=base_depth, fore_part=fore_part) exist = net.name in sampled_archs + discard['arch'] success = low_flops <= net.flops_ratio <= high_flops - # state = 'Exist' if exist else 'Success' * success + 'Discard' * (not success) flops_info = '{}({})'.format(net.flops, net.flops_ratio) if exist: continue @@ -217,7 +221,6 @@ def random_sample(cls, base_channel, with_neck=True): else: num_reduction, num_stage = 2, 3 arch_space = cls.attr_space['arch'] - # base_channel = random.choice(cls.attr_space['base_channel']) length = random.randint(*arch_space['num_block']) arch = ['1'] * length position = np.random.choice(length, size=num_reduction, replace=False) @@ -295,6 +298,7 @@ def _swap(arch, R): arch[idx], arch[idx + direction] = arch[idx + direction], arch[idx] break except Exception: + logging.debug("Arch is not match, continue.") continue ops.append('swap:{}&{}'.format(idx, idx + direction)) return arch @@ -318,7 +322,7 @@ def is_valid(arch): min_block, max_block = arch_space['num_block'] params = cls.arch_decoder(base_arch) base_channel, base_arch = params.get('base_channel'), params.get('arch') - while True: # whether to mutate base_channel + while True: ops = [] new_arch = list(base_arch) new_channel = base_channel @@ -337,6 +341,7 @@ def is_valid(arch): else: raise Exception('operation index out of range') except Exception: + logging.debug("Arch is not match, continue.") continue new_arch = ''.join(new_arch) if is_valid(new_arch) and (new_arch != base_arch or new_channel != base_channel): @@ -366,4 +371,4 @@ def config(self): conv_cfg=self.conv_cfg, out_channels=self.out_channels, style='pytorch') - return dict2str(config, tab=2) + return config diff --git a/vega/algorithms/nas/auto_lane/utils/resnet_variant_det_codec.py b/vega/algorithms/nas/auto_lane/utils/resnet_variant_det_codec.py index 8f38a52..f24785c 100644 --- a/vega/algorithms/nas/auto_lane/utils/resnet_variant_det_codec.py +++ b/vega/algorithms/nas/auto_lane/utils/resnet_variant_det_codec.py @@ -1,21 +1,28 @@ # -*- coding:utf-8 -*- # Copyright (C) 2020. Huawei Technologies Co., Ltd. All rights reserved. 
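The sampled_archs=[] and EA_setting=dict(num_mutate=3) defaults were replaced with None plus in-body initialization, avoiding Python's shared-mutable-default pitfall. A short demonstration of why that matters:

def sample_bad(sampled_archs=[]):
    # The list is created once at definition time and shared by every call.
    sampled_archs.append('arch')
    return sampled_archs

def sample_good(sampled_archs=None):
    if sampled_archs is None:
        sampled_archs = []            # fresh list per call, as in the patched signature
    sampled_archs.append('arch')
    return sampled_archs

print(sample_bad())    # ['arch']
print(sample_bad())    # ['arch', 'arch']  <- state leaked from the first call
print(sample_good())   # ['arch']
print(sample_good())   # ['arch']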
-# This program is free software; you can redistribute it and/or modify -# it under the terms of the MIT License. -# This program is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# MIT License for more details. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. """The ResNet_Variant for encode.""" -from .str2dict import dict2str -from .listdict import ListDict -from collections import OrderedDict -from .backbone_codec import Backbone + import random -import numpy as np import re +import logging +from collections import OrderedDict +import numpy as np +from .listdict import ListDict +from .backbone_codec import Backbone class ResNetVariantDetCodec(Backbone): @@ -52,7 +59,6 @@ def __init__(self, **kwargs): """Contruct ResNet_Variant encoder.""" super(ResNetVariantDetCodec, self).__init__(*args, **kwargs) - # set sampled params self.arch = arch self.base_channel = int(base_channel) self.depth = self.base_depth = base_depth @@ -60,7 +66,6 @@ def __init__(self, if block not in ['BasicBlock', 'Bottleneck']: raise Exception('Invalid block name. (should be BasicBlock or Bottleneck)') expansion = 1 if block == 'BasicBlock' else 4 - # other params if self.train_from_scratch: self.zero_init_residual = False self.frozen_stages = -1 @@ -73,7 +78,6 @@ def __init__(self, self.dilations = self._base_dilations[:self.num_stages] self.out_indices = self._base_out_indices[:] if self.with_neck else (2,) self.out_strides = [2 ** (i + 2) for i in range(self.num_stages)] if self.with_neck else [16] - # out channels num_scale = 0 self.out_channels = [] for stage in range(self.num_stages): @@ -145,9 +149,9 @@ def sample(cls, method='random', base_depth=50, base_arch=None, - sampled_archs=[], + sampled_archs=None, flops_constraint=None, - EA_setting=dict(num_mutate=3), + EA_setting=None, fore_part=None, max_sample_num=100000, **kwargs @@ -172,13 +176,16 @@ def sample(cls, :type max_sample_num: int :return: model dict """ + if sampled_archs is None: + sampled_archs = [] + if EA_setting is None: + EA_setting = dict(num_mutate=3) if flops_constraint is None: low_flops, high_flops = 0, float('inf') else: low_flops, high_flops = flops_constraint sample_num = 0 discard = ListDict() - # params = cls.quest_param(fore_part=fore_part, **kwargs) params = {} while sample_num < max_sample_num: sample_num += 1 @@ -191,11 +198,9 @@ def sample(cls, params.update(cls.arch_decoder(arch_code=base_arch)) else: raise ValueError('Unrecognized sample method {}.') - # construct config net = cls(**params, base_depth=base_depth, fore_part=fore_part) exist = net.name in sampled_archs + discard['arch'] success = low_flops <= net.flops_ratio <= high_flops - # state = 'Exist' if exist else 'Success' * success + 'Discard' * (not success) flops_info = '{}({})'.format(net.flops, net.flops_ratio) if exist: continue @@ -294,6 +299,7 @@ def _swap(arch, R): arch[idx], arch[idx + direction] = arch[idx + direction], arch[idx] 
break except Exception: + logging.debug("Arch is not match, continue.") continue ops.append('swap:{}&{}'.format(idx, idx + direction)) return arch @@ -317,7 +323,7 @@ def is_valid(arch): min_block, max_block = arch_space['num_block'] params = cls.arch_decoder(base_arch) base_channel, base_arch = params.get('base_channel'), params.get('arch') - while True: # whether to mutate base_channel + while True: ops = [] new_arch = list(base_arch) new_channel = base_channel @@ -336,6 +342,7 @@ def is_valid(arch): else: raise Exception('operation index out of range') except Exception: + logging.debug("Arch is not match, continue.") continue new_arch = ''.join(new_arch) if is_valid(new_arch) and (new_arch != base_arch or new_channel != base_channel): @@ -365,4 +372,4 @@ def config(self): conv_cfg=self.conv_cfg, out_channels=self.out_channels, style='pytorch') - return dict2str(config, tab=2) + return config diff --git a/vega/algorithms/nas/auto_lane/utils/resnext_all_variant_codec.py b/vega/algorithms/nas/auto_lane/utils/resnext_all_variant_codec.py index 06b8f32..429857c 100644 --- a/vega/algorithms/nas/auto_lane/utils/resnext_all_variant_codec.py +++ b/vega/algorithms/nas/auto_lane/utils/resnext_all_variant_codec.py @@ -1,18 +1,24 @@ # -*- coding:utf-8 -*- # Copyright (C) 2020. Huawei Technologies Co., Ltd. All rights reserved. -# This program is free software; you can redistribute it and/or modify -# it under the terms of the MIT License. -# This program is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# MIT License for more details. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. """The ResNeXt_Variant for encode.""" -from vega.algorithms.nas.auto_lane.utils.str2dict import dict2str +import re from collections import OrderedDict +from vega.algorithms.nas.auto_lane.utils.str2dict import dict2str from vega.algorithms.nas.auto_lane.utils.resnet_variant_codec import ResNet_Variant -import re class ResNeXt_all_Variant(ResNet_Variant): @@ -129,4 +135,4 @@ def config(self): conv_cfg=self.conv_cfg, out_channels=self.out_channels, style='pytorch') - return dict2str(config, tab=2) + return config diff --git a/vega/algorithms/nas/auto_lane/utils/resnext_variant_det_codec.py b/vega/algorithms/nas/auto_lane/utils/resnext_variant_det_codec.py index 0c1e255..d5ab5a7 100644 --- a/vega/algorithms/nas/auto_lane/utils/resnext_variant_det_codec.py +++ b/vega/algorithms/nas/auto_lane/utils/resnext_variant_det_codec.py @@ -1,18 +1,24 @@ # -*- coding:utf-8 -*- # Copyright (C) 2020. Huawei Technologies Co., Ltd. All rights reserved. -# This program is free software; you can redistribute it and/or modify -# it under the terms of the MIT License. -# This program is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# MIT License for more details. 
+# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. """The ResNeXt_Variant for encode.""" -from vega.algorithms.nas.auto_lane.utils.str2dict import dict2str +import re from collections import OrderedDict +from vega.algorithms.nas.auto_lane.utils.str2dict import dict2str from vega.algorithms.nas.auto_lane.utils.resnet_variant_det_codec import ResNetVariantDetCodec -import re class ResNeXtVariantDetCodec(ResNetVariantDetCodec): @@ -129,4 +135,4 @@ def config(self): conv_cfg=self.conv_cfg, out_channels=self.out_channels, style='pytorch') - return dict2str(config, tab=2) + return config diff --git a/vega/algorithms/nas/auto_lane/utils/str2dict.py b/vega/algorithms/nas/auto_lane/utils/str2dict.py index a9f7966..00d7795 100644 --- a/vega/algorithms/nas/auto_lane/utils/str2dict.py +++ b/vega/algorithms/nas/auto_lane/utils/str2dict.py @@ -1,14 +1,21 @@ # -*- coding:utf-8 -*- # Copyright (C) 2020. Huawei Technologies Co., Ltd. All rights reserved. -# This program is free software; you can redistribute it and/or modify -# it under the terms of the MIT License. -# This program is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# MIT License for more details. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. """Utilities for the conversion between str and dict.""" +import ast def str_warp(str_): @@ -39,7 +46,7 @@ def str2dict(str_): raise TypeError('"str_" must be a string, not {}'.format(type(str_))) # to keep the keys order str_.replace('dict(', 'OrderedDict(') - return eval(str_) + return ast.literal_eval(str_) def dict2str(dict_, tab=0, format_first_line=False, in_one_line=False): diff --git a/vega/algorithms/nas/backbone_nas/backbone_nas.py b/vega/algorithms/nas/backbone_nas/backbone_nas.py index 2cbcd2b..496c088 100644 --- a/vega/algorithms/nas/backbone_nas/backbone_nas.py +++ b/vega/algorithms/nas/backbone_nas/backbone_nas.py @@ -1,17 +1,23 @@ # -*- coding:utf-8 -*- # Copyright (C) 2020. Huawei Technologies Co., Ltd. All rights reserved. -# This program is free software; you can redistribute it and/or modify -# it under the terms of the MIT License. -# This program is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# MIT License for more details. 
+# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. """Defined BackboneNas.""" import random -import numpy as np import logging +import numpy as np from vega.core.search_algs import SearchAlgorithm from vega.core.search_algs import ParetoFront from vega.common import ClassFactory, ClassType @@ -153,6 +159,7 @@ def _swap(self, arch, R): idx] break except Exception: + logging.debug("Arch is not match, continue.") continue return arch diff --git a/vega/algorithms/nas/backbone_nas/backbone_nas_codec.py b/vega/algorithms/nas/backbone_nas/backbone_nas_codec.py index bf3303a..35213b6 100644 --- a/vega/algorithms/nas/backbone_nas/backbone_nas_codec.py +++ b/vega/algorithms/nas/backbone_nas/backbone_nas_codec.py @@ -1,12 +1,18 @@ # -*- coding:utf-8 -*- # Copyright (C) 2020. Huawei Technologies Co., Ltd. All rights reserved. -# This program is free software; you can redistribute it and/or modify -# it under the terms of the MIT License. -# This program is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# MIT License for more details. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. """Defined BackboneNasCodec.""" import copy @@ -53,8 +59,6 @@ def encode(self, sample_desc, is_random=False): base_depth = sample_desc['network.backbone.depth'] double_channel = sample_desc.get('network.backbone.doublechannel', None) down_sample = sample_desc.get('network.backbone.downsample', None) - # if double_channel != down_sample: - # return None code = [[], []] if base_depth in layer_to_block: if is_random or double_channel != default_count and double_channel is not None: @@ -94,7 +98,5 @@ def decode(self, sample): desc["network.backbone.doublechannel"] = code[0] if "network.backbone.downsample" in desc: desc["network.backbone.downsample"] = code[1] - # if len(desc["network.backbone.downsample"]) != len(desc["network.backbone.doublechannel"]): - # return None logging.info("decode:{}".format(desc)) return desc diff --git a/vega/algorithms/nas/backbone_nas/backbone_nas_space.py b/vega/algorithms/nas/backbone_nas/backbone_nas_space.py index 4497c35..684afbb 100644 --- a/vega/algorithms/nas/backbone_nas/backbone_nas_space.py +++ b/vega/algorithms/nas/backbone_nas/backbone_nas_space.py @@ -1,12 +1,18 @@ # -*- coding:utf-8 -*- # Copyright (C) 2020. Huawei Technologies Co., Ltd. All rights reserved. 
-# This program is free software; you can redistribute it and/or modify -# it under the terms of the MIT License. -# This program is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# MIT License for more details. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. """Check and Define Prune Model SearchSpace.""" import logging diff --git a/vega/algorithms/nas/backbone_nas/conf.py b/vega/algorithms/nas/backbone_nas/conf.py index cddbf2a..28d5274 100644 --- a/vega/algorithms/nas/backbone_nas/conf.py +++ b/vega/algorithms/nas/backbone_nas/conf.py @@ -1,12 +1,18 @@ # -*- coding:utf-8 -*- # Copyright (C) 2020. Huawei Technologies Co., Ltd. All rights reserved. -# This program is free software; you can redistribute it and/or modify -# it under the terms of the MIT License. -# This program is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# MIT License for more details. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. """Defined Configs.""" diff --git a/vega/algorithms/nas/cars/cars_alg.py b/vega/algorithms/nas/cars/cars_alg.py index 06573c8..9e88377 100644 --- a/vega/algorithms/nas/cars/cars_alg.py +++ b/vega/algorithms/nas/cars/cars_alg.py @@ -1,30 +1,36 @@ # -*- coding:utf-8 -*- # Copyright (C) 2020. Huawei Technologies Co., Ltd. All rights reserved. -# This program is free software; you can redistribute it and/or modify -# it under the terms of the MIT License. -# This program is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# MIT License for more details. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
"""DifferentialAlgorithm.""" + import copy import logging import os from collections import namedtuple -import numpy as np import vega -from .conf import CARSConfig from vega.common import Config +import numpy as np from vega.algorithms.nas.darts_cnn import DartsNetworkTemplateConfig from vega.common import ClassFactory, ClassType from vega.core.search_algs import SearchAlgorithm from vega.report import SortAndSelectPopulation from vega.report.report_client import ReportClient -from vega.report.report_server import ReportServer from .nsga3 import CARS_NSGA from .utils import eval_model_parameters +from .conf import CARSConfig if vega.is_torch_backend(): import torch @@ -103,7 +109,6 @@ def gen_offspring(self, alphas, offspring_ratio=1.0): alphas_c = self.random_sample_path() if self.judge_repeat(alphas, alphas_c) == 0: offsprings.append(alphas_c) - # offsprings = torch.cat([offspring.unsqueeze(0) for offspring in offsprings], dim=0) offsprings = np.stack(offsprings, axis=0) return offsprings @@ -186,7 +191,7 @@ def search_evol_arch(self, epoch, alg_policy, trainer, alphas): if alg_policy.select_method == 'uniform': selected_genotypes, selected_acc, selected_model_sizes = self.select_uniform_pareto_front( np.array(fitness_keep), np.array(size_keep), genotype_keep) - else: # default: first + else: selected_genotypes, selected_acc, selected_model_sizes = self.select_first_pareto_front( np.array(fitness_keep), np.array(size_keep), genotype_keep) @@ -223,7 +228,6 @@ def search_infer_step(self, alpha): logits = self.trainer.model(input, alpha=alpha_tensor) metrics(logits, target) elif vega.is_tf_backend(): - # self.trainer.valid_alpha = tf.convert_to_tensor(alpha) metrics = self.trainer.valid_metrics setattr(self.trainer, 'valid_alpha', alpha) eval_results = self.trainer.estimator.evaluate(input_fn=self.trainer.valid_loader.input_fn, @@ -287,8 +291,7 @@ def select_uniform_pareto_front(self, fitness, obj, genotypes): _range = max_acc - min_acc if _range == 0.: return genotypes[:1], fitness[:1], obj[:1] - ratio = 0.5 / _range - keep = (((fitness - min_acc) / _range) > ratio) + keep = (((fitness - min_acc) / _range) > 0.5) fitness = fitness[keep] obj = obj[keep] genotypes = [i for (i, v) in zip(genotypes, keep) if v] @@ -346,7 +349,6 @@ def crossover(self, alphas_a, alphas_b, ratio=0.5): new_alphas_reduce_ops0[i] = new_alphas_reduce_ops1[i].copy() alphas_normal = self._node_ops_to_alpha(new_alphas_normal_node0, new_alphas_normal_ops0).copy() alphas_reduce = self._node_ops_to_alpha(new_alphas_reduce_node0, new_alphas_reduce_ops0).copy() - # alphas = torch.cat([alphas_normal, alphas_reduce], dim=0) alphas = np.concatenate([alphas_normal, alphas_reduce], axis=0) return alphas diff --git a/vega/algorithms/nas/cars/cars_trainer_callback.py b/vega/algorithms/nas/cars/cars_trainer_callback.py index 5d94af1..b82417b 100644 --- a/vega/algorithms/nas/cars/cars_trainer_callback.py +++ b/vega/algorithms/nas/cars/cars_trainer_callback.py @@ -1,18 +1,25 @@ # -*- coding: utf-8 -*- # Copyright (C) 2020. Huawei Technologies Co., Ltd. All rights reserved. -# This program is free software; you can redistribute it and/or modify -# it under the terms of the MIT License. -# This program is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# MIT License for more details. 
+# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. """CARS trainer.""" -import numpy as np -from collections import namedtuple + import logging +from collections import namedtuple import vega +import numpy as np from vega.common import ClassFactory, ClassType from vega.core.search_space import SearchSpace from vega.core.search_algs import SearchAlgorithm @@ -43,7 +50,6 @@ def __init__(self): def before_train(self, logs=None): """Be called before the training process.""" - # Use zero valid_interval to supress default valid step self.trainer.valid_interval = 0 self.trainer.config.report_on_epoch = True if vega.is_torch_backend(): @@ -52,7 +58,6 @@ def before_train(self, logs=None): self.search_alg = SearchAlgorithm(SearchSpace()) self.alg_policy = self.search_alg.config.policy self.set_algorithm_model(self.trainer.model) - # setup alphas n_individual = self.alg_policy.num_individual self.alphas = np.stack([self.search_alg.random_sample_path() for i in range(n_individual)], axis=0) @@ -79,7 +84,6 @@ def train_step(self, batch): alpha = torch.from_numpy(self.search_alg.random_sample_path()).cuda() elif vega.is_npu_device(): alpha = torch.from_numpy(self.search_alg.random_sample_path()).to(vega.get_devices()) - # logits = self.trainer.model.forward_random(input) else: alpha = alphas[i] logits = self.trainer.model(input, alpha=alpha) diff --git a/vega/algorithms/nas/cars/conf.py b/vega/algorithms/nas/cars/conf.py index 1bf0385..af68f31 100644 --- a/vega/algorithms/nas/cars/conf.py +++ b/vega/algorithms/nas/cars/conf.py @@ -1,12 +1,18 @@ # -*- coding:utf-8 -*- # Copyright (C) 2020. Huawei Technologies Co., Ltd. All rights reserved. -# This program is free software; you can redistribute it and/or modify -# it under the terms of the MIT License. -# This program is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# MIT License for more details. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. """Defined Configs.""" diff --git a/vega/algorithms/nas/cars/nsga3.py b/vega/algorithms/nas/cars/nsga3.py index 4897636..4b17a08 100644 --- a/vega/algorithms/nas/cars/nsga3.py +++ b/vega/algorithms/nas/cars/nsga3.py @@ -1,16 +1,22 @@ # -*- coding:utf-8 -*- # Copyright (C) 2020. Huawei Technologies Co., Ltd. All rights reserved. -# This program is free software; you can redistribute it and/or modify -# it under the terms of the MIT License. 
-# This program is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# MIT License for more details. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. """Function for pNSGA-III.""" -import numpy as np import random +import numpy as np from vega.report import NonDominatedSorting diff --git a/vega/algorithms/nas/cars/utils.py b/vega/algorithms/nas/cars/utils.py index af62397..7968da4 100644 --- a/vega/algorithms/nas/cars/utils.py +++ b/vega/algorithms/nas/cars/utils.py @@ -1,12 +1,18 @@ # -*- coding:utf-8 -*- # Copyright (C) 2020. Huawei Technologies Co., Ltd. All rights reserved. -# This program is free software; you can redistribute it and/or modify -# it under the terms of the MIT License. -# This program is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# MIT License for more details. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. """Util functions.""" import numpy as np diff --git a/vega/algorithms/nas/dag_block_nas/__init__.py b/vega/algorithms/nas/dag_block_nas/__init__.py new file mode 100644 index 0000000..8554ca9 --- /dev/null +++ b/vega/algorithms/nas/dag_block_nas/__init__.py @@ -0,0 +1,3 @@ +from .block_nas import DAGBlockNas + +__all__ = ["DAGBlockNas"] diff --git a/vega/algorithms/nas/dag_block_nas/block_generator.py b/vega/algorithms/nas/dag_block_nas/block_generator.py new file mode 100644 index 0000000..d84a941 --- /dev/null +++ b/vega/algorithms/nas/dag_block_nas/block_generator.py @@ -0,0 +1,125 @@ +# -*- coding:utf-8 -*- + +# Copyright (C) 2020. Huawei Technologies Co., Ltd. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+"""This is DAG Cell for network.""" +import random +import logging +import numpy as np +from vega.common.config import Config +from vega.common.class_factory import ClassFactory, ClassType +from vega.metrics.flops_and_params import calc_model_flops_params +from vega.algorithms.nas.dnet_nas.dblock_nas_codec import decode_d_block_str + + +def forward_block_out_shape(block_desc, input_shape, idx=None, cal_flops_and_params=False): + """Forward blocks.""" + import torch + block = ClassFactory.get_instance(ClassType.NETWORK, block_desc) + x = torch.ones(*input_shape) + out_shape = block(x).shape + if idx: + out_shape = out_shape[idx] + if cal_flops_and_params: + flops, params = calc_model_flops_params(block, x) + return out_shape, flops * 1e-9, params * 1e-6 + return out_shape + + +class BlockGenerator(object): + """Block generator.""" + + def __init__(self, c_in, block_type='GhostModule'): + self.c_in = c_in + self.c_out = None + self.flops = None + self.params = None + self.block_type = block_type + self.each_block_max_samples = 10 + + def _gen_one_block(self): + if self.block_type == 'DBlock': + return gen_d_blocks(self.c_in) + elif self.block_type == 'DAGGraphCell': + return gen_graph_cell(self.c_in) + return gen_ghost_block(self.c_in) + + def run(self, block): + """Run generator.""" + flops, params = block.flops_params + for _ in range(self.each_block_max_samples): + target_block = self._gen_one_block() + if target_block and block.c_in == target_block.c_in and block.c_out == target_block.c_out: + if self._params_filter(target_block, params): + return target_block + + @classmethod + def _flops_filter(cls, target_block, flops): + return 0.5 * flops < target_block.c_flops < 2 * flops + + @classmethod + def _params_filter(cls, target_block, params): + return 0.5 * params < target_block.c_params < 1.5 * params + + +def gen_ghost_block(c_in): + """Generate Ghost block.""" + block_str = random.choice([1, 2, 3]) + planes = random.choice([16, 32, 64, 128, 256, 512]) + stride = random.choice([1, 2]) + target_block = Config(dict(type="GhostModule", inplanes=c_in, planes=planes, blocks=block_str, stride=stride)) + target_block.c_in = c_in + target_block.c_out, target_block.c_flops, target_block.c_params = forward_block_out_shape( + target_block, [2, c_in, 32, 32], 1, True) + return target_block + + +def gen_d_blocks(c_in): + """Generate d blocks.""" + try: + op_choices = 7 + channel_choices = 5 + op_num = random.choice([1, 2, 3]) + skip_num = random.choice([0, 1]) + block_str = decode_d_block_str(op_choices, channel_choices, op_num, skip_num) + stride = random.choice([1, 2]) + ops = ['conv3', 'conv1', 'conv3_grp2', 'conv3_grp4', 'conv3_base1', 'conv3_base32', 'conv3_sep'] + target_block = Config( + dict(type="EncodedBlock", block_str=block_str, in_channel=c_in, op_names=ops, stride=stride)) + target_block.c_in = c_in + target_block.c_out, target_block.c_flops, target_block.c_params = forward_block_out_shape( + target_block, [2, c_in, 32, 32], 1, True) + return target_block + except Exception as ex: + logging.debug("Failed to generate D blocks. 
ex={}".format(ex)) + return None + + +def gen_graph_cell(c_in): + """Generate graph cell.""" + try: + matrix_deep = random.choice([3, 4, 5, 6, 7]) + adj_matrix = np.array(np.random.randint(2, size=(matrix_deep, matrix_deep))) + nodes = ['Input', 'Conv1x1BnRelu', 'Conv3x3BnRelu', 'Conv3x3BnRelu', 'Conv3x3BnRelu', 'MaxPool3x3', + 'Output'] + out_channels = random.choice([64, 128, 256, 512, 1024]) + target_block = Config( + dict(type='DagGraphCell', adj_matrix=adj_matrix, nodes=nodes, in_channels=c_in, + out_channels=out_channels)) + target_block.c_in = c_in + target_block.c_out = forward_block_out_shape(target_block, [1, c_in, 32, 32], idx=1) + return target_block + except Exception as ex: + logging.debug("Failed to generate Graph Cell. ex={}".format(ex)) + return None diff --git a/vega/algorithms/nas/dag_block_nas/block_nas.py b/vega/algorithms/nas/dag_block_nas/block_nas.py new file mode 100644 index 0000000..420f5d7 --- /dev/null +++ b/vega/algorithms/nas/dag_block_nas/block_nas.py @@ -0,0 +1,188 @@ +# -*- coding:utf-8 -*- + +# Copyright (C) 2020. Huawei Technologies Co., Ltd. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +"""This is DAG Cell for network.""" +import copy +import logging +import random +from collections import OrderedDict + +from vega.common import ClassFactory, ClassType, ConfigSerializable +from vega.core.search_algs import SearchAlgorithm + +from vega.core.pipeline.conf import PipeStepConfig +from vega.model_zoo import ModelZoo +from vega.algorithms.nas.dag_block_nas.match_blocks import match_blocks, SpaceIterRecord, SpaceIter, \ + mutate_sub_blocks, check_latency +from vega.report.report_server import ReportServer + + +class DAGBlockNasConfig(ConfigSerializable): + """DAG Block Nas Config.""" + + num_samples = 100 + mutation_method = 'progressive' # random/progressive + check_latency = False + + +@ClassFactory.register(ClassType.SEARCH_ALGORITHM) +class DAGBlockNas(SearchAlgorithm): + """DAG Block Nas.""" + + config = DAGBlockNasConfig() + + def __init__(self, search_space=None): + """Construct the AdelaideMutate class. + + :param search_space: Config of the search space + """ + super(DAGBlockNas, self).__init__(search_space) + self.search_space = search_space + self.sample_count = 0 + self.sampled_desc = [] + model_config = PipeStepConfig.model + self.org_model = ModelZoo().get_model(model_config.model_desc, model_config.pretrained_model_file) + + def search(self): + """Search block desc.""" + desc = self._do_search() + if not desc: + return None + hash_code = hash(str(desc)) + if hash_code in self.sampled_desc: + return None + self.sampled_desc.append(hash_code) + self.sample_count += 1 + # save records. + SpaceIterRecord().dump(self.get_local_worker_path(worker_id=self.sample_count)) + return dict(worker_id=self.sample_count - 1, encoded_desc=dict(desc)) + + def _do_search(self): + if self.config.mutation_method == 'random': + return dict(RandomMutation(self.search_space, self.org_model).run()) + # get last best model and weights. 
+ records = ReportServer().get_pareto_front_records(choice=True) + if records: + model_desc = records[0].desc + pretrained_model_file = records[0].weights_file + else: + model_desc = PipeStepConfig.model.model_desc + pretrained_model_file = PipeStepConfig.model.pretrained_model_file + self.org_model = ModelZoo().get_model(model_desc, pretrained_model_file) + desc = dict(ProgressiveMutation(self.search_space, self.org_model).run()) + if self.config.check_latency: + target_model = ModelZoo().get_model(desc) + if not check_latency(target_model): + return None + return desc + + @property + def is_completed(self): + """Check is completed.""" + return self.sample_count >= self.config.num_samples + + @property + def max_samples(self): + """Get max samples number.""" + return self.config.num_samples + + +class RandomMutation(object): + """Do mutation with random sample.""" + + def __init__(self, search_space, model): + self.fused_blocks_nums_iter = SpaceIter(search_space, 'fused_blocks_nums') + self.block_type_iter = SpaceIter(search_space, 'block_type') + self.model = model + + def run(self): + """Run random mutation.""" + return self._do_mutation() + + def _do_mutation(self, fused_blocks_radio=1, mutated_blocks_radio=1): + """Run block mutate.""" + blocks = match_blocks(self.model) + target_desc = OrderedDict(copy.deepcopy(self.model.to_desc())) + blocks = self.fuse_sub_blocks(blocks, self.fused_blocks_nums_iter, fused_blocks_radio) + blocks.pop(0) + for block in blocks: + if random.uniform(0, 1) > mutated_blocks_radio: + continue + target_desc = mutate_sub_blocks(block, target_desc, self.block_type_iter) + return target_desc + + def fuse_sub_blocks(self, org_blocks, fused_blocks_nums_iter, fused_blocks_radio=1): + """Fuse sub block.""" + first_block = True + fused_blocks = [] + blocks = copy.deepcopy(org_blocks) + while blocks: + fuse_deep = next(fused_blocks_nums_iter) # random.choice(fused_blocks_nums) + need_fuse_blocks = [] + if first_block or len(blocks) <= 2 or random.uniform(0, 1) > fused_blocks_radio: + fused_blocks.append(blocks.pop(0)) + first_block = False + continue + counts = fuse_deep if fuse_deep < len(blocks) - 2 else len(blocks) - 2 + for _ in range(counts): + need_fuse_blocks.append(blocks.pop(0)) + fused_block = do_block_fusion(need_fuse_blocks) + logging.debug( + "fused block, start_name: {}, end_name: {}".format(fused_block.start_name, fused_block.end_name)) + fused_blocks.append(fused_block) + logging.info("before fuse block size:{}, fused blocks size:{}".format(len(org_blocks), len(fused_blocks))) + return fused_blocks + + +class ProgressiveMutation(object): + """Do progressive mutation.""" + + def __init__(self, search_space, model): + self.block_type_iter = SpaceIter(search_space, 'block_type') + self.model = model + + def run(self): + """Run.""" + blocks = match_blocks(self.model) + target_desc = OrderedDict(copy.deepcopy(self.model.to_desc())) + fused_block = self.sample_sub_blocks(blocks) + return mutate_sub_blocks(fused_block, target_desc, self.block_type_iter) + + def sample_sub_blocks_idx(self, block_size): + """Sample a sub blocks.""" + while True: + start_idx = random.choice(range(block_size - 2)) + fuse_len = random.choice(range(block_size - start_idx)) + if fuse_len > 2: + fused_block_idx = [start_idx + 1, start_idx + fuse_len] + return fused_block_idx + + def sample_sub_blocks(self, blocks): + """Chose one sub block.""" + s_idx = self.sample_sub_blocks_idx(len(blocks)) + sub_blocks = [block for idx, block in enumerate(blocks) if s_idx[0] < idx < s_idx[1]] + 
return do_block_fusion(sub_blocks) + + +def do_block_fusion(sub_blocks): + """Do fuse Blocks.""" + nodes = OrderedDict() + for block in sub_blocks: + nodes.update(block.nodes) + fused_block = sub_blocks[0] + fused_block._nodes = nodes + fused_block._end_name = sub_blocks[-1].end_name + logging.info("chose block, start_name: {}, end_name: {}".format(fused_block.start_name, fused_block.end_name)) + return fused_block diff --git a/vega/algorithms/nas/dag_block_nas/match_blocks.py b/vega/algorithms/nas/dag_block_nas/match_blocks.py new file mode 100644 index 0000000..51f8988 --- /dev/null +++ b/vega/algorithms/nas/dag_block_nas/match_blocks.py @@ -0,0 +1,213 @@ +# -*- coding:utf-8 -*- + +# Copyright (C) 2020. Huawei Technologies Co., Ltd. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +"""This is Match all blocks in network.""" +import json +import copy +import logging +from collections import OrderedDict +from vega.core.search_space.search_space import SearchSpace +from vega.algorithms.nas.dag_block_nas.block_generator import BlockGenerator +from vega.metrics.forward_latency import calc_forward_latency +from vega.common.class_factory import ClassFactory, ClassType + + +def is_connection_node(node): + """Determine is connection node.""" + return node.is_operator_conn_module or len(node.child_nodes) > 1 or node.module_type == 'torch_func_cat' + + +def check_latency(target_model, dummy_inputs=None): + """Validate network.""" + try: + import torch + x = dummy_inputs or torch.ones(1, 3, 224, 224) + calc_forward_latency(target_model, x) + except Exception as ex: + logging.info("sampled network is invalidate, ex={}".format(str(ex))) + return None + return True + + +class BlockItems(object): + """Blocks Items.""" + + def __init__(self): + self._nodes = OrderedDict() + self._start_name = None + self._end_name = None + + def add(self, name, node, start_node=False, end_node=False): + """Add a node into items.""" + self._nodes[name] = node + self._nodes.move_to_end(name, last=False) + if start_node: + self._start_name = name + if end_node: + self._end_name = name + + @property + def nodes(self): + """Get nodes.""" + return self._nodes + + @property + def c_in(self): + """Get input shape.""" + res = self._filter_modules('Conv2d') + if not res: + return None + return res[0].in_channels + + def _filter_modules(self, module_type): + res = [] + for name, node in self.nodes.items(): + if not hasattr(node.module, 'named_modules'): + continue + for module_name, module in node.module.named_modules(): + if module.__class__.__name__ == module_type: + res.append(module) + return res + + @property + def c_out(self): + """Get output shape.""" + res = self._filter_modules('Conv2d') + if not res: + return None + return res[-1].out_channels + + @property + def start_name(self): + """Get start name.""" + return self._start_name or next(iter(self.nodes)) + + @property + def end_name(self): + """Get end name.""" + return self._end_name or next(iter(reversed(self._nodes))) + + @property + def 
flops_params(self): + """Get Flops and Params.""" + dag_cls = ClassFactory.get_cls(ClassType.NETWORK, 'DagNetworkTorch') + block = dag_cls(self._nodes) + from vega.metrics.flops_and_params import calc_model_flops_params + import torch + x = torch.ones(2, self.c_in, 32, 32) + flops, params = calc_model_flops_params(block, x) + return flops * 1e-9, params * 1e-6 + + +def match_blocks_items(in_node): + """Match and list all sub blocks items.""" + items = BlockItems() + c_nodes = [in_node] + while c_nodes: + node = c_nodes.pop() + items.add(node.name, node) + for parent_node in node.parent_nodes: + if not is_connection_node(parent_node): + c_nodes.append(parent_node) + else: + items.add(parent_node.name, parent_node, start_node=True) + return items + + +def match_blocks(model): + """Match all blocks of dag network.""" + blocks = [] + for name, node in model.named_nodes(): + if is_connection_node(node): + blocks.append(match_blocks_items(node)) + return blocks + + +def mutate_sub_blocks(block, target_desc, block_type_iter): + """Mutate Sub Blocks.""" + if not block.c_in or not block.c_out: + return target_desc + block_type = next(block_type_iter) # random.choice(block_type) + target_block = BlockGenerator(c_in=block.c_in, block_type=block_type).run(block) + + if target_block: + return mutate_block(target_desc, block, target_block) + return target_desc + + +def mutate_block(model_desc, mutated_block, target_block=None): + """Mutate block.""" + logging.info("Mutate blocks start module name: {}, end module name: {}, target module desc: {}:".format( + mutated_block.start_name, mutated_block.end_name, target_block)) + mutated_map = OrderedDict() + while model_desc: + name, node = model_desc.popitem(0) + if name != 'type': + node = json.loads(node) if isinstance(node, str) else node + if name not in mutated_block.nodes: + mutated_map[name] = node + continue + if name == mutated_block.end_name: + mutated_map[name] = dict(name=name, module=target_block, module_type=target_block.get("type"), + parent_node_names=[mutated_block.start_name], + child_node_names=node.get("child_node_names")) + elif name == mutated_block.start_name: + tmp_node = copy.deepcopy(node) + tmp_node["child_node_names"] = [mutated_block.end_name] + tmp_node["child_nodes"] = [] + mutated_map[name] = tmp_node + return mutated_map + + +class SpaceIterRecord(object): + """Record Space iter.""" + + __records__ = [] + + @classmethod + def add_record(cls, record): + """Add one record.""" + cls.__records__.append(record) + + @classmethod + def clear(cls): + """Clear records.""" + cls.__records__ = [] + + @classmethod + def dump(cls, file_path): + """Dump record.""" + with open("{}/dag_block_nas.json".format(file_path), 'w') as f: + json.dump(cls.__records__, f) + cls.clear() + + +class SpaceIter(object): + """Get Space iter.""" + + def __init__(self, search_space, name): + self.name = name + self.space = list(filter(lambda x: x.get("key") == name, search_space.get("hyperparameters"))) + self.search_space = SearchSpace(dict(hyperparameters=self.space)) + + def __iter__(self): + """Get iter.""" + return self + + def __next__(self): + """Get next sample.""" + res = self.search_space.sample() + SpaceIterRecord.add_record(res) + return res.get(self.name) diff --git a/vega/algorithms/nas/dag_mutate/__init__.py b/vega/algorithms/nas/dag_mutate/__init__.py deleted file mode 100644 index e5265b3..0000000 --- a/vega/algorithms/nas/dag_mutate/__init__.py +++ /dev/null @@ -1,3 +0,0 @@ -from .mutate import DAGMutateSearchSpace - -__all__ = 
["DAGMutateSearchSpace"] diff --git a/vega/algorithms/nas/dag_mutate/mutate.py b/vega/algorithms/nas/dag_mutate/mutate.py deleted file mode 100644 index f1a67ce..0000000 --- a/vega/algorithms/nas/dag_mutate/mutate.py +++ /dev/null @@ -1,93 +0,0 @@ -# -*- coding:utf-8 -*- - -# Copyright (C) 2020. Huawei Technologies Co., Ltd. All rights reserved. -# This program is free software; you can redistribute it and/or modify -# it under the terms of the MIT License. -# This program is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# MIT License for more details. -"""This is DAG Cell for network.""" -import copy -import json -import random -import logging -from collections import OrderedDict -from vega.common import ClassFactory, ClassType, Config -from vega.core.search_space import SearchSpace -from vega.core.pipeline.conf import PipeStepConfig -from vega.model_zoo import ModelZoo -from vega.algorithms.nas.dag_mutate.search_blocks import search_blocks, forward_block_out_shape - - -@ClassFactory.register(ClassType.SEARCHSPACE) -class DAGMutateSearchSpace(SearchSpace): - """Prune SearchSpace.""" - - @classmethod - def to_desc(self, desc): - """Decode to model desc.""" - if not hasattr(self, "model") or not self.model: - self.model = ModelZoo().get_model(PipeStepConfig.model.model_desc, - PipeStepConfig.model.pretrained_model_file) - model = copy.deepcopy(self.model) - blocks = search_blocks(model) - target_desc = OrderedDict(copy.deepcopy(model.to_desc())) - return mutate_blocks(blocks, target_desc, desc) - - -def mutate_blocks(blocks, target_desc, block_desc): - """Mutate Block.""" - target_block = generate_d_blocks(block_desc) - logging.info("generate d block: {}".format(target_block)) - mutated_blocks = [block for block in blocks if - block.c_in == target_block.c_in and block.c_out == target_block.c_out] - if not mutate_blocks: - return None - mutated_desc = target_desc - for block in mutated_blocks: - if random.uniform(0, 1) > 0.5: - continue - mutated_desc = mutate_block(mutated_desc, block, target_block) - return mutated_desc - - -def generate_d_blocks(block_desc): - """Generate d blocks.""" - block_str = block_desc.get("block_str") - stride = block_desc.get('stride') - c_in = block_desc.get('c_in') - ops = block_desc.get('ops') or ['conv3', 'conv1', 'conv3_grp2', 'conv3_grp4', 'conv3_base1', 'conv3_base32', - 'conv3_sep'] - target_block = Config(dict(type="EncodedBlock", block_str=block_str, in_channel=c_in, op_names=ops, stride=stride)) - target_block.c_in = c_in - target_block.c_out = forward_block_out_shape(target_block, [1, c_in, 32, 32], idx=1) - return target_block - - -def mutate_block(model_desc, mutated_block, target_block=None): - """Mutate block.""" - if not mutated_block.c_in or not mutated_block.c_out: - return None - if not (mutated_block.c_in == target_block.c_in and mutated_block.c_out == target_block.c_out): - return None - logging.info("Mutate blocks start module name: {}, end module name: {}".format( - mutated_block.start_name, mutated_block.end_name)) - mutated_map = OrderedDict() - while model_desc: - name, node = model_desc.popitem(0) - if name != 'type': - node = json.loads(node) if isinstance(node, str) else node - if name not in mutated_block.nodes: - mutated_map[name] = node - continue - if name == mutated_block.end_name: - mutated_map[name] = dict(name=name, module=target_block, module_type=target_block.get("type"), - 
parent_node_names=[mutated_block.start_name], - child_node_names=node.get("child_node_names")) - elif name == mutated_block.start_name: - tmp_node = copy.deepcopy(node) - tmp_node["child_node_names"] = [mutated_block.end_name] - tmp_node["child_nodes"] = [] - mutated_map[name] = tmp_node - return mutated_map diff --git a/vega/algorithms/nas/dag_mutate/search_blocks.py b/vega/algorithms/nas/dag_mutate/search_blocks.py deleted file mode 100644 index 7fc59a4..0000000 --- a/vega/algorithms/nas/dag_mutate/search_blocks.py +++ /dev/null @@ -1,99 +0,0 @@ -# -*- coding:utf-8 -*- - -# Copyright (C) 2020. Huawei Technologies Co., Ltd. All rights reserved. -# This program is free software; you can redistribute it and/or modify -# it under the terms of the MIT License. -# This program is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# MIT License for more details. -"""This is Search all blocks in network.""" -from collections import OrderedDict -from vega.common import ClassFactory, ClassType - - -def is_connection_node(node): - """Determine is connection node.""" - return node.is_operator_conn_module or len(node.child_nodes) > 1 or node.module_type == 'torch_func_cat' - - -class BlockItems(object): - """Blocks Items.""" - - def __init__(self): - self._nodes = OrderedDict() - self._start_name = None - self._end_name = None - - def add(self, name, node, start_node=False, end_node=False): - """Add a node into items.""" - self._nodes[name] = node - if start_node: - self._start_name = name - if end_node: - self._end_name = name - - @property - def nodes(self): - """Get nodes.""" - return self._nodes - - @property - def c_in(self): - """Get input shape.""" - convs = [node for name, node in self.nodes.items() if node.module_type == 'Conv2d'] - if convs: - return convs[-1].module.in_channels - return None # in_node - - @property - def c_out(self): - """Get output shape.""" - convs = [node for name, node in self.nodes.items() if node.module_type == 'Conv2d'] - if convs: - return convs[0].module.out_channels - return 256 - - @property - def start_name(self): - """Get start name.""" - return self._start_name or next(iter(reversed(self._nodes))) - - @property - def end_name(self): - """Get end name.""" - return self._end_name or next(iter(self._nodes)) - - -def search_blocks_items(in_node): - """Search and list all sub blocks items.""" - items = BlockItems() - c_nodes = [in_node] - while c_nodes: - node = c_nodes.pop() - items.add(node.name, node) - for parent_node in node.parent_nodes: - if not is_connection_node(parent_node): - c_nodes.append(parent_node) - else: - items.add(parent_node.name, parent_node, start_node=True) - return items - - -def search_blocks(model): - """Search all blocks of dag network.""" - blocks = [] - for name, node in model.named_nodes(): - if is_connection_node(node): - blocks.append(search_blocks_items(node)) - return blocks - - -def forward_block_out_shape(block_desc, input_shape, idx=None): - """Forward blocks.""" - import torch - block = ClassFactory.get_instance(ClassType.NETWORK, block_desc) - out_shape = block(torch.ones(*input_shape)).shape - if idx: - return out_shape[idx] - return out_shape diff --git a/vega/algorithms/nas/darts_cnn/darts_codec.py b/vega/algorithms/nas/darts_cnn/darts_codec.py index afcae22..2badecb 100644 --- a/vega/algorithms/nas/darts_cnn/darts_codec.py +++ b/vega/algorithms/nas/darts_cnn/darts_codec.py @@ -1,12 +1,18 @@ # 
-*- coding: utf-8 -*- # Copyright (C) 2020. Huawei Technologies Co., Ltd. All rights reserved. -# This program is free software; you can redistribute it and/or modify -# it under the terms of the MIT License. -# This program is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# MIT License for more details. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. """Codec of DARTS.""" import copy diff --git a/vega/algorithms/nas/darts_cnn/darts_fully_trainer_callback.py b/vega/algorithms/nas/darts_cnn/darts_fully_trainer_callback.py index 9a57312..294b75c 100644 --- a/vega/algorithms/nas/darts_cnn/darts_fully_trainer_callback.py +++ b/vega/algorithms/nas/darts_cnn/darts_fully_trainer_callback.py @@ -1,12 +1,18 @@ # -*- coding: utf-8 -*- # Copyright (C) 2020. Huawei Technologies Co., Ltd. All rights reserved. -# This program is free software; you can redistribute it and/or modify -# it under the terms of the MIT License. -# This program is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# MIT License for more details. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. """DARTS FUll trainer.""" from vega.common import ClassFactory, ClassType diff --git a/vega/algorithms/nas/darts_cnn/darts_network_config.py b/vega/algorithms/nas/darts_cnn/darts_network_config.py index 0ed1315..9dee88d 100644 --- a/vega/algorithms/nas/darts_cnn/darts_network_config.py +++ b/vega/algorithms/nas/darts_cnn/darts_network_config.py @@ -1,12 +1,18 @@ # -*- coding:utf-8 -*- # Copyright (C) 2020. Huawei Technologies Co., Ltd. All rights reserved. -# This program is free software; you can redistribute it and/or modify -# it under the terms of the MIT License. -# This program is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# MIT License for more details. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. """Contains Default and User configuration.""" import os diff --git a/vega/algorithms/nas/darts_cnn/darts_trainer_callback.py b/vega/algorithms/nas/darts_cnn/darts_trainer_callback.py index 14b04ae..a8c0e8f 100644 --- a/vega/algorithms/nas/darts_cnn/darts_trainer_callback.py +++ b/vega/algorithms/nas/darts_cnn/darts_trainer_callback.py @@ -1,12 +1,18 @@ # -*- coding: utf-8 -*- # Copyright (C) 2020. Huawei Technologies Co., Ltd. All rights reserved. -# This program is free software; you can redistribute it and/or modify -# it under the terms of the MIT License. -# This program is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# MIT License for more details. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. """DARTS trainer.""" import logging @@ -56,10 +62,8 @@ def before_epoch(self, epoch, logs=None): def before_train_step(self, epoch, logs=None): """Be called before a batch training.""" - # Get current train batch directly from logs train_batch = logs['train_batch'] train_input, train_target = train_batch - # Prepare valid batch data by using valid loader from trainer try: valid_input, valid_target = next(self.valid_loader_iter) except Exception: @@ -69,7 +73,6 @@ def before_train_step(self, epoch, logs=None): valid_input, valid_target = valid_input.to(int(self.device)), valid_target.to(int(self.device)) else: valid_input, valid_target = valid_input.to(self.device), valid_target.to(self.device) - # Call arch search step self._train_arch_step(train_input, train_target, valid_input, valid_target) def after_epoch(self, epoch, logs=None): @@ -100,7 +103,6 @@ def map_to_dict(td, vd): dataset = tf.data.Dataset.zip((self.trainer.train_loader.input_fn(), self.trainer.valid_loader.input_fn())) dataset = dataset.map(lambda td, vd: map_to_dict(td, vd)) - # dataset = dataset.prefetch(buffer_size=tf.contrib.data.AUTOTUNE) return dataset def model_fn(self, features, labels, mode): @@ -111,7 +113,6 @@ def model_fn(self, features, labels, mode): if mode == tf.estimator.ModeKeys.TRAIN: features, valid_features = features['train'], features['valid'] labels, valid_labels = labels['train'], labels['valid'] - # update arch epoch = tf.cast(global_step, tf.float32) / tf.cast(len(self.trainer.train_loader), tf.float32) self.trainer.optimizer = Optimizer()(distributed=self.trainer.horovod) self.trainer.lr_scheduler = LrScheduler()(self.trainer.optimizer) @@ -146,11 +147,9 @@ def _get_arch_weights(self): elif vega.is_tf_backend(): sess_config = self.trainer._init_session_config() 
with tf.compat.v1.Session(config=sess_config) as sess: - # tf.reset_default_graph() checkpoint_file = tf.train.latest_checkpoint(self.trainer.get_local_worker_path()) saver = tf.train.import_meta_graph("{}.meta".format(checkpoint_file)) saver.restore(sess, checkpoint_file) - # initializer is necessary here sess.run(tf.global_variables_initializer()) arch_weights = self.model.arch_weights arch_weights = [weight.eval() for weight in arch_weights] diff --git a/vega/algorithms/nas/dnet_nas/conf.py b/vega/algorithms/nas/dnet_nas/conf.py index 9e96fbf..88294e5 100644 --- a/vega/algorithms/nas/dnet_nas/conf.py +++ b/vega/algorithms/nas/dnet_nas/conf.py @@ -1,12 +1,18 @@ # -*- coding:utf-8 -*- # Copyright (C) 2020. Huawei Technologies Co., Ltd. All rights reserved. -# This program is free software; you can redistribute it and/or modify -# it under the terms of the MIT License. -# This program is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# MIT License for more details. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. """Defined Configs.""" diff --git a/vega/algorithms/nas/dnet_nas/dblock_nas.py b/vega/algorithms/nas/dnet_nas/dblock_nas.py index 4673297..9c7295c 100644 --- a/vega/algorithms/nas/dnet_nas/dblock_nas.py +++ b/vega/algorithms/nas/dnet_nas/dblock_nas.py @@ -1,12 +1,18 @@ # -*- coding:utf-8 -*- # Copyright (C) 2020. Huawei Technologies Co., Ltd. All rights reserved. -# This program is free software; you can redistribute it and/or modify -# it under the terms of the MIT License. -# This program is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# MIT License for more details. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
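Stepping back to the darts_trainer_callback.py hunks above: the comments removed from before_train_step described the usual first-order DARTS pattern of drawing one batch from the validation loader, running the architecture update on it (_train_arch_step), and then letting the normal training step update the network weights. A minimal PyTorch-style sketch of that alternation, for orientation only; arch_optimizer, weight_optimizer and criterion are illustrative names rather than Vega APIs:

def darts_alternating_step(model, criterion, train_batch, valid_batch,
                           weight_optimizer, arch_optimizer):
    """One first-order DARTS step: architecture on valid data, weights on train data."""
    train_x, train_y = train_batch
    valid_x, valid_y = valid_batch

    # Architecture step: evaluate on the held-out batch and update only the
    # architecture parameters held by arch_optimizer.
    arch_optimizer.zero_grad()
    arch_loss = criterion(model(valid_x), valid_y)
    arch_loss.backward()
    arch_optimizer.step()

    # Weight step: the ordinary training update on the train batch.
    weight_optimizer.zero_grad()
    weight_loss = criterion(model(train_x), train_y)
    weight_loss.backward()
    weight_optimizer.step()
    return weight_loss.item()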
"""Defined DnetNas.""" import logging @@ -29,7 +35,6 @@ class DblockNas(SearchAlgorithm): def __init__(self, search_space=None, **kwargs): """Init DnetNas.""" super(DblockNas, self).__init__(search_space, **kwargs) - # ea or random self.max_sample = self.config.range.max_sample self.min_sample = self.config.range.min_sample self.sample_count = 0 @@ -47,8 +52,6 @@ def search(self): """Search in search_space and return a sample.""" sample = {} while sample is None or 'code' not in sample: - # pareto_dict = self.pareto_front.get_pareto_front() - # pareto_list = list(pareto_dict.values()) sample_desc = self.search_space.sample() sample = self.codec.encode(sample_desc) if not self.pareto_front._add_to_board(id=self.sample_count + 1, diff --git a/vega/algorithms/nas/dnet_nas/dblock_nas_codec.py b/vega/algorithms/nas/dnet_nas/dblock_nas_codec.py index f90206a..88d48ad 100644 --- a/vega/algorithms/nas/dnet_nas/dblock_nas_codec.py +++ b/vega/algorithms/nas/dnet_nas/dblock_nas_codec.py @@ -1,12 +1,18 @@ # -*- coding:utf-8 -*- # Copyright (C) 2020. Huawei Technologies Co., Ltd. All rights reserved. -# This program is free software; you can redistribute it and/or modify -# it under the terms of the MIT License. -# This program is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# MIT License for more details. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
"""Defined DblockNasCodec.""" import copy @@ -50,35 +56,7 @@ def encode(self, sample_desc, is_random=False): skip_num = sample_desc['network.backbone.skip_num'] base_channel = sample_desc['network.backbone.base_channel'] - block_coding = '' - for i in range(op_num): - op_index = random.randint(0, op_choices - 1) - channel_index = random.randint(0, channel_choices - 1) - if i < op_num - 1: - block_coding += str(op_index) + str(channel_index) - else: - block_coding += str(op_index) - - skip_set = [] - for i in range(op_num + 1): - for j in range(i + 1, op_num + 1): - if i == 0 and j == op_num: - continue - skip_set.append(str(i) + str(j)) - - skip_num = min(skip_num, len(skip_set)) - skip_indexes = list(np.random.permutation(skip_num)) - skip_indexes.sort() - - skip_coding = '' - for skip_index in skip_indexes: - if random.randint(0, 1) == 0: - skip_type = 'a' - else: - skip_type = 'c' - skip_coding += skip_type + skip_set[skip_index] - - block_coding += '-' + skip_coding + block_coding = decode_d_block_str(op_choices, channel_choices, op_num, skip_num) code = {} code['network.backbone.block_coding'] = block_coding @@ -107,3 +85,38 @@ def decode(self, sample): desc['network.backbone.encoding'] = f'{block_coding}_{base_channel}_{marco_coding}' return desc + + +def decode_d_block_str(op_choices, channel_choices, op_num, skip_num): + """Decode d block str.""" + + block_coding = '' + for i in range(op_num): + op_index = random.randint(0, op_choices - 1) + channel_index = random.randint(0, channel_choices - 1) + if i < op_num - 1: + block_coding += str(op_index) + str(channel_index) + else: + block_coding += str(op_index) + + skip_set = [] + for i in range(op_num + 1): + for j in range(i + 1, op_num + 1): + if i == 0 and j == op_num: + continue + skip_set.append(str(i) + str(j)) + + skip_num = min(skip_num, len(skip_set)) + skip_indexes = list(np.random.permutation(skip_num)) + skip_indexes.sort() + + skip_coding = '' + for skip_index in skip_indexes: + if random.randint(0, 1) == 0: + skip_type = 'a' + else: + skip_type = 'c' + skip_coding += skip_type + skip_set[skip_index] + + block_coding += '-' + skip_coding + return block_coding diff --git a/vega/algorithms/nas/dnet_nas/dnet_nas.py b/vega/algorithms/nas/dnet_nas/dnet_nas.py index fdf7074..d98c901 100644 --- a/vega/algorithms/nas/dnet_nas/dnet_nas.py +++ b/vega/algorithms/nas/dnet_nas/dnet_nas.py @@ -1,17 +1,23 @@ # -*- coding:utf-8 -*- # Copyright (C) 2020. Huawei Technologies Co., Ltd. All rights reserved. -# This program is free software; you can redistribute it and/or modify -# it under the terms of the MIT License. -# This program is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# MIT License for more details. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
"""Defined DnetNas.""" import random -import numpy as np import logging +import numpy as np from vega.core.search_algs import SearchAlgorithm from vega.core.search_algs import ParetoFront from vega.common import ClassFactory, ClassType diff --git a/vega/algorithms/nas/dnet_nas/dnet_nas_codec.py b/vega/algorithms/nas/dnet_nas/dnet_nas_codec.py index 7301a33..6f80f4d 100644 --- a/vega/algorithms/nas/dnet_nas/dnet_nas_codec.py +++ b/vega/algorithms/nas/dnet_nas/dnet_nas_codec.py @@ -1,12 +1,18 @@ # -*- coding:utf-8 -*- # Copyright (C) 2020. Huawei Technologies Co., Ltd. All rights reserved. -# This program is free software; you can redistribute it and/or modify -# it under the terms of the MIT License. -# This program is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# MIT License for more details. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. """Defined DnetNasCodec.""" import copy diff --git a/vega/algorithms/nas/esr_ea/conf.py b/vega/algorithms/nas/esr_ea/conf.py index eda37d0..eb2c704 100644 --- a/vega/algorithms/nas/esr_ea/conf.py +++ b/vega/algorithms/nas/esr_ea/conf.py @@ -1,12 +1,18 @@ # -*- coding:utf-8 -*- # Copyright (C) 2020. Huawei Technologies Co., Ltd. All rights reserved. -# This program is free software; you can redistribute it and/or modify -# it under the terms of the MIT License. -# This program is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# MIT License for more details. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. """Defined Configs.""" from vega.common import ConfigSerializable diff --git a/vega/algorithms/nas/esr_ea/esr_ea_codec.py b/vega/algorithms/nas/esr_ea/esr_ea_codec.py index 1e58b9a..94dcdad 100644 --- a/vega/algorithms/nas/esr_ea/esr_ea_codec.py +++ b/vega/algorithms/nas/esr_ea/esr_ea_codec.py @@ -1,18 +1,24 @@ # -*- coding:utf-8 -*- # Copyright (C) 2020. Huawei Technologies Co., Ltd. All rights reserved. -# This program is free software; you can redistribute it and/or modify -# it under the terms of the MIT License. -# This program is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# MIT License for more details. 
+# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. """Encode and decode the model config. for ESR.""" -from vega.core.search_algs.codec import Codec +import logging from copy import deepcopy import numpy as np -import logging +from vega.core.search_algs.codec import Codec from vega.common import ClassType, ClassFactory diff --git a/vega/algorithms/nas/esr_ea/esr_ea_individual.py b/vega/algorithms/nas/esr_ea/esr_ea_individual.py index 5172dd6..b7eb87b 100644 --- a/vega/algorithms/nas/esr_ea/esr_ea_individual.py +++ b/vega/algorithms/nas/esr_ea/esr_ea_individual.py @@ -1,16 +1,22 @@ # -*- coding: utf-8 -*- # Copyright (C) 2020. Huawei Technologies Co., Ltd. All rights reserved. -# This program is free software; you can redistribute it and/or modify -# it under the terms of the MIT License. -# This program is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# MIT License for more details. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. """The Individual definition of ESR_EA algorithm.""" from bisect import bisect_right -from random import random +import random import numpy as np from .conf import ESRRangeConfig @@ -42,7 +48,7 @@ def init_gene(self): """Initialize the gene randomly.""" for gene_ind in range(self.node_num): type_prob = self.net_info.func_prob - self.gene[gene_ind][1] = bisect_right(type_prob, random()) + self.gene[gene_ind][1] = bisect_right(type_prob, random.random()) self.gene[gene_ind][0] = np.random.randint(2) def using_node_num(self): @@ -183,7 +189,7 @@ def mutation_node(self, mutation_rate=0.05): for node_ind in range(self.node_num): if self.gene[node_ind][0] and np.random.rand() < mutation_rate: type_prob = self.net_info.func_prob - self.gene[node_ind][1] = bisect_right(type_prob, random()) + self.gene[node_ind][1] = bisect_right(type_prob, random.random()) self.active_net = self.active_net_list() self.parameter = self.network_parameter() self.flops = self.network_flops() diff --git a/vega/algorithms/nas/esr_ea/esr_ea_trainer_callback.py b/vega/algorithms/nas/esr_ea/esr_ea_trainer_callback.py index 9c6a9cc..00524ad 100644 --- a/vega/algorithms/nas/esr_ea/esr_ea_trainer_callback.py +++ b/vega/algorithms/nas/esr_ea/esr_ea_trainer_callback.py @@ -1,12 +1,18 @@ # -*- coding:utf-8 -*- # Copyright (C) 2020. Huawei Technologies Co., Ltd. All rights reserved. 
-# This program is free software; you can redistribute it and/or modify -# it under the terms of the MIT License. -# This program is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# MIT License for more details. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. """The trainer program for ESR_EA.""" from vega.common import ClassFactory, ClassType diff --git a/vega/algorithms/nas/esr_ea/esr_search.py b/vega/algorithms/nas/esr_ea/esr_search.py index c3fb61e..2cbe5bf 100644 --- a/vega/algorithms/nas/esr_ea/esr_search.py +++ b/vega/algorithms/nas/esr_ea/esr_search.py @@ -1,27 +1,34 @@ # -*- coding:utf-8 -*- # Copyright (C) 2020. Huawei Technologies Co., Ltd. All rights reserved. -# This program is free software; you can redistribute it and/or modify -# it under the terms of the MIT License. -# This program is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# MIT License for more details. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
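A small cleanup recurs in esr_ea_individual.py above and esr_search.py below: the bare "from random import random" (and sample) is replaced by importing the module and qualifying each call, so the name random always refers to the module instead of a same-named function. A short sketch of the two styles, for clarity only:

# Before: the imported function shadows the module name within the file.
from random import random, sample
value = random()
pair = sample(range(10), 2)

# After (the style this patch adopts): qualified calls, no shadowing.
import random
value = random.random()
pair = random.sample(range(10), 2)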
"""search algorithm for ESR_EA.""" + import csv import logging import os +import random from bisect import bisect_right -from random import random, sample import numpy as np import pandas as pd from vega.common.general import General -from .conf import ESRConfig from vega.common import FileOps from vega.common import ClassFactory, ClassType from vega.core.search_algs import SearchAlgorithm from .esr_ea_individual import ESRIndividual +from .conf import ESRConfig @ClassFactory.register(ClassType.SEARCH_ALGORITHM) @@ -161,7 +168,7 @@ def parent_select(self, parent_num=2, select_type='Tournament'): fitness_all = np.asarray(fitness_all) if select_type == 'Tournament': for i in range(parent_num): - tourn = sample(range(len(popu_all)), 2) + tourn = random.sample(range(len(popu_all)), 2) if fitness_all[tourn[0]] >= fitness_all[tourn[1]]: parent[i].copy(popu_all[tourn[0]]) fitness_all[tourn[0]] = 0 @@ -173,7 +180,7 @@ def parent_select(self, parent_num=2, select_type='Tournament'): eval_norm = eval_submean / sum(eval_submean) eva_threshold = np.cumsum(eval_norm) for i in range(parent_num): - ran = random() + ran = random.random() selec_id = bisect_right(eva_threshold, ran) parent[i].copy(popu_all[selec_id]) eval_submean[selec_id] = 0 @@ -202,7 +209,7 @@ def get_mutate_child(self, muta_num): if int(self.individual_num / 2) == len(self.elitism): self.pop[i].copy(self.elitism[i]) else: - self.pop[i].copy(sample(self.elitism, 1)[0]) + self.pop[i].copy(random.sample(self.elitism, 1)[0]) self.pop[i].mutation_using(self.mutation_rate) while self.pop[i].active_num < self.min_active: self.pop[i].mutation_using(self.mutation_rate) diff --git a/vega/algorithms/nas/fis/__init__.py b/vega/algorithms/nas/fis/__init__.py index eb6a181..d2c9fe0 100644 --- a/vega/algorithms/nas/fis/__init__.py +++ b/vega/algorithms/nas/fis/__init__.py @@ -1,8 +1,8 @@ -import vega -if vega.is_torch_backend(): - from .ctr_trainer_callback import CtrTrainerCallback - from .autogroup_trainer_callback import AutoGroupTrainerCallback - from .autogate_s1_trainer_callback import AutoGateS1TrainerCallback - from .autogate_s2_trainer_callback import AutoGateS2TrainerCallback - from .autogate_grda_s1_trainer_callback import AutoGateGrdaS1TrainerCallback - from .autogate_grda_s2_trainer_callback import AutoGateGrdaS2TrainerCallback +import vega +if vega.is_torch_backend(): + from .ctr_trainer_callback import CtrTrainerCallback + from .autogroup_trainer_callback import AutoGroupTrainerCallback + from .autogate_s1_trainer_callback import AutoGateS1TrainerCallback + from .autogate_s2_trainer_callback import AutoGateS2TrainerCallback + from .autogate_grda_s1_trainer_callback import AutoGateGrdaS1TrainerCallback + from .autogate_grda_s2_trainer_callback import AutoGateGrdaS2TrainerCallback diff --git a/vega/algorithms/nas/fis/autogate_grda_s1_trainer_callback.py b/vega/algorithms/nas/fis/autogate_grda_s1_trainer_callback.py index c5dfb38..d0ee360 100644 --- a/vega/algorithms/nas/fis/autogate_grda_s1_trainer_callback.py +++ b/vega/algorithms/nas/fis/autogate_grda_s1_trainer_callback.py @@ -1,139 +1,143 @@ -# -*- coding: utf-8 -*- - -# Copyright (C) 2020. Huawei Technologies Co., Ltd. All rights reserved. -# This program is free software; you can redistribute it and/or modify -# it under the terms of the MIT License. -# This program is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# MIT License for more details. 
-"""AutoGate Grda version Stage1 TrainerCallback.""" - -import logging -from vega.common import ClassFactory, ClassType -from vega.common import FileOps -from vega.algorithms.nas.fis.ctr_trainer_callback import CtrTrainerCallback -import torch.optim as optim -from vega.algorithms.nas.fis.grda import gRDA - -logger = logging.getLogger(__name__) - - -@ClassFactory.register(ClassType.CALLBACK) -class AutoGateGrdaS1TrainerCallback(CtrTrainerCallback): - """AutoGateGrdaS1TrainerCallback module.""" - - def __init__(self): - """Construct AutoGateGrdaS1TrainerCallback class.""" - super(CtrTrainerCallback, self).__init__() - self.best_score = 0 - - logging.info("init autogate s1 trainer callback") - - def before_train(self, logs=None): - """Be called before the training process.""" - self.config = self.trainer.config - all_parameters = self.trainer.model.parameters() - structure_params = self.trainer.model.structure_params - net_params = [i for i in all_parameters if i not in structure_params] - self.struc_optimizer = self._init_structure_optimizer(structure_params) - self.net_optimizer = self._init_network_optimizer(net_params) - - def _init_structure_optimizer(self, learnable_params): - """ - Init structure optimizer for optimize structure params in autogate model. - - :param learnable_params: learnable structure params - :type learnable_params: list object - :return: optimizer object - :rtype: torch.optim.Optimizer - """ - logging.info("init net optimizer, lr = {}".format(self.config.struc_optim.struct_lr)) - optimizer = gRDA(learnable_params, lr=float(self.config.struc_optim.struct_lr), - c=float(self.config.c), mu=float(self.config.mu)) - - logging.info("init structure optimizer finish.") - return optimizer - - def _init_network_optimizer(self, learnable_params): - """ - Init structure optimizer for optimize structure params in autogate model. - - :param learnable_params: learnable network params - :type learnable_params: list object - :return: optimizer object - :rtype: torch.optim.Optimizer - """ - logging.info("init net optimizer, lr = {}".format(self.config.net_optim.net_lr)) - optimizer = optim.Adam(learnable_params, lr=float(self.config.net_optim.net_lr)) - - logging.info("init structure optimizer finish.") - return optimizer - - def train_step(self, batch): - """ - Training progress for a batch data train net_param and struct_param step by step (iteratly). - - :param batch: batch train data. - :type batch: list object - :return: loss & training loss - :rtype: dict object - """ - self.trainer.model.train() - input, target = batch - - self.net_optimizer.zero_grad() - self.struc_optimizer.zero_grad() - - output = self.trainer.model(input) - - loss = self.trainer.loss(output, target) - loss.backward() - self.net_optimizer.step() - self.struc_optimizer.step() - - return {'loss': loss.item(), - 'train_batch_output': output, - 'lr': self.trainer.lr_scheduler.get_lr()} - - def valid_step(self, batch): - """ - Validate progress for a batch data. 
- - :param batch: batch data - :type object - :return: valid batch output - :rtype: dict object - """ - input, target = batch - - output = self.trainer.model(input) - return {'valid_batch_output': output} - - def after_valid(self, logs=None): - """Call after_valid of the managed callbacks.""" - self.model = self.trainer.model - feature_interaction_score = self.model.get_feature_interaction_score() - print('get feature_interaction_score', feature_interaction_score) - feature_interaction = [] - for feature in feature_interaction_score: - if abs(feature_interaction_score[feature]) > 0: - feature_interaction.append(feature) - print('get feature_interaction', feature_interaction) - - curr_auc = float(self.trainer.valid_metrics.results['auc']) - if curr_auc > self.best_score: - best_config = { - 'score': curr_auc, - 'feature_interaction': feature_interaction - } - - logging.info("BEST CONFIG IS\n{}".format(best_config)) - pickle_result_file = FileOps.join_path( - self.trainer.local_output_path, 'best_config.pickle') - logging.info("Saved to {}".format(pickle_result_file)) - FileOps.dump_pickle(best_config, pickle_result_file) - - self.best_score = curr_auc - - # TaskU.upload_task_folder() +# -*- coding: utf-8 -*- + +# Copyright (C) 2020. Huawei Technologies Co., Ltd. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +"""AutoGate Grda version Stage1 TrainerCallback.""" + +import logging +from vega.common import ClassFactory, ClassType +from vega.common import FileOps +from vega.algorithms.nas.fis.ctr_trainer_callback import CtrTrainerCallback +import torch.optim as optim +from vega.algorithms.nas.fis.grda import gRDA + +logger = logging.getLogger(__name__) + + +@ClassFactory.register(ClassType.CALLBACK) +class AutoGateGrdaS1TrainerCallback(CtrTrainerCallback): + """AutoGateGrdaS1TrainerCallback module.""" + + def __init__(self): + """Construct AutoGateGrdaS1TrainerCallback class.""" + super(CtrTrainerCallback, self).__init__() + self.best_score = 0 + + logging.info("init autogate s1 trainer callback") + + def before_train(self, logs=None): + """Be called before the training process.""" + self.config = self.trainer.config + all_parameters = self.trainer.model.parameters() + structure_params = self.trainer.model.structure_params + net_params = [i for i in all_parameters if i not in structure_params] + self.struc_optimizer = self._init_structure_optimizer(structure_params) + self.net_optimizer = self._init_network_optimizer(net_params) + + def _init_structure_optimizer(self, learnable_params): + """ + Init structure optimizer for optimize structure params in autogate model. 
+ + :param learnable_params: learnable structure params + :type learnable_params: list object + :return: optimizer object + :rtype: torch.optim.Optimizer + """ + logging.info("init net optimizer, lr = {}".format(self.config.struc_optim.struct_lr)) + optimizer = gRDA(learnable_params, lr=float(self.config.struc_optim.struct_lr), + c=float(self.config.c), mu=float(self.config.mu)) + + logging.info("init structure optimizer finish.") + return optimizer + + def _init_network_optimizer(self, learnable_params): + """ + Init structure optimizer for optimize structure params in autogate model. + + :param learnable_params: learnable network params + :type learnable_params: list object + :return: optimizer object + :rtype: torch.optim.Optimizer + """ + logging.info("init net optimizer, lr = {}".format(self.config.net_optim.net_lr)) + optimizer = optim.Adam(learnable_params, lr=float(self.config.net_optim.net_lr)) + + logging.info("init structure optimizer finish.") + return optimizer + + def train_step(self, batch): + """ + Training progress for a batch data train net_param and struct_param step by step (iteratly). + + :param batch: batch train data. + :type batch: list object + :return: loss & training loss + :rtype: dict object + """ + self.trainer.model.train() + input, target = batch + + self.net_optimizer.zero_grad() + self.struc_optimizer.zero_grad() + + output = self.trainer.model(input) + + loss = self.trainer.loss(output, target) + loss.backward() + self.net_optimizer.step() + self.struc_optimizer.step() + + return {'loss': loss.item(), + 'train_batch_output': output, + 'lr': self.trainer.lr_scheduler.get_lr()} + + def valid_step(self, batch): + """ + Validate progress for a batch data. + + :param batch: batch data + :type object + :return: valid batch output + :rtype: dict object + """ + input, target = batch + + output = self.trainer.model(input) + return {'valid_batch_output': output} + + def after_valid(self, logs=None): + """Call after_valid of the managed callbacks.""" + self.model = self.trainer.model + feature_interaction_score = self.model.get_feature_interaction_score() + print('get feature_interaction_score', feature_interaction_score) + feature_interaction = [] + for feature in feature_interaction_score: + if abs(feature_interaction_score[feature]) > 0: + feature_interaction.append(feature) + print('get feature_interaction', feature_interaction) + + curr_auc = float(self.trainer.valid_metrics.results['auc']) + if curr_auc > self.best_score: + best_config = { + 'score': curr_auc, + 'feature_interaction': feature_interaction + } + + logging.info("BEST CONFIG IS\n{}".format(best_config)) + pickle_result_file = FileOps.join_path( + self.trainer.local_output_path, 'best_config.pickle') + logging.info("Saved to {}".format(pickle_result_file)) + FileOps.dump_pickle(best_config, pickle_result_file) + + self.best_score = curr_auc diff --git a/vega/algorithms/nas/fis/autogate_grda_s2_trainer_callback.py b/vega/algorithms/nas/fis/autogate_grda_s2_trainer_callback.py index 2e905bf..e88c868 100644 --- a/vega/algorithms/nas/fis/autogate_grda_s2_trainer_callback.py +++ b/vega/algorithms/nas/fis/autogate_grda_s2_trainer_callback.py @@ -1,62 +1,68 @@ -# -*- coding: utf-8 -*- - -# Copyright (C) 2020. Huawei Technologies Co., Ltd. All rights reserved. -# This program is free software; you can redistribute it and/or modify -# it under the terms of the MIT License. 
-# This program is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# MIT License for more details. -"""AutoGate Grda version Stage2 TrainerCallback.""" - -import logging -import pandas as pd -from vega.common import ClassFactory, ClassType -from vega.common import FileOps -from vega.algorithms.nas.fis.ctr_trainer_callback import CtrTrainerCallback -from vega.core.pipeline.conf import ModelConfig - -logger = logging.getLogger(__name__) - - -@ClassFactory.register(ClassType.CALLBACK) -class AutoGateGrdaS2TrainerCallback(CtrTrainerCallback): - """AutoGateGrdaS2TrainerCallback module.""" - - def __init__(self): - """Construct AutoGateGrdaS2TrainerCallback class.""" - super(CtrTrainerCallback, self).__init__() - self.sieve_board = pd.DataFrame( - columns=['selected_feature_pairs', 'score']) - self.selected_pairs = list() - - logging.info("init autogate s2 trainer callback") - - def before_train(self, logs=None): - """Call before_train of the managed callbacks.""" - super().before_train(logs) - - """Be called before the training process.""" - hpo_result = FileOps.load_pickle(FileOps.join_path( - self.trainer.local_output_path, 'best_config.pickle')) - logging.info("loading stage1_hpo_result \n{}".format(hpo_result)) - - self.selected_pairs = hpo_result['feature_interaction'] - logging.info(f'feature_interaction: {self.selected_pairs}') - - # add selected_pairs - setattr(ModelConfig.model_desc['custom'], 'selected_pairs', self.selected_pairs) - - def after_train(self, logs=None): - """Call after_train of the managed callbacks.""" - curr_auc = float(self.trainer.valid_metrics.results['auc']) - - self.sieve_board = self.sieve_board.append( - { - 'selected_feature_pairs': self.selected_pairs, - 'score': curr_auc - }, ignore_index=True) - result_file = FileOps.join_path( - self.trainer.local_output_path, '{}_result.csv'.format(self.trainer.__worker_id__)) - - self.sieve_board.to_csv(result_file, sep='\t') +# -*- coding: utf-8 -*- + +# Copyright (C) 2020. Huawei Technologies Co., Ltd. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
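Both GRDA-stage callbacks (stage 1 above, stage 2 below) pair an ordinary Adam optimizer for the network weights with the gRDA optimizer for the structure parameters; gRDA is added later in this patch as vega/algorithms/nas/fis/grda.py, and its dual-averaging update with a growing soft-threshold is what pushes many structure weights exactly to zero. A minimal, self-contained sketch of driving gRDA on its own; the toy model and data are placeholders, and only the gRDA import comes from this patch:

import torch
import torch.nn as nn
from vega.algorithms.nas.fis.grda import gRDA

model = nn.Linear(16, 1)                      # stand-in for structure parameters
criterion = nn.BCEWithLogitsLoss()
optimizer = gRDA(model.parameters(), lr=0.01, c=0.005, mu=0.7)

x = torch.randn(32, 16)
y = torch.randint(0, 2, (32, 1)).float()

for _ in range(200):
    optimizer.zero_grad()
    loss = criterion(model(x), y)
    loss.backward()
    optimizer.step()

# The growing l1 threshold can clamp small accumulated weights to exactly zero.
sparsity = (model.weight == 0).float().mean().item()
print("loss={:.4f} sparsity={:.2f}".format(loss.item(), sparsity))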
+"""AutoGate Grda version Stage2 TrainerCallback.""" + +import logging +import pandas as pd +from vega.common import ClassFactory, ClassType +from vega.common import FileOps +from vega.algorithms.nas.fis.ctr_trainer_callback import CtrTrainerCallback +from vega.core.pipeline.conf import ModelConfig + +logger = logging.getLogger(__name__) + + +@ClassFactory.register(ClassType.CALLBACK) +class AutoGateGrdaS2TrainerCallback(CtrTrainerCallback): + """AutoGateGrdaS2TrainerCallback module.""" + + def __init__(self): + """Construct AutoGateGrdaS2TrainerCallback class.""" + super(CtrTrainerCallback, self).__init__() + self.sieve_board = pd.DataFrame( + columns=['selected_feature_pairs', 'score']) + self.selected_pairs = list() + + logging.info("init autogate s2 trainer callback") + + def before_train(self, logs=None): + """Call before_train of the managed callbacks.""" + super().before_train(logs) + + """Be called before the training process.""" + hpo_result = FileOps.load_pickle(FileOps.join_path( + self.trainer.local_output_path, 'best_config.pickle')) + logging.info("loading stage1_hpo_result \n{}".format(hpo_result)) + + self.selected_pairs = hpo_result['feature_interaction'] + logging.info(f'feature_interaction: {self.selected_pairs}') + + # add selected_pairs + setattr(ModelConfig.model_desc['custom'], 'selected_pairs', self.selected_pairs) + + def after_train(self, logs=None): + """Call after_train of the managed callbacks.""" + curr_auc = float(self.trainer.valid_metrics.results['auc']) + + self.sieve_board = self.sieve_board.append( + { + 'selected_feature_pairs': self.selected_pairs, + 'score': curr_auc + }, ignore_index=True) + result_file = FileOps.join_path( + self.trainer.local_output_path, '{}_result.csv'.format(self.trainer.__worker_id__)) + + self.sieve_board.to_csv(result_file, sep='\t') diff --git a/vega/algorithms/nas/fis/autogate_s1_trainer_callback.py b/vega/algorithms/nas/fis/autogate_s1_trainer_callback.py index b88662f..345d45a 100644 --- a/vega/algorithms/nas/fis/autogate_s1_trainer_callback.py +++ b/vega/algorithms/nas/fis/autogate_s1_trainer_callback.py @@ -1,12 +1,18 @@ # -*- coding: utf-8 -*- # Copyright (C) 2020. Huawei Technologies Co., Ltd. All rights reserved. -# This program is free software; you can redistribute it and/or modify -# it under the terms of the MIT License. -# This program is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# MIT License for more details. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. """AutoGate top-k version Stage1 TrainerCallback.""" diff --git a/vega/algorithms/nas/fis/autogate_s2_trainer_callback.py b/vega/algorithms/nas/fis/autogate_s2_trainer_callback.py index f706fc6..554f130 100644 --- a/vega/algorithms/nas/fis/autogate_s2_trainer_callback.py +++ b/vega/algorithms/nas/fis/autogate_s2_trainer_callback.py @@ -1,71 +1,77 @@ -# -*- coding: utf-8 -*- - -# Copyright (C) 2020. Huawei Technologies Co., Ltd. 
All rights reserved. -# This program is free software; you can redistribute it and/or modify -# it under the terms of the MIT License. -# This program is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# MIT License for more details. -"""AutoGate top-k version Stage2 TrainerCallback.""" - -import logging -import pandas as pd -from vega.common import ClassFactory, ClassType -from vega.common import FileOps -from vega.algorithms.nas.fis.ctr_trainer_callback import CtrTrainerCallback -from vega.core.pipeline.conf import ModelConfig - -logger = logging.getLogger(__name__) - - -@ClassFactory.register(ClassType.CALLBACK) -class AutoGateS2TrainerCallback(CtrTrainerCallback): - """AutoGateS2TrainerCallback module.""" - - def __init__(self): - """Construct AutoGateS2TrainerCallback class.""" - super(CtrTrainerCallback, self).__init__() - self.sieve_board = pd.DataFrame( - columns=['selected_feature_pairs', 'score']) - self.selected_pairs = list() - - logging.info("init autogate s2 trainer callback") - - def before_train(self, logs=None): - """Call before_train of the managed callbacks.""" - super().before_train(logs) - - """Be called before the training process.""" - hpo_result = FileOps.load_pickle(FileOps.join_path( - self.trainer.local_output_path, 'best_config.pickle')) - logging.info("loading stage1_hpo_result \n{}".format(hpo_result)) - - feature_interaction_score = hpo_result['feature_interaction_score'] - print('feature_interaction_score:', feature_interaction_score) - sorted_pairs = sorted(feature_interaction_score.items(), - key=lambda x: abs(x[1]), reverse=True) - - if ModelConfig.model_desc: - fis_ratio = ModelConfig.model_desc["custom"]["fis_ratio"] - else: - fis_ratio = 1.0 - top_k = int(len(feature_interaction_score) * min(1.0, fis_ratio)) - self.selected_pairs = list(map(lambda x: x[0], sorted_pairs[:top_k])) - - # add selected_pairs - setattr(ModelConfig.model_desc['custom'], 'selected_pairs', self.selected_pairs) - - def after_train(self, logs=None): - """Call after_train of the managed callbacks.""" - curr_auc = float(self.trainer.valid_metrics.results['auc']) - - self.sieve_board = self.sieve_board.append( - { - 'selected_feature_pairs': self.selected_pairs, - 'score': curr_auc - }, ignore_index=True) - result_file = FileOps.join_path( - self.trainer.local_output_path, '{}_result.csv'.format(self.trainer.__worker_id__)) - - self.sieve_board.to_csv(result_file, sep='\t') +# -*- coding: utf-8 -*- + +# Copyright (C) 2020. Huawei Technologies Co., Ltd. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+"""AutoGate top-k version Stage2 TrainerCallback.""" + +import logging +import pandas as pd +from vega.common import ClassFactory, ClassType +from vega.common import FileOps +from vega.algorithms.nas.fis.ctr_trainer_callback import CtrTrainerCallback +from vega.core.pipeline.conf import ModelConfig + +logger = logging.getLogger(__name__) + + +@ClassFactory.register(ClassType.CALLBACK) +class AutoGateS2TrainerCallback(CtrTrainerCallback): + """AutoGateS2TrainerCallback module.""" + + def __init__(self): + """Construct AutoGateS2TrainerCallback class.""" + super(CtrTrainerCallback, self).__init__() + self.sieve_board = pd.DataFrame( + columns=['selected_feature_pairs', 'score']) + self.selected_pairs = list() + + logging.info("init autogate s2 trainer callback") + + def before_train(self, logs=None): + """Call before_train of the managed callbacks.""" + super().before_train(logs) + + """Be called before the training process.""" + hpo_result = FileOps.load_pickle(FileOps.join_path( + self.trainer.local_output_path, 'best_config.pickle')) + logging.info("loading stage1_hpo_result \n{}".format(hpo_result)) + + feature_interaction_score = hpo_result['feature_interaction_score'] + print('feature_interaction_score:', feature_interaction_score) + sorted_pairs = sorted(feature_interaction_score.items(), + key=lambda x: abs(x[1]), reverse=True) + + if ModelConfig.model_desc: + fis_ratio = ModelConfig.model_desc["custom"]["fis_ratio"] + else: + fis_ratio = 1.0 + top_k = int(len(feature_interaction_score) * min(1.0, fis_ratio)) + self.selected_pairs = list(map(lambda x: x[0], sorted_pairs[:top_k])) + + # add selected_pairs + setattr(ModelConfig.model_desc['custom'], 'selected_pairs', self.selected_pairs) + + def after_train(self, logs=None): + """Call after_train of the managed callbacks.""" + curr_auc = float(self.trainer.valid_metrics.results['auc']) + + self.sieve_board = self.sieve_board.append( + { + 'selected_feature_pairs': self.selected_pairs, + 'score': curr_auc + }, ignore_index=True) + result_file = FileOps.join_path( + self.trainer.local_output_path, '{}_result.csv'.format(self.trainer.__worker_id__)) + + self.sieve_board.to_csv(result_file, sep='\t') diff --git a/vega/algorithms/nas/fis/autogroup_trainer_callback.py b/vega/algorithms/nas/fis/autogroup_trainer_callback.py index f73dcd9..b9ddb50 100644 --- a/vega/algorithms/nas/fis/autogroup_trainer_callback.py +++ b/vega/algorithms/nas/fis/autogroup_trainer_callback.py @@ -1,118 +1,124 @@ -# -*- coding: utf-8 -*- - -# Copyright (C) 2020. Huawei Technologies Co., Ltd. All rights reserved. -# This program is free software; you can redistribute it and/or modify -# it under the terms of the MIT License. -# This program is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# MIT License for more details. - -"""AutoGroup algorithm trainer callback file.""" - -import logging -import torch.optim as optim -from vega.common import ClassFactory, ClassType -from .ctr_trainer_callback import CtrTrainerCallback - -logger = logging.getLogger(__name__) - - -@ClassFactory.register(ClassType.CALLBACK) -class AutoGroupTrainerCallback(CtrTrainerCallback): - """AutoGroup algorithm trainer callbacks. - - Different from other trainer method, AutoGroup respectively train network params and structure params, - thus, there define two optimizers to train these params respectively. 
- """ - - def __init__(self): - """Class of AutoGroupTrainerCallback.""" - super(AutoGroupTrainerCallback, self).__init__() - logging.info("init autogroup trainer callback finish.") - - def before_train(self, logs=None): - """Be called before the training process.""" - self._init_all_settings() - - def _init_all_settings(self): - """Init all settings from config.""" - self.config = self.trainer.config - logging.info("AutoGroupTrainerCallbacks: {}".format(self.config)) - self.struc_optimizer = self._init_structure_optimizer(self.trainer.model) - self.net_optimizer = self._init_network_optimizer(self.trainer.model) - - def _init_structure_optimizer(self, model): - """ - Init structure optimizer for optimize structure params in AutoGroup model. - - :param model: Autogroup model - :type model: torch.nn.Module - :return: optimizer object - :rtype: torch.optim.Optimizer - """ - learnable_params = model.structure_params - logging.info("init net optimizer, lr = {}".format(self.config.struc_optim.struct_lr)) - optimizer = optim.Adam(learnable_params, lr=float(self.config.struc_optim.struct_lr)) - - logging.info("init structure optimizer finish.") - return optimizer - - def _init_network_optimizer(self, model): - """ - Init structure optimizer for optimize structure params in AutoGroup model. - - :param model: Autogroup model - :type model: torch.nn.Module - :return: optimizer object - :rtype: torch.optim.Optimizer - """ - learnable_params = model.net_params - optimizer = optim.Adam(learnable_params, lr=float(self.config.net_optim.net_lr)) - logging.info("init net optimizer, lr = {}".format(self.config.net_optim.net_lr)) - logging.info("init structure optimizer finish.") - return optimizer - - def train_step(self, batch): - """ - Training progress for a batch data. - - :param batch: batch train data. - :type batch: list object - :return: loss & training loss - :rtype: dict object - """ - self.trainer.model.train() - input, target = batch - - # first step: train network params. - self.net_optimizer.zero_grad() - output = self.trainer.model(input, fix_structure=True) - loss = self.trainer.loss(output, target) - loss.backward() - self.net_optimizer.step() - - # second step : train struture params - self.struc_optimizer.zero_grad() - struct_output = self.trainer.model(input, fix_structure=False) - struct_loss = self.trainer.loss(struct_output, target) - struct_loss.backward() - self.struc_optimizer.step() - - return {'loss': loss.item(), - 'train_batch_output': output, - 'lr': self.trainer.lr_scheduler.get_lr()} - - def valid_step(self, batch): - """ - Validate progress for a batch data. - - :param batch: batch data - :type object - :return: valid batch output - :rtype: dict object - """ - input, target = batch - - output = self.trainer.model(input, fix_structure=True) - return {'valid_batch_output': output} +# -*- coding: utf-8 -*- + +# Copyright (C) 2020. Huawei Technologies Co., Ltd. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +"""AutoGroup algorithm trainer callback file.""" + +import logging +import torch.optim as optim +from vega.common import ClassFactory, ClassType +from .ctr_trainer_callback import CtrTrainerCallback + +logger = logging.getLogger(__name__) + + +@ClassFactory.register(ClassType.CALLBACK) +class AutoGroupTrainerCallback(CtrTrainerCallback): + """AutoGroup algorithm trainer callbacks. + + Different from other trainer method, AutoGroup respectively train network params and structure params, + thus, there define two optimizers to train these params respectively. + """ + + def __init__(self): + """Class of AutoGroupTrainerCallback.""" + super(AutoGroupTrainerCallback, self).__init__() + logging.info("init autogroup trainer callback finish.") + + def before_train(self, logs=None): + """Be called before the training process.""" + self._init_all_settings() + + def _init_all_settings(self): + """Init all settings from config.""" + self.config = self.trainer.config + logging.info("AutoGroupTrainerCallbacks: {}".format(self.config)) + self.struc_optimizer = self._init_structure_optimizer(self.trainer.model) + self.net_optimizer = self._init_network_optimizer(self.trainer.model) + + def _init_structure_optimizer(self, model): + """ + Init structure optimizer for optimize structure params in AutoGroup model. + + :param model: Autogroup model + :type model: torch.nn.Module + :return: optimizer object + :rtype: torch.optim.Optimizer + """ + learnable_params = model.structure_params + logging.info("init net optimizer, lr = {}".format(self.config.struc_optim.struct_lr)) + optimizer = optim.Adam(learnable_params, lr=float(self.config.struc_optim.struct_lr)) + + logging.info("init structure optimizer finish.") + return optimizer + + def _init_network_optimizer(self, model): + """ + Init structure optimizer for optimize structure params in AutoGroup model. + + :param model: Autogroup model + :type model: torch.nn.Module + :return: optimizer object + :rtype: torch.optim.Optimizer + """ + learnable_params = model.net_params + optimizer = optim.Adam(learnable_params, lr=float(self.config.net_optim.net_lr)) + logging.info("init net optimizer, lr = {}".format(self.config.net_optim.net_lr)) + logging.info("init structure optimizer finish.") + return optimizer + + def train_step(self, batch): + """ + Training progress for a batch data. + + :param batch: batch train data. + :type batch: list object + :return: loss & training loss + :rtype: dict object + """ + self.trainer.model.train() + input, target = batch + + # first step: train network params. + self.net_optimizer.zero_grad() + output = self.trainer.model(input, fix_structure=True) + loss = self.trainer.loss(output, target) + loss.backward() + self.net_optimizer.step() + + # second step : train struture params + self.struc_optimizer.zero_grad() + struct_output = self.trainer.model(input, fix_structure=False) + struct_loss = self.trainer.loss(struct_output, target) + struct_loss.backward() + self.struc_optimizer.step() + + return {'loss': loss.item(), + 'train_batch_output': output, + 'lr': self.trainer.lr_scheduler.get_lr()} + + def valid_step(self, batch): + """ + Validate progress for a batch data. 
+ + :param batch: batch data + :type object + :return: valid batch output + :rtype: dict object + """ + input, target = batch + + output = self.trainer.model(input, fix_structure=True) + return {'valid_batch_output': output} diff --git a/vega/algorithms/nas/fis/ctr_trainer_callback.py b/vega/algorithms/nas/fis/ctr_trainer_callback.py index 3fedc70..293a013 100644 --- a/vega/algorithms/nas/fis/ctr_trainer_callback.py +++ b/vega/algorithms/nas/fis/ctr_trainer_callback.py @@ -1,12 +1,18 @@ # -*- coding: utf-8 -*- # Copyright (C) 2020. Huawei Technologies Co., Ltd. All rights reserved. -# This program is free software; you can redistribute it and/or modify -# it under the terms of the MIT License. -# This program is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# MIT License for more details. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. """Base CTR model TrainerCallback.""" diff --git a/vega/algorithms/nas/fis/grda.py b/vega/algorithms/nas/fis/grda.py index ab8ed78..d17a721 100644 --- a/vega/algorithms/nas/fis/grda.py +++ b/vega/algorithms/nas/fis/grda.py @@ -1,95 +1,99 @@ -# -*- coding:utf-8 -*- - -# Copyright (C) 2020. Huawei Technologies Co., Ltd. All rights reserved. -# This program is free software; you can redistribute it and/or modify -# it under the terms of the MIT License. -# This program is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# MIT License for more details. - -"""GRDA optimizer. - -"Generalized Regularized Dual Averaging" is an optimizer that can learn a small sub-network during training, -if one starts from an overparameterized dense network. -Citation: Chao, S.-K., Wang, Z., Xing, Y. and Cheng, G. (2020). -Directional pruning of deep neural networks. https://arxiv.org/abs/2006.09358. -""" - -import torch -from torch.optim.optimizer import Optimizer - - -class gRDA(Optimizer): - """GRDA module.""" - - def __init__(self, params, lr=0.01, c=0.0, mu=0.7): - """ - Constuct gRDA class. - - :param params: learnable params - :type params: list object - :param lr: learning rate - :type lr: float - :param c: initial sparse control constant - :type c: float - :param mu: sparsity control - :type mu: float - - :return: optimizer object - :rtype: class - """ - defaults = dict(lr=lr, c=c, mu=mu) - super(gRDA, self).__init__(params, defaults) - - def __setstate__(self, state): - """Setstate.""" - super(gRDA, self).__setstate__(state) - - def step(self, closure=None): - """ - Optimizer gRDA performs a single optimization step. 
- - :param closure: a closure that reevaluates the model - :type closure: callable object - :return: loss - :rtype: float - """ - loss = None - if closure is not None: - loss = closure() - - for group in self.param_groups: - lr = group['lr'] - c = group['c'] - mu = group['mu'] - - for p in group['params']: - if p.grad is None: - continue - d_p = p.grad.data - - param_state = self.state[p] - - if 'iter_num' not in param_state: - iter_num = param_state['iter_num'] = torch.zeros(1) - accumulator = param_state['accumulator'] = torch.FloatTensor(p.shape).to(p.device) - l1_accumulation = param_state['l1_accumulation'] = torch.zeros(1) - accumulator.data = p.clone() - - else: - iter_num = param_state['iter_num'] - accumulator = param_state['accumulator'] - l1_accumulation = param_state['l1_accumulation'] - iter_num.add_(1) - accumulator.data.add_(-lr, d_p) - - # l1 = c * torch.pow(torch.tensor(lr), 0.5 + mu) * torch.pow(iter_num, mu) - l1_diff = c * torch.pow(torch.tensor(lr), mu + 0.5) * torch.pow(iter_num, mu) - c * torch.pow( - torch.tensor(lr), mu + 0.5) * torch.pow(iter_num - 1, mu) - l1_accumulation += l1_diff - - new_a_l1 = torch.abs(accumulator.data) - l1_accumulation.to(p.device) - p.data = torch.sign(accumulator.data) * new_a_l1.clamp(min=0) - - return loss +# -*- coding:utf-8 -*- + +# Copyright (C) 2020. Huawei Technologies Co., Ltd. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""GRDA optimizer. + +"Generalized Regularized Dual Averaging" is an optimizer that can learn a small sub-network during training, +if one starts from an overparameterized dense network. +Citation: Chao, S.-K., Wang, Z., Xing, Y. and Cheng, G. (2020). +Directional pruning of deep neural networks. https://arxiv.org/abs/2006.09358. +""" + +import torch +from torch.optim.optimizer import Optimizer + + +class gRDA(Optimizer): + """GRDA module.""" + + def __init__(self, params, lr=0.01, c=0.0, mu=0.7): + """ + Constuct gRDA class. + + :param params: learnable params + :type params: list object + :param lr: learning rate + :type lr: float + :param c: initial sparse control constant + :type c: float + :param mu: sparsity control + :type mu: float + + :return: optimizer object + :rtype: class + """ + defaults = dict(lr=lr, c=c, mu=mu) + super(gRDA, self).__init__(params, defaults) + + def __setstate__(self, state): + """Setstate.""" + super(gRDA, self).__setstate__(state) + + def step(self, closure=None): + """ + Optimizer gRDA performs a single optimization step. 
+ + :param closure: a closure that reevaluates the model + :type closure: callable object + :return: loss + :rtype: float + """ + loss = None + if closure is not None: + loss = closure() + + for group in self.param_groups: + lr = group['lr'] + c = group['c'] + mu = group['mu'] + + for p in group['params']: + if p.grad is None: + continue + d_p = p.grad.data + + param_state = self.state[p] + + if 'iter_num' not in param_state: + iter_num = param_state['iter_num'] = torch.zeros(1) + accumulator = param_state['accumulator'] = torch.FloatTensor(p.shape).to(p.device) + l1_accumulation = param_state['l1_accumulation'] = torch.zeros(1) + accumulator.data = p.clone() + + else: + iter_num = param_state['iter_num'] + accumulator = param_state['accumulator'] + l1_accumulation = param_state['l1_accumulation'] + iter_num.add_(1) + accumulator.data.add_(-lr, d_p) + l1_diff = c * torch.pow(torch.tensor(lr), mu + 0.5) * torch.pow(iter_num, mu) - c * torch.pow( + torch.tensor(lr), mu + 0.5) * torch.pow(iter_num - 1, mu) + l1_accumulation += l1_diff + + new_a_l1 = torch.abs(accumulator.data) - l1_accumulation.to(p.device) + p.data = torch.sign(accumulator.data) * new_a_l1.clamp(min=0) + + return loss diff --git a/vega/algorithms/nas/mfasc/conf.py b/vega/algorithms/nas/mfasc/conf.py index 38518bc..3c057d3 100644 --- a/vega/algorithms/nas/mfasc/conf.py +++ b/vega/algorithms/nas/mfasc/conf.py @@ -1,12 +1,18 @@ # -*- coding:utf-8 -*- # Copyright (C) 2020. Huawei Technologies Co., Ltd. All rights reserved. -# This program is free software; you can redistribute it and/or modify -# it under the terms of the MIT License. -# This program is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# MIT License for more details. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. """Defined Configs.""" diff --git a/vega/algorithms/nas/mfasc/mfasc.py b/vega/algorithms/nas/mfasc/mfasc.py index 3e6f631..5d64392 100644 --- a/vega/algorithms/nas/mfasc/mfasc.py +++ b/vega/algorithms/nas/mfasc/mfasc.py @@ -1,147 +1,152 @@ -# -*- coding:utf-8 -*- - -# Copyright (C) 2020. Huawei Technologies Co., Ltd. All rights reserved. -# This program is free software; you can redistribute it and/or modify -# it under the terms of the MIT License. -# This program is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# MIT License for more details. - -"""Multi-fidelity Active Search with Co-kriging.""" - -import copy -from vega.common import update_dict -from vega.common import ClassFactory, ClassType -from vega.core.search_algs import SearchAlgorithm -import itertools -from sklearn import preprocessing -import numpy as np -import logging - -from . 
import mfasc_utils -from .conf import MFASCConfig - -logger = logging.getLogger(__name__) -''' -Note: search steps must be performed successively -(parallel calls of the search method will violate the algorithms assumptions). -''' - - -@ClassFactory.register(ClassType.SEARCH_ALGORITHM) -class MFASC(SearchAlgorithm): - """Multi-fidelity Active Search with Co-kriging algorithm.""" - - config = MFASCConfig() - - def __init__(self, search_space): - """Construct the MFASC search class. - - :param search_space: config of the search space - :type search_space: dictionary - """ - super(MFASC, self).__init__(search_space) - self.search_space = copy.deepcopy(search_space) - self.budget_spent = 0 - self.sample_size = self.config.sample_size - self.batch_size = self.config.batch_size - self.hf_epochs = self.config.hf_epochs - self.lf_epochs = self.config.lf_epochs - self.max_budget = self.config.max_budget # total amount of epochs to train - self.predictor = mfasc_utils.make_mf_predictor(self.config) - self.r = self.config.fidelity_ratio # fidelity ratio from the MFASC algorithm - self.min_hf_sample_size = self.config.min_hf_sample_size - self.min_lf_sample_size = self.config.min_lf_sample_size - self.hf_sample = [] # pairs of (id, score) - self.lf_sample = [] # pairs of (id, score) - self.rho = self.config.prior_rho - self.beta = self.config.beta - self.cur_fidelity = None - self.cur_i = None - self.best_model_idx = None - self.X = self.search_space.get_sample_space(self.sample_size) - self.choices = [self.search_space.decode(x) for x in self.X] - self.X = preprocessing.scale(self.X, axis=0) - - def search(self): - """Search one random model. - - :return: total spent budget (training epochs), the model, and current training epochs (fidelity) - :rtype: int, dict, int - """ - remaining_hf_inds = np.array(list(set(range(len(self.X))) - set([x[0] for x in self.hf_sample]))) - remaining_lf_inds = np.array(list(set(range(len(self.X))) - set([x[0] for x in self.lf_sample]))) - if len(self.hf_sample) < self.min_hf_sample_size: - # init random hf sample - i = remaining_hf_inds[np.random.randint(len(remaining_hf_inds))] - train_epochs = self.hf_epochs - self.cur_fidelity = 'high' - elif len(self.lf_sample) < self.min_lf_sample_size: - # init random lf sample - i = remaining_lf_inds[np.random.randint(len(remaining_lf_inds))] - train_epochs = self.lf_epochs - self.cur_fidelity = 'low' - else: - # update model - X_low = np.array([self.X[s[0]] for s in self.lf_sample]) - y_low = np.array([s[1] for s in self.lf_sample]) - X_high = np.array([self.X[s[0]] for s in self.hf_sample]) - y_high = np.array([s[1] for s in self.hf_sample]) - self.predictor.fit(X_low, y_low, X_high, y_high) - # main seach - if (len(self.hf_sample) + len(self.lf_sample) + 1) % self.r == 0: - # search hf - inds = remaining_hf_inds[np.random.choice(len(remaining_hf_inds), self.batch_size, replace=False)] - X_test = np.array([self.X[i] for i in inds]) - self.rho, mu, sigma = self.predictor.predict_hf(X_test) - acquisition_score = mu + self.beta * sigma - i = inds[np.argmax(acquisition_score)] - self.cur_fidelity = 'high' - train_epochs = self.hf_epochs - else: - # search lf - inds = remaining_lf_inds[np.random.choice(len(remaining_lf_inds), self.batch_size, replace=False)] - X_test = np.array([self.X[i] for i in inds]) - mu, sigma = self.predictor.predict_lf(X_test) - if self.rho > 0: - acquisition_score = mu + self.beta * sigma - else: - acquisition_score = mu - self.beta * sigma - i = inds[np.argmin(acquisition_score)] - train_epochs = 
self.lf_epochs - self.cur_fidelity = 'low' - - desc = self.choices[i] - self.budget_spent += train_epochs - self.cur_i = i - desc["trainer.epochs"] = train_epochs - return {"worker_id": self.budget_spent, "encoded_desc": desc} - - def update(self, report): - """Update function. - - :param report: the serialized report. - :type report: dict - """ - logger.info(f'Updating, cur fidelity: {self.cur_fidelity}') - acc = report['performance'].get('accuracy', np.nan) - - if self.cur_fidelity == 'high': - self.hf_sample.append((self.cur_i, acc)) - - self.best_model_idx = max(self.hf_sample, key=lambda x: x[1])[0] - self.best_model_desc = self.choices[self.best_model_idx] - elif self.cur_fidelity == 'low': - self.lf_sample.append((self.cur_i, acc)) - else: - raise ValueError(f'cur fidelity is {self.cur_fidelity}; it shall be either "high" or "low"') - - @property - def is_completed(self): - """Tell whether the search process is completed. - - :return: True is completed, or False otherwise - :rtype: bool - """ - return self.budget_spent > self.max_budget +# -*- coding:utf-8 -*- + +# Copyright (C) 2020. Huawei Technologies Co., Ltd. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""Multi-fidelity Active Search with Co-kriging.""" + +import copy +import itertools +import logging +import numpy as np +from sklearn import preprocessing +from vega.common import update_dict +from vega.common import ClassFactory, ClassType +from vega.core.search_algs import SearchAlgorithm +from . import mfasc_utils +from .conf import MFASCConfig + +logger = logging.getLogger(__name__) +''' +Note: search steps must be performed successively +(parallel calls of the search method will violate the algorithms assumptions). +''' + + +@ClassFactory.register(ClassType.SEARCH_ALGORITHM) +class MFASC(SearchAlgorithm): + """Multi-fidelity Active Search with Co-kriging algorithm.""" + + config = MFASCConfig() + + def __init__(self, search_space): + """Construct the MFASC search class. 
+ + :param search_space: config of the search space + :type search_space: dictionary + """ + super(MFASC, self).__init__(search_space) + self.search_space = copy.deepcopy(search_space) + self.budget_spent = 0 + self.sample_size = self.config.sample_size + self.batch_size = self.config.batch_size + self.hf_epochs = self.config.hf_epochs + self.lf_epochs = self.config.lf_epochs + self.max_budget = self.config.max_budget # total amount of epochs to train + self.predictor = mfasc_utils.make_mf_predictor(self.config) + self.r = self.config.fidelity_ratio # fidelity ratio from the MFASC algorithm + self.min_hf_sample_size = self.config.min_hf_sample_size + self.min_lf_sample_size = self.config.min_lf_sample_size + self.hf_sample = [] # pairs of (id, score) + self.lf_sample = [] # pairs of (id, score) + self.rho = self.config.prior_rho + self.beta = self.config.beta + self.cur_fidelity = None + self.cur_i = None + self.best_model_idx = None + self.X = self.search_space.get_sample_space(self.sample_size) + self.choices = [self.search_space.decode(x) for x in self.X] + self.X = preprocessing.scale(self.X, axis=0) + + def search(self): + """Search one random model. + + :return: total spent budget (training epochs), the model, and current training epochs (fidelity) + :rtype: int, dict, int + """ + remaining_hf_inds = np.array(list(set(range(len(self.X))) - set([x[0] for x in self.hf_sample]))) + remaining_lf_inds = np.array(list(set(range(len(self.X))) - set([x[0] for x in self.lf_sample]))) + if len(self.hf_sample) < self.min_hf_sample_size: + # init random hf sample + i = remaining_hf_inds[np.random.randint(len(remaining_hf_inds))] + train_epochs = self.hf_epochs + self.cur_fidelity = 'high' + elif len(self.lf_sample) < self.min_lf_sample_size: + # init random lf sample + i = remaining_lf_inds[np.random.randint(len(remaining_lf_inds))] + train_epochs = self.lf_epochs + self.cur_fidelity = 'low' + else: + # update model + X_low = np.array([self.X[s[0]] for s in self.lf_sample]) + y_low = np.array([s[1] for s in self.lf_sample]) + X_high = np.array([self.X[s[0]] for s in self.hf_sample]) + y_high = np.array([s[1] for s in self.hf_sample]) + self.predictor.fit(X_low, y_low, X_high, y_high) + # main seach + if (len(self.hf_sample) + len(self.lf_sample) + 1) % self.r == 0: + # search hf + inds = remaining_hf_inds[np.random.choice(len(remaining_hf_inds), self.batch_size, replace=False)] + X_test = np.array([self.X[i] for i in inds]) + self.rho, mu, sigma = self.predictor.predict_hf(X_test) + acquisition_score = mu + self.beta * sigma + i = inds[np.argmax(acquisition_score)] + self.cur_fidelity = 'high' + train_epochs = self.hf_epochs + else: + # search lf + inds = remaining_lf_inds[np.random.choice(len(remaining_lf_inds), self.batch_size, replace=False)] + X_test = np.array([self.X[i] for i in inds]) + mu, sigma = self.predictor.predict_lf(X_test) + if self.rho > 0: + acquisition_score = mu + self.beta * sigma + else: + acquisition_score = mu - self.beta * sigma + i = inds[np.argmin(acquisition_score)] + train_epochs = self.lf_epochs + self.cur_fidelity = 'low' + + desc = self.choices[i] + self.budget_spent += train_epochs + self.cur_i = i + desc["trainer.epochs"] = train_epochs + return {"worker_id": self.budget_spent, "encoded_desc": desc} + + def update(self, report): + """Update function. + + :param report: the serialized report. 
+ :type report: dict + """ + logger.info(f'Updating, cur fidelity: {self.cur_fidelity}') + acc = report['performance'].get('accuracy', np.nan) + + if self.cur_fidelity == 'high': + self.hf_sample.append((self.cur_i, acc)) + + self.best_model_idx = max(self.hf_sample, key=lambda x: x[1])[0] + self.best_model_desc = self.choices[self.best_model_idx] + elif self.cur_fidelity == 'low': + self.lf_sample.append((self.cur_i, acc)) + else: + raise ValueError(f'cur fidelity is {self.cur_fidelity}; it shall be either "high" or "low"') + + @property + def is_completed(self): + """Tell whether the search process is completed. + + :return: True is completed, or False otherwise + :rtype: bool + """ + return self.budget_spent > self.max_budget diff --git a/vega/algorithms/nas/mfasc/mfasc_utils.py b/vega/algorithms/nas/mfasc/mfasc_utils.py index 01fbe79..c20d657 100644 --- a/vega/algorithms/nas/mfasc/mfasc_utils.py +++ b/vega/algorithms/nas/mfasc/mfasc_utils.py @@ -1,117 +1,122 @@ -# -*- coding:utf-8 -*- - -# Copyright (C) 2020. Huawei Technologies Co., Ltd. All rights reserved. -# This program is free software; you can redistribute it and/or modify -# it under the terms of the MIT License. -# This program is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# MIT License for more details. - -"""Utilities (predictors) for multi-fidelity active search.""" - -import numpy as np -from sklearn.ensemble import BaggingRegressor -from sklearn.ensemble import GradientBoostingRegressor -import copy -from scipy.stats import linregress -from sklearn.gaussian_process.kernels import RBF, ConstantKernel, WhiteKernel -from . import mfgpr -from .conf import MFASCConfig - - -class MFModel: - """Base class for Multifidelity inference model.""" - - def __init__(self, **args): - """Init model.""" - return - - def fit(self, X_train_lf, y_train_lf, X_train_hf, y_train_hf): - """Fits a model to low- and high- fidelity samples.""" - raise NotImplementedError - - def predict_lf(self, X): - """Predicts low-fidelity values.""" - raise NotImplementedError - - def predict_hf(self, X): - """Predicts low-fidelity values.""" - raise NotImplementedError - - -class MFBaggingRegressorStacked(MFModel): - """Stacked Gradient Boosting Regression predictor.""" - - def __init__(self, **args): - """Init model.""" - self.model_lf = BaggingRegressor(**copy.deepcopy(args)) - self.model_hf = BaggingRegressor(**copy.deepcopy(args)) - - def fit(self, X_train_lf, y_train_lf, X_train_hf, y_train_hf): - """Fits a model to low- and high- fidelity samples using stacking scheme for BaggingRegressor.""" - self.model_lf.fit(X_train_lf, y_train_lf) - X_train_hf = np.hstack((X_train_hf, self.model_lf.predict(X_train_hf).reshape(-1, 1))) - self.model_hf.fit(X_train_hf, y_train_hf) - - def predict_hf(self, X): - """Predict low-fidelity values.""" - y_pred_lf = self.model_lf.predict(X) - X = np.hstack((X, y_pred_lf.reshape(-1, 1))) - - base_preds = [e.predict(X) for e in self.model_hf.estimators_] - - y_pred_hf = np.mean(base_preds, axis=0) - - rho = linregress(y_pred_lf, y_pred_hf)[0] # get slope - - return rho, y_pred_hf, np.std(base_preds, axis=0) - - def predict_lf(self, X): - """Predict low-fidelity values.""" - base_preds = [e.predict(X) for e in self.model_lf.estimators_] - - return np.mean(base_preds, axis=0), np.std(base_preds, axis=0) - - -class MFGPR(MFModel): - """Multi-fidelity Gaussian Process Regression predictor.""" - - 
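MFASC.search() above interleaves the two fidelities at a fixed ratio and scores candidate architectures with an upper-confidence-bound rule built from the co-kriging predictor's mean and standard deviation. The snippet below is a stand-alone paraphrase of that selection rule for illustration only; r, beta, rho and the candidate batch values are invented, and the real predictor is replaced by hard-coded arrays.

# Illustration of the fidelity scheduling and UCB scoring used in MFASC.search() above.
import numpy as np

r, beta, rho = 5, 2.0, 1.0                        # fidelity ratio, UCB weight, lf/hf correlation (invented)
n_evaluated = 17                                  # len(hf_sample) + len(lf_sample) so far (invented)
use_high_fidelity = (n_evaluated + 1) % r == 0    # every r-th step trains at high fidelity

mu = np.array([0.71, 0.68, 0.74])                 # predicted accuracy for a candidate batch (invented)
sigma = np.array([0.02, 0.05, 0.01])              # predictive std from the co-kriging model (invented)

if use_high_fidelity:
    pick = int(np.argmax(mu + beta * sigma))      # optimistic (UCB) high-fidelity choice
else:
    score = mu + beta * sigma if rho > 0 else mu - beta * sigma
    pick = int(np.argmin(score))                  # low-fidelity choice, mirroring search() above
print(use_high_fidelity, pick)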
def __init__(self, **args): - """Init model.""" - self.model = mfgpr.GaussianProcessCoKriging(**copy.deepcopy(args)) - - def fit(self, X_train_lf, y_train_lf, X_train_hf, y_train_hf): - """Fits a model to low- and high- fidelity samples using stacking scheme for BaggingRegressor.""" - self.model.fit(X_train_lf, y_train_lf, X_train_hf, y_train_hf) - - def predict_hf(self, X): - """Predicts low-fidelity values.""" - pred_mean, pred_std = self.model.predict(X, return_std=True) - - return self.model.rho, pred_mean, pred_std - - def predict_lf(self, X): - """Predicts low-fidelity values.""" - pred_mean, pred_std = self.model.predict(X, return_std=True) - - return pred_mean, pred_std - - -def make_mf_predictor(config=MFASCConfig()): - """Make a multi-fidelity model based on config.""" - if config.predictor_type == 'gb_stacked': - return MFBaggingRegressorStacked(base_estimator=GradientBoostingRegressor( - n_estimators=50, - max_depth=5, - ), - n_estimators=20, - max_samples=0.51, - n_jobs=1) - elif config.predictor_type == 'mfgpr': - composite_kernel = RBF(length_scale=1, length_scale_bounds=(0.001, 100)) - composite_kernel = ConstantKernel(1, constant_value_bounds=(0.001, 100)) * composite_kernel - composite_kernel = WhiteKernel(noise_level=1, noise_level_bounds=(0.001, 100)) + composite_kernel - return MFGPR(kernel=composite_kernel, n_restarts_optimizer=3) - else: - raise ValueError("Unknown name, possible options: 'xgb_stacked' and 'mfgpr'") +# -*- coding:utf-8 -*- + +# Copyright (C) 2020. Huawei Technologies Co., Ltd. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""Utilities (predictors) for multi-fidelity active search.""" +import copy +import numpy as np +from sklearn.ensemble import BaggingRegressor +from sklearn.ensemble import GradientBoostingRegressor +from scipy.stats import linregress +from sklearn.gaussian_process.kernels import RBF, ConstantKernel, WhiteKernel +from . 
import mfgpr +from .conf import MFASCConfig + + +class MFModel: + """Base class for Multifidelity inference model.""" + + def __init__(self, **args): + """Init model.""" + return + + def fit(self, X_train_lf, y_train_lf, X_train_hf, y_train_hf): + """Fits a model to low- and high- fidelity samples.""" + raise NotImplementedError + + def predict_lf(self, X): + """Predicts low-fidelity values.""" + raise NotImplementedError + + def predict_hf(self, X): + """Predicts low-fidelity values.""" + raise NotImplementedError + + +class MFBaggingRegressorStacked(MFModel): + """Stacked Gradient Boosting Regression predictor.""" + + def __init__(self, **args): + """Init model.""" + self.model_lf = BaggingRegressor(**copy.deepcopy(args)) + self.model_hf = BaggingRegressor(**copy.deepcopy(args)) + + def fit(self, X_train_lf, y_train_lf, X_train_hf, y_train_hf): + """Fits a model to low- and high- fidelity samples using stacking scheme for BaggingRegressor.""" + self.model_lf.fit(X_train_lf, y_train_lf) + X_train_hf = np.hstack((X_train_hf, self.model_lf.predict(X_train_hf).reshape(-1, 1))) + self.model_hf.fit(X_train_hf, y_train_hf) + + def predict_hf(self, X): + """Predict low-fidelity values.""" + y_pred_lf = self.model_lf.predict(X) + X = np.hstack((X, y_pred_lf.reshape(-1, 1))) + + base_preds = [e.predict(X) for e in self.model_hf.estimators_] + + y_pred_hf = np.mean(base_preds, axis=0) + + rho = linregress(y_pred_lf, y_pred_hf)[0] # get slope + + return rho, y_pred_hf, np.std(base_preds, axis=0) + + def predict_lf(self, X): + """Predict low-fidelity values.""" + base_preds = [e.predict(X) for e in self.model_lf.estimators_] + + return np.mean(base_preds, axis=0), np.std(base_preds, axis=0) + + +class MFGPR(MFModel): + """Multi-fidelity Gaussian Process Regression predictor.""" + + def __init__(self, **args): + """Init model.""" + self.model = mfgpr.GaussianProcessCoKriging(**copy.deepcopy(args)) + + def fit(self, X_train_lf, y_train_lf, X_train_hf, y_train_hf): + """Fits a model to low- and high- fidelity samples using stacking scheme for BaggingRegressor.""" + self.model.fit(X_train_lf, y_train_lf, X_train_hf, y_train_hf) + + def predict_hf(self, X): + """Predicts low-fidelity values.""" + pred_mean, pred_std = self.model.predict(X, return_std=True) + + return self.model.rho, pred_mean, pred_std + + def predict_lf(self, X): + """Predicts low-fidelity values.""" + pred_mean, pred_std = self.model.predict(X, return_std=True) + + return pred_mean, pred_std + + +def make_mf_predictor(config=MFASCConfig()): + """Make a multi-fidelity model based on config.""" + if config.predictor_type == 'gb_stacked': + return MFBaggingRegressorStacked(base_estimator=GradientBoostingRegressor( + n_estimators=50, + max_depth=5, + ), + n_estimators=20, + max_samples=0.51, + n_jobs=1) + elif config.predictor_type == 'mfgpr': + composite_kernel = RBF(length_scale=1, length_scale_bounds=(0.001, 100)) + composite_kernel = ConstantKernel(1, constant_value_bounds=(0.001, 100)) * composite_kernel + composite_kernel = WhiteKernel(noise_level=1, noise_level_bounds=(0.001, 100)) + composite_kernel + return MFGPR(kernel=composite_kernel, n_restarts_optimizer=3) + else: + raise ValueError("Unknown name, possible options: 'xgb_stacked' and 'mfgpr'") diff --git a/vega/algorithms/nas/mfasc/mfgpr.py b/vega/algorithms/nas/mfasc/mfgpr.py index a882f77..ad2ca02 100644 --- a/vega/algorithms/nas/mfasc/mfgpr.py +++ b/vega/algorithms/nas/mfasc/mfgpr.py @@ -1,12 +1,18 @@ # -*- coding:utf-8 -*- # Copyright (C) 2020. Huawei Technologies Co., Ltd. 
All rights reserved. -# This program is free software; you can redistribute it and/or modify -# it under the terms of the MIT License. -# This program is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# MIT License for more details. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. # Author: Nikita Klyuchnikov diff --git a/vega/algorithms/nas/mfkd/mfkd.py b/vega/algorithms/nas/mfkd/mfkd.py index c371627..b7409a0 100644 --- a/vega/algorithms/nas/mfkd/mfkd.py +++ b/vega/algorithms/nas/mfkd/mfkd.py @@ -1,19 +1,25 @@ # -*- coding:utf-8 -*- # Copyright (C) 2020. Huawei Technologies Co., Ltd. All rights reserved. -# This program is free software; you can redistribute it and/or modify -# it under the terms of the MIT License. -# This program is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# MIT License for more details. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. """MFKD1.""" import copy import itertools -import numpy as np import logging +import numpy as np from sklearn import preprocessing from sklearn.gaussian_process import GaussianProcessRegressor as GPR from sklearn.gaussian_process.kernels import RBF diff --git a/vega/algorithms/nas/mfkd/simple_cnn.py b/vega/algorithms/nas/mfkd/simple_cnn.py index 9231427..f5fff4f 100644 --- a/vega/algorithms/nas/mfkd/simple_cnn.py +++ b/vega/algorithms/nas/mfkd/simple_cnn.py @@ -1,12 +1,18 @@ # -*- coding:utf-8 -*- # Copyright (C) 2020. Huawei Technologies Co., Ltd. All rights reserved. -# This program is free software; you can redistribute it and/or modify -# it under the terms of the MIT License. -# This program is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# MIT License for more details. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
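The MFBaggingRegressorStacked predictor in the mfasc_utils.py diff above couples the two fidelities by stacking: the low-fidelity model's prediction is appended as an extra input feature for the high-fidelity model, the spread across bagged base estimators serves as the uncertainty estimate, and the slope of a linear regression between the two predictions plays the role of rho. A self-contained sketch of that idea on synthetic data (all shapes, sizes and values are placeholders):

# Stand-alone illustration of the stacking scheme used by MFBaggingRegressorStacked above.
import numpy as np
from scipy.stats import linregress
from sklearn.ensemble import BaggingRegressor

rng = np.random.default_rng(0)
X_lf, y_lf = rng.normal(size=(200, 8)), rng.normal(size=200)    # many cheap (low-fidelity) evaluations
X_hf, y_hf = X_lf[:30], y_lf[:30] + 0.1 * rng.normal(size=30)   # few expensive (high-fidelity) ones

model_lf = BaggingRegressor().fit(X_lf, y_lf)
X_hf_aug = np.hstack((X_hf, model_lf.predict(X_hf).reshape(-1, 1)))
model_hf = BaggingRegressor().fit(X_hf_aug, y_hf)

X_new = rng.normal(size=(5, 8))                                 # candidates to score
lf_pred = model_lf.predict(X_new)
X_new_aug = np.hstack((X_new, lf_pred.reshape(-1, 1)))
base_preds = [est.predict(X_new_aug) for est in model_hf.estimators_]
hf_mean, hf_std = np.mean(base_preds, axis=0), np.std(base_preds, axis=0)
rho = linregress(lf_pred, hf_mean)[0]                           # slope used as the fidelity correlation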
+# See the License for the specific language governing permissions and +# limitations under the License. """MFKD1.""" diff --git a/vega/algorithms/nas/modnas/backend/__init__.py b/vega/algorithms/nas/modnas/backend/__init__.py index a3e60af..0acb3be 100644 --- a/vega/algorithms/nas/modnas/backend/__init__.py +++ b/vega/algorithms/nas/modnas/backend/__init__.py @@ -1,36 +1,44 @@ # -*- coding:utf-8 -*- # Copyright (C) 2020. Huawei Technologies Co., Ltd. All rights reserved. -# This program is free software; you can redistribute it and/or modify -# it under the terms of the MIT License. -# This program is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# MIT License for more details. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. import importlib +import logging import traceback +from typing import Optional from modnas.registry.backend import build from . import predefined -from typing import Optional _backend = None _backend_keys = [] -def use(backend: Optional[str], *args, imported=False, **kwargs) -> None: +def use(backend_type: Optional[str], *args, imported=False, **kwargs) -> None: """Switch to backend by name.""" global _backend, _backend_keys - if backend == _backend or backend == 'none' or backend is None: + if backend_type == _backend or backend_type == 'none' or backend_type is None: return try: if imported: - bk_mod = importlib.import_module(backend) + bk_mod = importlib.import_module(backend_type) else: - bk_mod = build(backend, *args, **kwargs) - except ImportError: - traceback.print_exc() + bk_mod = build(backend_type, *args, **kwargs) + except ImportError as e: + logging.debug(traceback.format_exc()) + logging.error(f"error occured, message: {e}") return bk_vars = vars(bk_mod) bk_keys = bk_vars.keys() @@ -42,7 +50,7 @@ def use(backend: Optional[str], *args, imported=False, **kwargs) -> None: continue ns[k] = bk_vars[k] _backend_keys = list(bk_keys) - _backend = backend + _backend = backend_type def backend(): @@ -50,6 +58,6 @@ def backend(): return _backend -def is_backend(backend: str) -> bool: +def is_backend(backend_type: str) -> bool: """Return if the current backend is the given one.""" - return _backend == backend + return _backend == backend_type diff --git a/vega/algorithms/nas/modnas/backend/predefined/__init__.py b/vega/algorithms/nas/modnas/backend/predefined/__init__.py index 9a975c2..d84b2cf 100644 --- a/vega/algorithms/nas/modnas/backend/predefined/__init__.py +++ b/vega/algorithms/nas/modnas/backend/predefined/__init__.py @@ -1,12 +1,18 @@ # -*- coding:utf-8 -*- # Copyright (C) 2020. Huawei Technologies Co., Ltd. All rights reserved. -# This program is free software; you can redistribute it and/or modify -# it under the terms of the MIT License. -# This program is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. 
See the -# MIT License for more details. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. from functools import partial import importlib diff --git a/vega/algorithms/nas/modnas/backend/predefined/tensorflow/utils.py b/vega/algorithms/nas/modnas/backend/predefined/tensorflow/utils.py index 7bedaea..3ec53d9 100644 --- a/vega/algorithms/nas/modnas/backend/predefined/tensorflow/utils.py +++ b/vega/algorithms/nas/modnas/backend/predefined/tensorflow/utils.py @@ -1,12 +1,18 @@ # -*- coding:utf-8 -*- # Copyright (C) 2020. Huawei Technologies Co., Ltd. All rights reserved. -# This program is free software; you can redistribute it and/or modify -# it under the terms of the MIT License. -# This program is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# MIT License for more details. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
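The reworked use() in modnas/backend/__init__.py above switches the active backend by name, either importing it as a plain module (imported=True) or building it from the backend registry, and then copies the backend's public symbols into the package namespace. A hedged usage sketch; the import path and the 'torch' backend name are assumptions inferred from the surrounding package layout rather than stated in this diff:

# Illustrative use of the backend switch shown above (paths/names are assumptions).
from modnas import backend as bk   # assumed importable once vega's modnas import hooks are installed

bk.use('torch')                    # build the registered torch backend and re-export its symbols
print(bk.backend())                # expected: 'torch'
print(bk.is_backend('torch'))      # expected: True
bk.use(None)                       # None / 'none' / already-active backend is a no-op, per use() above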
"""Tensorflow utils.""" import tensorflow as tf diff --git a/vega/algorithms/nas/modnas/backend/predefined/torch/__init__.py b/vega/algorithms/nas/modnas/backend/predefined/torch/__init__.py index 79c8c41..31ae1f0 100644 --- a/vega/algorithms/nas/modnas/backend/predefined/torch/__init__.py +++ b/vega/algorithms/nas/modnas/backend/predefined/torch/__init__.py @@ -1,9 +1,3 @@ -from .criterion import get_criterion -from .optimizer import get_optimizer -from .lr_scheduler import get_lr_scheduler -from .data_provider import get_data_provider -from .utils import version, init_device, get_device, set_device, get_dev_mem_used, model_summary,\ - clear_bn_running_statistics, recompute_bn_running_statistics import modnas.core.params.torch import modnas.arch_space.construct.torch import modnas.arch_space.export.torch @@ -13,3 +7,9 @@ import modnas.metrics.torch import modnas.trainer.torch import modnas.optim.torch +from .criterion import get_criterion +from .optimizer import get_optimizer +from .lr_scheduler import get_lr_scheduler +from .data_provider import get_data_provider +from .utils import version, init_device, get_device, set_device, get_dev_mem_used, model_summary,\ + clear_bn_running_statistics, recompute_bn_running_statistics diff --git a/vega/algorithms/nas/modnas/backend/predefined/torch/criterion.py b/vega/algorithms/nas/modnas/backend/predefined/torch/criterion.py index 447efeb..b7f10e4 100644 --- a/vega/algorithms/nas/modnas/backend/predefined/torch/criterion.py +++ b/vega/algorithms/nas/modnas/backend/predefined/torch/criterion.py @@ -1,12 +1,18 @@ # -*- coding:utf-8 -*- # Copyright (C) 2020. Huawei Technologies Co., Ltd. All rights reserved. -# This program is free software; you can redistribute it and/or modify -# it under the terms of the MIT License. -# This program is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# MIT License for more details. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. """Implementation of Criterions (Loss functions).""" import math diff --git a/vega/algorithms/nas/modnas/backend/predefined/torch/data_provider.py b/vega/algorithms/nas/modnas/backend/predefined/torch/data_provider.py index 3b236d9..84a6814 100644 --- a/vega/algorithms/nas/modnas/backend/predefined/torch/data_provider.py +++ b/vega/algorithms/nas/modnas/backend/predefined/torch/data_provider.py @@ -1,12 +1,18 @@ # -*- coding:utf-8 -*- # Copyright (C) 2020. Huawei Technologies Co., Ltd. All rights reserved. -# This program is free software; you can redistribute it and/or modify -# it under the terms of the MIT License. -# This program is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# MIT License for more details. 
+# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. """Torch data providers.""" import copy diff --git a/vega/algorithms/nas/modnas/backend/predefined/torch/lr_scheduler.py b/vega/algorithms/nas/modnas/backend/predefined/torch/lr_scheduler.py index bc19c1a..61b28ae 100644 --- a/vega/algorithms/nas/modnas/backend/predefined/torch/lr_scheduler.py +++ b/vega/algorithms/nas/modnas/backend/predefined/torch/lr_scheduler.py @@ -1,12 +1,18 @@ # -*- coding:utf-8 -*- # Copyright (C) 2020. Huawei Technologies Co., Ltd. All rights reserved. -# This program is free software; you can redistribute it and/or modify -# it under the terms of the MIT License. -# This program is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# MIT License for more details. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. """LR Scheduler.""" import torch diff --git a/vega/algorithms/nas/modnas/backend/predefined/torch/optimizer.py b/vega/algorithms/nas/modnas/backend/predefined/torch/optimizer.py index 7127f80..81fd288 100644 --- a/vega/algorithms/nas/modnas/backend/predefined/torch/optimizer.py +++ b/vega/algorithms/nas/modnas/backend/predefined/torch/optimizer.py @@ -1,12 +1,18 @@ # -*- coding:utf-8 -*- # Copyright (C) 2020. Huawei Technologies Co., Ltd. All rights reserved. -# This program is free software; you can redistribute it and/or modify -# it under the terms of the MIT License. -# This program is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# MIT License for more details. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
"""Parameter Optimizer.""" import torch diff --git a/vega/algorithms/nas/modnas/backend/predefined/torch/utils.py b/vega/algorithms/nas/modnas/backend/predefined/torch/utils.py index 69901ef..cf5b8d4 100644 --- a/vega/algorithms/nas/modnas/backend/predefined/torch/utils.py +++ b/vega/algorithms/nas/modnas/backend/predefined/torch/utils.py @@ -1,12 +1,18 @@ # -*- coding:utf-8 -*- # Copyright (C) 2020. Huawei Technologies Co., Ltd. All rights reserved. -# This program is free software; you can redistribute it and/or modify -# it under the terms of the MIT License. -# This program is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# MIT License for more details. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. """Torch utils.""" import numpy as np diff --git a/vega/algorithms/nas/modnas/callback/base.py b/vega/algorithms/nas/modnas/callback/base.py index d663daa..2d08e66 100644 --- a/vega/algorithms/nas/modnas/callback/base.py +++ b/vega/algorithms/nas/modnas/callback/base.py @@ -1,17 +1,23 @@ # -*- coding:utf-8 -*- # Copyright (C) 2020. Huawei Technologies Co., Ltd. All rights reserved. -# This program is free software; you can redistribute it and/or modify -# it under the terms of the MIT License. -# This program is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# MIT License for more details. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. """Base callback.""" +from typing import Callable, Dict, Optional, Tuple, Union from modnas.core.event import event_on, event_off from modnas.utils.logging import get_logger -from typing import Callable, Dict, Optional, Tuple, Union _HANDLER_CONF_TYPE = Dict[str, Union[Tuple[Callable, int], Callable]] diff --git a/vega/algorithms/nas/modnas/callback/predefined/early_stopping.py b/vega/algorithms/nas/modnas/callback/predefined/early_stopping.py index 872117f..0809ed0 100644 --- a/vega/algorithms/nas/modnas/callback/predefined/early_stopping.py +++ b/vega/algorithms/nas/modnas/callback/predefined/early_stopping.py @@ -1,20 +1,26 @@ # -*- coding:utf-8 -*- # Copyright (C) 2020. Huawei Technologies Co., Ltd. All rights reserved. -# This program is free software; you can redistribute it and/or modify -# it under the terms of the MIT License. 
-# This program is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# MIT License for more details. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. """Early stopping.""" -from modnas.registry.callback import register -from ..base import CallbackBase from collections import OrderedDict +from typing import Any, Dict, Optional +from modnas.registry.callback import register from modnas.estim.base import EstimBase from modnas.optim.base import OptimBase -from typing import Any, Dict, Optional +from ..base import CallbackBase _ret_type = Optional[Dict[str, Any]] diff --git a/vega/algorithms/nas/modnas/callback/predefined/estim_exporter.py b/vega/algorithms/nas/modnas/callback/predefined/estim_exporter.py index 50186c9..8b59001 100644 --- a/vega/algorithms/nas/modnas/callback/predefined/estim_exporter.py +++ b/vega/algorithms/nas/modnas/callback/predefined/estim_exporter.py @@ -1,12 +1,18 @@ # -*- coding:utf-8 -*- # Copyright (C) 2020. Huawei Technologies Co., Ltd. All rights reserved. -# This program is free software; you can redistribute it and/or modify -# it under the terms of the MIT License. -# This program is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# MIT License for more details. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. """Estimator results exporter.""" from modnas.registry.callback import register diff --git a/vega/algorithms/nas/modnas/callback/predefined/estim_reporter.py b/vega/algorithms/nas/modnas/callback/predefined/estim_reporter.py index 070c21c..22053c0 100644 --- a/vega/algorithms/nas/modnas/callback/predefined/estim_reporter.py +++ b/vega/algorithms/nas/modnas/callback/predefined/estim_reporter.py @@ -1,12 +1,18 @@ # -*- coding:utf-8 -*- # Copyright (C) 2020. Huawei Technologies Co., Ltd. All rights reserved. -# This program is free software; you can redistribute it and/or modify -# it under the terms of the MIT License. -# This program is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# MIT License for more details. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. """Estimator statistics reporter.""" from modnas.registry.callback import register diff --git a/vega/algorithms/nas/modnas/callback/predefined/eta_reporter.py b/vega/algorithms/nas/modnas/callback/predefined/eta_reporter.py index 1584820..406d82f 100644 --- a/vega/algorithms/nas/modnas/callback/predefined/eta_reporter.py +++ b/vega/algorithms/nas/modnas/callback/predefined/eta_reporter.py @@ -1,12 +1,18 @@ # -*- coding:utf-8 -*- # Copyright (C) 2020. Huawei Technologies Co., Ltd. All rights reserved. -# This program is free software; you can redistribute it and/or modify -# it under the terms of the MIT License. -# This program is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# MIT License for more details. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. """ETA (remaining time) reporter.""" from modnas.utils import ETAMeter diff --git a/vega/algorithms/nas/modnas/callback/predefined/optimum.py b/vega/algorithms/nas/modnas/callback/predefined/optimum.py index 20b377a..2a513d0 100644 --- a/vega/algorithms/nas/modnas/callback/predefined/optimum.py +++ b/vega/algorithms/nas/modnas/callback/predefined/optimum.py @@ -1,12 +1,18 @@ # -*- coding:utf-8 -*- # Copyright (C) 2020. Huawei Technologies Co., Ltd. All rights reserved. -# This program is free software; you can redistribute it and/or modify -# it under the terms of the MIT License. -# This program is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# MIT License for more details. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
"""Search optimum statistics reporter.""" from functools import partial diff --git a/vega/algorithms/nas/modnas/callback/predefined/trainer_reporter.py b/vega/algorithms/nas/modnas/callback/predefined/trainer_reporter.py index 66e0e7d..c42d50f 100644 --- a/vega/algorithms/nas/modnas/callback/predefined/trainer_reporter.py +++ b/vega/algorithms/nas/modnas/callback/predefined/trainer_reporter.py @@ -1,12 +1,18 @@ # -*- coding:utf-8 -*- # Copyright (C) 2020. Huawei Technologies Co., Ltd. All rights reserved. -# This program is free software; you can redistribute it and/or modify -# it under the terms of the MIT License. -# This program is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# MIT License for more details. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. """Trainer statistics reporter.""" from functools import partial diff --git a/vega/algorithms/nas/modnas/compat/importer.py b/vega/algorithms/nas/modnas/compat/importer.py index 8a25697..b8bd982 100644 --- a/vega/algorithms/nas/modnas/compat/importer.py +++ b/vega/algorithms/nas/modnas/compat/importer.py @@ -1,12 +1,18 @@ # -*- coding:utf-8 -*- # Copyright (C) 2020. Huawei Technologies Co., Ltd. All rights reserved. -# This program is free software; you can redistribute it and/or modify -# it under the terms of the MIT License. -# This program is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# MIT License for more details. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. """Import hooks for ModularNAS (PEP 302).""" diff --git a/vega/algorithms/nas/modnas/compat/search_alg.py b/vega/algorithms/nas/modnas/compat/search_alg.py index 56cdec9..179b2ed 100644 --- a/vega/algorithms/nas/modnas/compat/search_alg.py +++ b/vega/algorithms/nas/modnas/compat/search_alg.py @@ -1,12 +1,18 @@ # -*- coding:utf-8 -*- # Copyright (C) 2020. Huawei Technologies Co., Ltd. All rights reserved. -# This program is free software; you can redistribute it and/or modify -# it under the terms of the MIT License. -# This program is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# MIT License for more details. 
+# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. """ModNasAlgorithm.""" diff --git a/vega/algorithms/nas/modnas/compat/trainer_callback.py b/vega/algorithms/nas/modnas/compat/trainer_callback.py index 640723f..809d872 100644 --- a/vega/algorithms/nas/modnas/compat/trainer_callback.py +++ b/vega/algorithms/nas/modnas/compat/trainer_callback.py @@ -1,16 +1,23 @@ # -*- coding:utf-8 -*- # Copyright (C) 2020. Huawei Technologies Co., Ltd. All rights reserved. -# This program is free software; you can redistribute it and/or modify -# it under the terms of the MIT License. -# This program is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# MIT License for more details. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. """ModularNAS framework trainer callback.""" import copy +import logging import threading import traceback from vega.common import FileOps @@ -282,7 +289,7 @@ def estim_runner(): results['final'] = ret self.estim_ret = results except Exception: - traceback.print_exc() + logging.debug(traceback.format_exc()) # try to release the trainer self.trainer.train_loader = [] self.trainer.valid_loader = [] diff --git a/vega/algorithms/nas/modnas/contrib/callback/metrics_stats.py b/vega/algorithms/nas/modnas/contrib/callback/metrics_stats.py index ea8bc97..794b756 100644 --- a/vega/algorithms/nas/modnas/contrib/callback/metrics_stats.py +++ b/vega/algorithms/nas/modnas/contrib/callback/metrics_stats.py @@ -1,23 +1,29 @@ # -*- coding:utf-8 -*- # Copyright (C) 2020. Huawei Technologies Co., Ltd. All rights reserved. -# This program is free software; you can redistribute it and/or modify -# it under the terms of the MIT License. -# This program is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# MIT License for more details. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
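trainer_callback.py above replaces traceback.print_exc() with logging.debug(traceback.format_exc()), so full stack traces stay available at debug verbosity instead of being written to stderr on every failure. A minimal stand-alone illustration of that pattern (the failing call is a placeholder):

# Keep the full traceback at debug level; surface only a short message at error level.
import logging
import traceback

logging.basicConfig(level=logging.DEBUG)

try:
    1 / 0                                     # stand-in for the estimator routine that may fail
except Exception as e:
    logging.debug(traceback.format_exc())     # full stack trace only at debug verbosity
    logging.error("error occurred, message: %s", e)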
+# See the License for the specific language governing permissions and +# limitations under the License. """Metrics statistics reporter.""" -import pickle import itertools +from collections import OrderedDict +from matplotlib import pyplot as plt +from typing import Dict, List, Tuple, Optional, Any from modnas.registry.callback import register from modnas.callback.base import CallbackBase -from matplotlib import pyplot as plt -from collections import OrderedDict from modnas.estim.base import EstimBase from modnas.optim.base import OptimBase -from typing import Dict, List, Tuple, Optional, Any +from vega.common import FileOps plt.switch_backend('Agg') @@ -61,8 +67,7 @@ def save_stats(self, ret: Dict[str, Any], estim: EstimBase, optim: OptimBase) -> plt.ylabel(axis[1]) plt.savefig(estim.expman.join('plot', 'metrics_{}.png'.format(axis_str))) result_path = estim.expman.join('output', 'metrics_results.pkl') - with open(result_path, 'wb') as f: - pickle.dump(results, f) - self.logger.info('metrics results saved to {}'.format(result_path)) + FileOps.dump_pickle(results, result_path) + self.logger.info('metrics results saved to {}'.format(result_path)) self.results = [] return ret diff --git a/vega/algorithms/nas/modnas/contrib/callback/mixedop_stats.py b/vega/algorithms/nas/modnas/contrib/callback/mixedop_stats.py index ad2b020..9c48a84 100644 --- a/vega/algorithms/nas/modnas/contrib/callback/mixedop_stats.py +++ b/vega/algorithms/nas/modnas/contrib/callback/mixedop_stats.py @@ -1,24 +1,30 @@ # -*- coding:utf-8 -*- # Copyright (C) 2020. Huawei Technologies Co., Ltd. All rights reserved. -# This program is free software; you can redistribute it and/or modify -# it under the terms of the MIT License. -# This program is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# MIT License for more details. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
"""Mixed operator statistics reporter.""" import numpy as np -import pickle import torch.nn.functional as F +from matplotlib import pyplot as plt +from typing import Dict, Optional, Any from modnas.registry.callback import register from modnas.arch_space.mixed_ops import MixedOp from modnas.callback.base import CallbackBase -from matplotlib import pyplot as plt from modnas.estim.base import EstimBase from modnas.optim.base import OptimBase -from typing import Dict, Optional, Any +from vega.common import FileOps plt.switch_backend('Agg') @@ -60,8 +66,7 @@ def save_stats(self, ret: Dict[str, Any], estim: EstimBase, optim: OptimBase) -> plt.savefig(estim.expman.join('plot', 'prob_{}.png'.format(i))) save_probs.append(prob) probs_path = estim.expman.join('output', 'probs.pkl') - with open(probs_path, 'wb') as f: - pickle.dump(save_probs, f) - self.logger.info('mixed op probs saved to {}'.format(probs_path)) + FileOps.dump_pickle(save_probs, probs_path) + self.logger.info('mixed op probs saved to {}'.format(probs_path)) self.probs = [] return ret diff --git a/vega/algorithms/nas/modnas/contrib/callback/pareto.py b/vega/algorithms/nas/modnas/contrib/callback/pareto.py index e228e1b..2137f2f 100644 --- a/vega/algorithms/nas/modnas/contrib/callback/pareto.py +++ b/vega/algorithms/nas/modnas/contrib/callback/pareto.py @@ -1,12 +1,18 @@ # -*- coding:utf-8 -*- # Copyright (C) 2020. Huawei Technologies Co., Ltd. All rights reserved. -# This program is free software; you can redistribute it and/or modify -# it under the terms of the MIT License. -# This program is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# MIT License for more details. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. """Pareto optimum statistics reporter.""" from modnas.registry.callback import register diff --git a/vega/algorithms/nas/modnas/contrib/estim/fakedata.py b/vega/algorithms/nas/modnas/contrib/estim/fakedata.py index 65fd455..a6c53b1 100644 --- a/vega/algorithms/nas/modnas/contrib/estim/fakedata.py +++ b/vega/algorithms/nas/modnas/contrib/estim/fakedata.py @@ -1,22 +1,28 @@ # -*- coding:utf-8 -*- # Copyright (C) 2020. Huawei Technologies Co., Ltd. All rights reserved. -# This program is free software; you can redistribute it and/or modify -# it under the terms of the MIT License. -# This program is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# MIT License for more details. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. """Fake data estimator.""" import numpy as np +from typing import Dict, List, Union from modnas.core.param_space import ParamSpace from modnas.core.params import Categorical from modnas.estim.predefined.regression import RegressionEstim from modnas.registry.construct import register as register_constructor from modnas.registry.estim import register as register_estim from modnas.optim.base import OptimBase -from typing import Dict, List, Union @register_constructor diff --git a/vega/algorithms/nas/modnas/contrib/estim/nasbench.py b/vega/algorithms/nas/modnas/contrib/estim/nasbench.py index d88ba65..c810b06 100644 --- a/vega/algorithms/nas/modnas/contrib/estim/nasbench.py +++ b/vega/algorithms/nas/modnas/contrib/estim/nasbench.py @@ -1,12 +1,18 @@ # -*- coding:utf-8 -*- # Copyright (C) 2020. Huawei Technologies Co., Ltd. All rights reserved. -# This program is free software; you can redistribute it and/or modify -# it under the terms of the MIT License. -# This program is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# MIT License for more details. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. """NASBench estimator.""" from modnas.core.params import Categorical diff --git a/vega/algorithms/nas/modnas/contrib/estim/progressive_shrinking.py b/vega/algorithms/nas/modnas/contrib/estim/progressive_shrinking.py index 2593ed2..224f05a 100644 --- a/vega/algorithms/nas/modnas/contrib/estim/progressive_shrinking.py +++ b/vega/algorithms/nas/modnas/contrib/estim/progressive_shrinking.py @@ -1,12 +1,18 @@ # -*- coding:utf-8 -*- # Copyright (C) 2020. Huawei Technologies Co., Ltd. All rights reserved. -# This program is free software; you can redistribute it and/or modify -# it under the terms of the MIT License. -# This program is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# MIT License for more details. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+# See the License for the specific language governing permissions and +# limitations under the License. """Implementation of Progressive Shrinking in Once for All.""" import itertools diff --git a/vega/algorithms/nas/modnas/contrib/estim/random_sampling.py b/vega/algorithms/nas/modnas/contrib/estim/random_sampling.py index 9798ad5..6a59886 100644 --- a/vega/algorithms/nas/modnas/contrib/estim/random_sampling.py +++ b/vega/algorithms/nas/modnas/contrib/estim/random_sampling.py @@ -1,12 +1,18 @@ # -*- coding:utf-8 -*- # Copyright (C) 2020. Huawei Technologies Co., Ltd. All rights reserved. -# This program is free software; you can redistribute it and/or modify -# it under the terms of the MIT License. -# This program is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# MIT License for more details. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. """Uniformly samples and trains subnets.""" import random diff --git a/vega/algorithms/nas/modnas/contrib/metrics/onnx_metrics.py b/vega/algorithms/nas/modnas/contrib/metrics/onnx_metrics.py index 4920738..1da6914 100644 --- a/vega/algorithms/nas/modnas/contrib/metrics/onnx_metrics.py +++ b/vega/algorithms/nas/modnas/contrib/metrics/onnx_metrics.py @@ -1,12 +1,18 @@ # -*- coding:utf-8 -*- # Copyright (C) 2020. Huawei Technologies Co., Ltd. All rights reserved. -# This program is free software; you can redistribute it and/or modify -# it under the terms of the MIT License. -# This program is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# MIT License for more details. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. """ONNX export metrics.""" import os diff --git a/vega/algorithms/nas/modnas/contrib/metrics/profiler_metrics.py b/vega/algorithms/nas/modnas/contrib/metrics/profiler_metrics.py index 8039bf0..f9f66a6 100644 --- a/vega/algorithms/nas/modnas/contrib/metrics/profiler_metrics.py +++ b/vega/algorithms/nas/modnas/contrib/metrics/profiler_metrics.py @@ -1,12 +1,18 @@ # -*- coding:utf-8 -*- # Copyright (C) 2020. Huawei Technologies Co., Ltd. All rights reserved. -# This program is free software; you can redistribute it and/or modify -# it under the terms of the MIT License. 
-# This program is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# MIT License for more details. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. """Local network hardware performance profiler metrics.""" import time diff --git a/vega/algorithms/nas/modnas/contrib/optim/hitl.py b/vega/algorithms/nas/modnas/contrib/optim/hitl.py index 3b43bf4..05e2c82 100644 --- a/vega/algorithms/nas/modnas/contrib/optim/hitl.py +++ b/vega/algorithms/nas/modnas/contrib/optim/hitl.py @@ -1,12 +1,18 @@ # -*- coding:utf-8 -*- # Copyright (C) 2020. Huawei Technologies Co., Ltd. All rights reserved. -# This program is free software; you can redistribute it and/or modify -# it under the terms of the MIT License. -# This program is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# MIT License for more details. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. """Human-in-the-loop Optimizer, used for debugging.""" from collections import OrderedDict diff --git a/vega/algorithms/nas/modnas/contrib/optim/hyperopt.py b/vega/algorithms/nas/modnas/contrib/optim/hyperopt.py index 69ef0a3..f4106a7 100644 --- a/vega/algorithms/nas/modnas/contrib/optim/hyperopt.py +++ b/vega/algorithms/nas/modnas/contrib/optim/hyperopt.py @@ -1,16 +1,22 @@ # -*- coding:utf-8 -*- # Copyright (C) 2020. Huawei Technologies Co., Ltd. All rights reserved. -# This program is free software; you can redistribute it and/or modify -# it under the terms of the MIT License. -# This program is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# MIT License for more details. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
"""Optim wrapper for Hyperopt.""" -import numpy as np from collections import OrderedDict +import numpy as np from modnas.registry.optim import register from modnas.optim.base import OptimBase from modnas.core.params import Categorical, Numeric diff --git a/vega/algorithms/nas/modnas/contrib/optim/skopt.py b/vega/algorithms/nas/modnas/contrib/optim/skopt.py index a1c6ed1..986de35 100644 --- a/vega/algorithms/nas/modnas/contrib/optim/skopt.py +++ b/vega/algorithms/nas/modnas/contrib/optim/skopt.py @@ -1,23 +1,29 @@ # -*- coding:utf-8 -*- # Copyright (C) 2020. Huawei Technologies Co., Ltd. All rights reserved. -# This program is free software; you can redistribute it and/or modify -# it under the terms of the MIT License. -# This program is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# MIT License for more details. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. """Bayesian Optimizer based on scikit-optimize.""" import time -import numpy as np from collections import OrderedDict +import numpy as np +from typing import List, Dict, Optional from modnas.registry.optim import register from modnas.estim.base import EstimBase from modnas.optim.base import OptimBase from modnas.core.params import Categorical as ParamCategorical, Numeric from modnas.core.param_space import ParamSpace -from typing import List, Dict, Optional try: import skopt diff --git a/vega/algorithms/nas/modnas/contrib/tune/func.py b/vega/algorithms/nas/modnas/contrib/tune/func.py index 7164b38..6d73bd9 100644 --- a/vega/algorithms/nas/modnas/contrib/tune/func.py +++ b/vega/algorithms/nas/modnas/contrib/tune/func.py @@ -1,12 +1,18 @@ # -*- coding:utf-8 -*- # Copyright (C) 2020. Huawei Technologies Co., Ltd. All rights reserved. -# This program is free software; you can redistribute it and/or modify -# it under the terms of the MIT License. -# This program is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# MIT License for more details. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
"""Wrappers that run hyperparameter tuning on functions.""" import copy diff --git a/vega/algorithms/nas/modnas/contrib/tune/prog.py b/vega/algorithms/nas/modnas/contrib/tune/prog.py index 6b24a40..3281e39 100644 --- a/vega/algorithms/nas/modnas/contrib/tune/prog.py +++ b/vega/algorithms/nas/modnas/contrib/tune/prog.py @@ -1,21 +1,25 @@ # -*- coding:utf-8 -*- # Copyright (C) 2020. Huawei Technologies Co., Ltd. All rights reserved. -# This program is free software; you can redistribute it and/or modify -# it under the terms of the MIT License. -# This program is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# MIT License for more details. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. """Run hyperparameter tuning on python programs.""" - import sys import yaml import argparse import importlib from .func import tune -from modnas.utils import exec_file def tune_prog(progname=None, funcname=None, config=None, options=None, hparams=None, prog_args=None): @@ -32,10 +36,7 @@ def tune_prog(progname=None, funcname=None, config=None, options=None, hparams=N prog_spec = prog_spec.split(':') exec_name = prog_spec[0] funcname = funcname or (None if len(prog_spec) == 1 else prog_spec[1]) - if exec_name.endswith('.py'): - mod = exec_file(exec_name) - else: - mod = importlib.import_module(exec_name) + mod = importlib.import_module(exec_name) if funcname is None: for k in mod.keys(): if not k.startswith('_'): diff --git a/vega/algorithms/nas/modnas/core/__init__.py b/vega/algorithms/nas/modnas/core/__init__.py index 3c29e3c..477f5ca 100644 --- a/vega/algorithms/nas/modnas/core/__init__.py +++ b/vega/algorithms/nas/modnas/core/__init__.py @@ -1,12 +1,18 @@ # -*- coding:utf-8 -*- # Copyright (C) 2020. Huawei Technologies Co., Ltd. All rights reserved. -# This program is free software; you can redistribute it and/or modify -# it under the terms of the MIT License. -# This program is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# MIT License for more details. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
from functools import wraps, partial from typing import Callable, Type, List, Any diff --git a/vega/algorithms/nas/modnas/core/event.py b/vega/algorithms/nas/modnas/core/event.py index 12217d4..c457df3 100644 --- a/vega/algorithms/nas/modnas/core/event.py +++ b/vega/algorithms/nas/modnas/core/event.py @@ -1,20 +1,26 @@ # -*- coding:utf-8 -*- # Copyright (C) 2020. Huawei Technologies Co., Ltd. All rights reserved. -# This program is free software; you can redistribute it and/or modify -# it under the terms of the MIT License. -# This program is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# MIT License for more details. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. """Event managing and triggering.""" import inspect from functools import wraps -from . import singleton, make_decorator +from typing import Any, Callable, Optional, Type, Union from modnas.utils.logging import get_logger from modnas.utils.config import merge_config -from typing import Any, Callable, Optional, Type, Union +from . import singleton, make_decorator logger = get_logger(__name__) diff --git a/vega/algorithms/nas/modnas/core/param_space.py b/vega/algorithms/nas/modnas/core/param_space.py index 755cf35..fd7fcc0 100644 --- a/vega/algorithms/nas/modnas/core/param_space.py +++ b/vega/algorithms/nas/modnas/core/param_space.py @@ -1,17 +1,23 @@ # -*- coding:utf-8 -*- # Copyright (C) 2020. Huawei Technologies Co., Ltd. All rights reserved. -# This program is free software; you can redistribute it and/or modify -# it under the terms of the MIT License. -# This program is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# MIT License for more details. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. """Architecture Parameter Space.""" from collections import OrderedDict -from . import singleton from modnas.utils.logging import get_logger +from . import singleton logger = get_logger(__name__) diff --git a/vega/algorithms/nas/modnas/core/params/base.py b/vega/algorithms/nas/modnas/core/params/base.py index 5dad4c4..dd9964a 100644 --- a/vega/algorithms/nas/modnas/core/params/base.py +++ b/vega/algorithms/nas/modnas/core/params/base.py @@ -1,18 +1,24 @@ # -*- coding:utf-8 -*- # Copyright (C) 2020. Huawei Technologies Co., Ltd. All rights reserved. 
-# This program is free software; you can redistribute it and/or modify -# it under the terms of the MIT License. -# This program is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# MIT License for more details. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. """Base parameter.""" from collections import OrderedDict +from typing import Any, Dict, Optional, Union, Callable from modnas.core.event import event_emit, event_on from modnas.core.param_space import ParamSpace -from typing import Any, Dict, Optional, Union, Callable class Param(): diff --git a/vega/algorithms/nas/modnas/core/params/default.py b/vega/algorithms/nas/modnas/core/params/default.py index 18dfe62..b892b80 100644 --- a/vega/algorithms/nas/modnas/core/params/default.py +++ b/vega/algorithms/nas/modnas/core/params/default.py @@ -1,20 +1,27 @@ # -*- coding:utf-8 -*- # Copyright (C) 2020. Huawei Technologies Co., Ltd. All rights reserved. -# This program is free software; you can redistribute it and/or modify -# it under the terms of the MIT License. -# This program is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# MIT License for more details. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. """Default parameter classes.""" + import random +from typing import Callable, List, Optional, Union, Any import numpy as np -from .base import Param from modnas.registry.params import register from modnas.core.param_space import ParamSpace -from typing import Callable, List, Optional, Union, Any +from .base import Param def _default_categorical_sampler(dim: int) -> int: diff --git a/vega/algorithms/nas/modnas/core/params/torch.py b/vega/algorithms/nas/modnas/core/params/torch.py index 9e51d3a..cd0b630 100644 --- a/vega/algorithms/nas/modnas/core/params/torch.py +++ b/vega/algorithms/nas/modnas/core/params/torch.py @@ -1,20 +1,27 @@ # -*- coding:utf-8 -*- # Copyright (C) 2020. Huawei Technologies Co., Ltd. All rights reserved. -# This program is free software; you can redistribute it and/or modify -# it under the terms of the MIT License. -# This program is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# MIT License for more details. 
+# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. """Torch tensor parameter.""" + +from typing import Optional, Callable import torch -from .base import Param +from torch.nn.parameter import Parameter from modnas.registry.params import register from modnas.core.param_space import ParamSpace -from torch.nn.parameter import Parameter -from typing import Optional, Callable +from .base import Param def _default_tensor_sampler(shape: int, init_ratio: float = 1e-3) -> Parameter: diff --git a/vega/algorithms/nas/modnas/data_provider/base.py b/vega/algorithms/nas/modnas/data_provider/base.py index b1e7896..6a85786 100644 --- a/vega/algorithms/nas/modnas/data_provider/base.py +++ b/vega/algorithms/nas/modnas/data_provider/base.py @@ -1,12 +1,18 @@ # -*- coding:utf-8 -*- # Copyright (C) 2020. Huawei Technologies Co., Ltd. All rights reserved. -# This program is free software; you can redistribute it and/or modify -# it under the terms of the MIT License. -# This program is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# MIT License for more details. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. """Base DataProvider.""" from modnas.utils.logging import get_logger diff --git a/vega/algorithms/nas/modnas/data_provider/dataloader/torch/default.py b/vega/algorithms/nas/modnas/data_provider/dataloader/torch/default.py index 9c69264..b15a9ba 100644 --- a/vega/algorithms/nas/modnas/data_provider/dataloader/torch/default.py +++ b/vega/algorithms/nas/modnas/data_provider/dataloader/torch/default.py @@ -1,21 +1,28 @@ # -*- coding:utf-8 -*- # Copyright (C) 2020. Huawei Technologies Co., Ltd. All rights reserved. -# This program is free software; you can redistribute it and/or modify -# it under the terms of the MIT License. -# This program is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# MIT License for more details. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. """Default DataLoader.""" + +from typing import Any, Dict, Optional, Tuple, Union, Callable import random from torch.utils.data.dataloader import DataLoader from torch.utils.data.dataset import Dataset from torch.utils.data.sampler import SubsetRandomSampler from modnas.registry.data_loader import register from modnas.utils.logging import get_logger -from typing import Any, Dict, Optional, Tuple, Union, Callable logger = get_logger('data_loader') diff --git a/vega/algorithms/nas/modnas/data_provider/dataloader/torch/image_cls.py b/vega/algorithms/nas/modnas/data_provider/dataloader/torch/image_cls.py index edb7ba0..1daad60 100644 --- a/vega/algorithms/nas/modnas/data_provider/dataloader/torch/image_cls.py +++ b/vega/algorithms/nas/modnas/data_provider/dataloader/torch/image_cls.py @@ -1,22 +1,28 @@ # -*- coding:utf-8 -*- # Copyright (C) 2020. Huawei Technologies Co., Ltd. All rights reserved. -# This program is free software; you can redistribute it and/or modify -# it under the terms of the MIT License. -# This program is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# MIT License for more details. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. """Dataloader for Image classification.""" import random +from typing import Any, Dict, List, Optional, Tuple, Union, Callable import numpy as np from torch.utils.data.dataloader import DataLoader from torch.utils.data.dataset import Dataset from torch.utils.data.sampler import SubsetRandomSampler from modnas.registry.data_loader import register from modnas.utils.logging import get_logger -from typing import Any, Dict, List, Optional, Tuple, Union, Callable CLASSES_TYPE = Union[int, List[Union[str, int]]] diff --git a/vega/algorithms/nas/modnas/data_provider/dataset/torch/image_cls.py b/vega/algorithms/nas/modnas/data_provider/dataset/torch/image_cls.py index b356e0c..a91a1e8 100644 --- a/vega/algorithms/nas/modnas/data_provider/dataset/torch/image_cls.py +++ b/vega/algorithms/nas/modnas/data_provider/dataset/torch/image_cls.py @@ -1,20 +1,27 @@ # -*- coding:utf-8 -*- # Copyright (C) 2020. Huawei Technologies Co., Ltd. All rights reserved. -# This program is free software; you can redistribute it and/or modify -# it under the terms of the MIT License. -# This program is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# MIT License for more details. 
+# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. """Datasets for image classification.""" + +from typing import Callable, Optional, Dict, List, Any import os -import numpy as np import torch +import numpy as np from torchvision import transforms, datasets from modnas.registry.dataset import register -from typing import Callable, Optional, Dict, List, Any from torch.utils.data.dataset import Dataset diff --git a/vega/algorithms/nas/modnas/data_provider/dataset/torch/rand.py b/vega/algorithms/nas/modnas/data_provider/dataset/torch/rand.py index 403f381..8748a77 100644 --- a/vega/algorithms/nas/modnas/data_provider/dataset/torch/rand.py +++ b/vega/algorithms/nas/modnas/data_provider/dataset/torch/rand.py @@ -1,12 +1,18 @@ # -*- coding:utf-8 -*- # Copyright (C) 2020. Huawei Technologies Co., Ltd. All rights reserved. -# This program is free software; you can redistribute it and/or modify -# it under the terms of the MIT License. -# This program is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# MIT License for more details. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. """Random tensor dataset.""" import torch diff --git a/vega/algorithms/nas/modnas/data_provider/predefined/default.py b/vega/algorithms/nas/modnas/data_provider/predefined/default.py index 30430ea..d0b6728 100644 --- a/vega/algorithms/nas/modnas/data_provider/predefined/default.py +++ b/vega/algorithms/nas/modnas/data_provider/predefined/default.py @@ -1,17 +1,23 @@ # -*- coding:utf-8 -*- # Copyright (C) 2020. Huawei Technologies Co., Ltd. All rights reserved. -# This program is free software; you can redistribute it and/or modify -# it under the terms of the MIT License. -# This program is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# MIT License for more details. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+# See the License for the specific language governing permissions and +# limitations under the License. """Default DataProvider with Iterable.""" -from ..base import DataProviderBase -from modnas.registry.data_provider import register from typing import List, Optional, Any, Collection, Iterator +from modnas.registry.data_provider import register +from ..base import DataProviderBase @register diff --git a/vega/algorithms/nas/modnas/estim/base.py b/vega/algorithms/nas/modnas/estim/base.py index 7ed9eb9..59ffec1 100644 --- a/vega/algorithms/nas/modnas/estim/base.py +++ b/vega/algorithms/nas/modnas/estim/base.py @@ -1,23 +1,29 @@ # -*- coding:utf-8 -*- # Copyright (C) 2020. Huawei Technologies Co., Ltd. All rights reserved. -# This program is free software; you can redistribute it and/or modify -# it under the terms of the MIT License. -# This program is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# MIT License for more details. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. """Base Estimator.""" import traceback import threading -import pickle from modnas import backend from modnas.metrics import build_metrics_all from modnas.registry.export import build as build_exporter from modnas.core.event import event_hooked_subclass from modnas.utils.logging import get_logger from modnas.registry import streamline_spec +from vega.common import FileOps def build_criterions_all(crit_configs, device_ids=None): @@ -310,10 +316,10 @@ def save(self, epoch=None, save_name=None): epoch = epoch or self.cur_epoch try: chkpt = self.state_dict() - with open(chkpt_path, 'wb') as f: - pickle.dump(chkpt, f) - except RuntimeError: - logger.error("Failed saving estimator: {}".format(traceback.format_exc())) + FileOps.dump_pickle(chkpt, chkpt_path) + except RuntimeError as e: + logger.debug(traceback.format_exc()) + logger.error(f"Failed saving estimator: {e}") def save_checkpoint(self, epoch=None, save_name=None): """Save Estimator & model to file.""" @@ -334,14 +340,14 @@ def save_arch_desc(self, epoch=None, arch_desc=None, save_name=None, exporter='D save_path = expman.join('output', fname) try: build_exporter(exporter, path=save_path)(arch_desc) - except RuntimeError: - logger.error("Failed saving arch_desc: {}".format(traceback.format_exc())) + except RuntimeError as e: + logger.debug(traceback.format_exc()) + logger.error(f"Failed saving arch_desc, message: {e}") def load(self, chkpt_path): """Load states from file.""" if chkpt_path is None: return self.logger.info("Resuming from checkpoint: {}".format(chkpt_path)) - with open(chkpt_path, 'rb') as f: - chkpt = pickle.load(f) + chkpt = FileOps.load_pickle(chkpt_path) self.load_state_dict(chkpt) diff --git a/vega/algorithms/nas/modnas/estim/dist_backend/base.py b/vega/algorithms/nas/modnas/estim/dist_backend/base.py index 1725fd2..9349e9f 100644 --- a/vega/algorithms/nas/modnas/estim/dist_backend/base.py +++ 
b/vega/algorithms/nas/modnas/estim/dist_backend/base.py @@ -1,12 +1,18 @@ # -*- coding:utf-8 -*- # Copyright (C) 2020. Huawei Technologies Co., Ltd. All rights reserved. -# This program is free software; you can redistribute it and/or modify -# it under the terms of the MIT License. -# This program is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# MIT License for more details. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. """Distributed remote client and server.""" import threading diff --git a/vega/algorithms/nas/modnas/estim/dist_backend/managed.py b/vega/algorithms/nas/modnas/estim/dist_backend/managed.py index 9e94d95..bad926b 100644 --- a/vega/algorithms/nas/modnas/estim/dist_backend/managed.py +++ b/vega/algorithms/nas/modnas/estim/dist_backend/managed.py @@ -1,12 +1,18 @@ # -*- coding:utf-8 -*- # Copyright (C) 2020. Huawei Technologies Co., Ltd. All rights reserved. -# This program is free software; you can redistribute it and/or modify -# it under the terms of the MIT License. -# This program is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# MIT License for more details. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. """Managed list of remote clients.""" import threading diff --git a/vega/algorithms/nas/modnas/estim/dist_backend/rpyc.py b/vega/algorithms/nas/modnas/estim/dist_backend/rpyc.py index b143cd1..fcef857 100644 --- a/vega/algorithms/nas/modnas/estim/dist_backend/rpyc.py +++ b/vega/algorithms/nas/modnas/estim/dist_backend/rpyc.py @@ -1,19 +1,25 @@ # -*- coding:utf-8 -*- # Copyright (C) 2020. Huawei Technologies Co., Ltd. All rights reserved. -# This program is free software; you can redistribute it and/or modify -# it under the terms of the MIT License. -# This program is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# MIT License for more details. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. """RPyC remote server and client.""" import rpyc from rpyc.utils.server import ThreadedServer -from .base import RemoteBase from modnas.registry.dist_remote import register as register_remote from modnas.registry.dist_worker import register as register_worker +from .base import RemoteBase @register_remote diff --git a/vega/algorithms/nas/modnas/estim/predefined/default.py b/vega/algorithms/nas/modnas/estim/predefined/default.py index 63848df..502648e 100644 --- a/vega/algorithms/nas/modnas/estim/predefined/default.py +++ b/vega/algorithms/nas/modnas/estim/predefined/default.py @@ -1,17 +1,23 @@ # -*- coding:utf-8 -*- # Copyright (C) 2020. Huawei Technologies Co., Ltd. All rights reserved. -# This program is free software; you can redistribute it and/or modify -# it under the terms of the MIT License. -# This program is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# MIT License for more details. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. """Estimator with default training & evaluating methods.""" import itertools -from ..base import EstimBase from modnas.registry.estim import register +from ..base import EstimBase @register diff --git a/vega/algorithms/nas/modnas/estim/predefined/distributed.py b/vega/algorithms/nas/modnas/estim/predefined/distributed.py index fe97729..a286e4a 100644 --- a/vega/algorithms/nas/modnas/estim/predefined/distributed.py +++ b/vega/algorithms/nas/modnas/estim/predefined/distributed.py @@ -1,18 +1,24 @@ # -*- coding:utf-8 -*- # Copyright (C) 2020. Huawei Technologies Co., Ltd. All rights reserved. -# This program is free software; you can redistribute it and/or modify -# it under the terms of the MIT License. -# This program is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# MIT License for more details. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
"""Distributed Estimator.""" -from ..base import EstimBase from modnas.registry.estim import register, build from modnas.registry.dist_remote import build as build_remote from modnas.registry.dist_worker import build as build_worker +from ..base import EstimBase @register diff --git a/vega/algorithms/nas/modnas/estim/predefined/hptune.py b/vega/algorithms/nas/modnas/estim/predefined/hptune.py index 2f473c6..c05cf6f 100644 --- a/vega/algorithms/nas/modnas/estim/predefined/hptune.py +++ b/vega/algorithms/nas/modnas/estim/predefined/hptune.py @@ -1,12 +1,18 @@ # -*- coding:utf-8 -*- # Copyright (C) 2020. Huawei Technologies Co., Ltd. All rights reserved. -# This program is free software; you can redistribute it and/or modify -# it under the terms of the MIT License. -# This program is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# MIT License for more details. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. """Hyperparameter-tuning Estimator.""" import copy @@ -14,10 +20,10 @@ import traceback import multiprocessing as mp import yaml -from ..base import EstimBase from modnas.utils.config import Config from modnas.utils.wrapper import run from modnas.registry.estim import register +from ..base import EstimBase def _default_trial_runner(conn, trial_args): @@ -75,9 +81,10 @@ def step(self, hp): try: score = self.measure_fn(hp, **fn_args) self.is_succ = True - except RuntimeError: + except RuntimeError as e: score = 0 - logger.info('trial {} failed with error: {}'.format(self.trial_index, traceback.format_exc())) + logger.debug(traceback.format_exc()) + logger.info(f'trial {self.trial_index} failed with error, message: {e}') result = { 'score': score, } diff --git a/vega/algorithms/nas/modnas/estim/predefined/pipeline.py b/vega/algorithms/nas/modnas/estim/predefined/pipeline.py index a722876..e1ed29e 100644 --- a/vega/algorithms/nas/modnas/estim/predefined/pipeline.py +++ b/vega/algorithms/nas/modnas/estim/predefined/pipeline.py @@ -1,22 +1,28 @@ # -*- coding:utf-8 -*- # Copyright (C) 2020. Huawei Technologies Co., Ltd. All rights reserved. -# This program is free software; you can redistribute it and/or modify -# it under the terms of the MIT License. -# This program is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# MIT License for more details. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+# See the License for the specific language governing permissions and +# limitations under the License. """Pipeline Estimator.""" import time -import yaml import traceback import threading import multiprocessing as mp -from ..base import EstimBase +import yaml from modnas.registry.estim import register from modnas.utils.wrapper import run +from ..base import EstimBase def _mp_step_runner(conn, step_conf): @@ -60,8 +66,9 @@ def exec_runner(self, pname): """Execute runner in a thread.""" try: ret = self.runner(self.config.pipeline[pname]) - except RuntimeError: - self.logger.info('pipeline step failed with error: {}'.format(traceback.format_exc())) + except RuntimeError as e: + self.logger.debug(traceback.format_exc()) + self.logger.info(f'pipeline step failed with error, message: {e}') self.failed.add(pname) ret = None self.step_done(pname, ret, None) diff --git a/vega/algorithms/nas/modnas/estim/predefined/regression.py b/vega/algorithms/nas/modnas/estim/predefined/regression.py index efb64ad..3225345 100644 --- a/vega/algorithms/nas/modnas/estim/predefined/regression.py +++ b/vega/algorithms/nas/modnas/estim/predefined/regression.py @@ -1,18 +1,24 @@ # -*- coding:utf-8 -*- # Copyright (C) 2020. Huawei Technologies Co., Ltd. All rights reserved. -# This program is free software; you can redistribute it and/or modify -# it under the terms of the MIT License. -# This program is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# MIT License for more details. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. """Regression Estimator.""" import itertools -from ..base import EstimBase from modnas.core.param_space import ParamSpace from modnas.registry.estim import register +from ..base import EstimBase @register diff --git a/vega/algorithms/nas/modnas/estim/predefined/subnet.py b/vega/algorithms/nas/modnas/estim/predefined/subnet.py index 2975f88..0381f24 100644 --- a/vega/algorithms/nas/modnas/estim/predefined/subnet.py +++ b/vega/algorithms/nas/modnas/estim/predefined/subnet.py @@ -1,20 +1,26 @@ # -*- coding:utf-8 -*- # Copyright (C) 2020. Huawei Technologies Co., Ltd. All rights reserved. -# This program is free software; you can redistribute it and/or modify -# it under the terms of the MIT License. -# This program is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# MIT License for more details. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+# See the License for the specific language governing permissions and +# limitations under the License. """Subnet-based Estimator.""" import itertools import traceback -from ..base import EstimBase from modnas import backend from modnas.core.param_space import ParamSpace from modnas.registry.estim import register +from ..base import EstimBase @register @@ -34,8 +40,9 @@ def step(self, params): config = self.config try: self.construct_subnet(arch_desc) - except RuntimeError: - self.logger.info('subnet construct failed:\n{}'.format(traceback.format_exc())) + except RuntimeError as e: + self.logger.debug(traceback.format_exc()) + self.logger.info(f'subnet construct failed, message: {e}') ret = {'error_no': -1} return ret tot_epochs = config.subnet_epochs diff --git a/vega/algorithms/nas/modnas/estim/predefined/supernet.py b/vega/algorithms/nas/modnas/estim/predefined/supernet.py index 7d2dd6a..226d06a 100644 --- a/vega/algorithms/nas/modnas/estim/predefined/supernet.py +++ b/vega/algorithms/nas/modnas/estim/predefined/supernet.py @@ -1,18 +1,24 @@ # -*- coding:utf-8 -*- # Copyright (C) 2020. Huawei Technologies Co., Ltd. All rights reserved. -# This program is free software; you can redistribute it and/or modify -# it under the terms of the MIT License. -# This program is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# MIT License for more details. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. """Supernet-based Estimator.""" import itertools -from ..base import EstimBase from modnas.core.param_space import ParamSpace from modnas.registry.estim import register +from ..base import EstimBase @register diff --git a/vega/algorithms/nas/modnas/estim/predefined/unified.py b/vega/algorithms/nas/modnas/estim/predefined/unified.py index 2a77a93..9367c27 100644 --- a/vega/algorithms/nas/modnas/estim/predefined/unified.py +++ b/vega/algorithms/nas/modnas/estim/predefined/unified.py @@ -1,21 +1,27 @@ # -*- coding:utf-8 -*- # Copyright (C) 2020. Huawei Technologies Co., Ltd. All rights reserved. -# This program is free software; you can redistribute it and/or modify -# it under the terms of the MIT License. -# This program is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# MIT License for more details. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
"""Unified Estimator.""" import itertools -from ..base import EstimBase +from collections import OrderedDict +from typing import Dict, Optional, Any from modnas.core.param_space import ParamSpace from modnas.registry.estim import register from modnas.optim.base import OptimBase -from collections import OrderedDict -from typing import Dict, Optional, Any +from ..base import EstimBase @register diff --git a/vega/algorithms/nas/modnas/exec/local.py b/vega/algorithms/nas/modnas/exec/local.py index c84c056..4301ce7 100644 --- a/vega/algorithms/nas/modnas/exec/local.py +++ b/vega/algorithms/nas/modnas/exec/local.py @@ -1,12 +1,18 @@ # -*- coding:utf-8 -*- # Copyright (C) 2020. Huawei Technologies Co., Ltd. All rights reserved. -# This program is free software; you can redistribute it and/or modify -# it under the terms of the MIT License. -# This program is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# MIT License for more details. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. """Run ModularNAS routines locally.""" from modnas.utils.wrapper import run diff --git a/vega/algorithms/nas/modnas/exec/main.py b/vega/algorithms/nas/modnas/exec/main.py index ea20d8a..e84edc8 100644 --- a/vega/algorithms/nas/modnas/exec/main.py +++ b/vega/algorithms/nas/modnas/exec/main.py @@ -1,12 +1,18 @@ # -*- coding:utf-8 -*- # Copyright (C) 2020. Huawei Technologies Co., Ltd. All rights reserved. -# This program is free software; you can redistribute it and/or modify -# it under the terms of the MIT License. -# This program is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# MIT License for more details. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
"""Run ModularNAS routines as main node.""" from modnas.utils.wrapper import run diff --git a/vega/algorithms/nas/modnas/ext_requirements.txt b/vega/algorithms/nas/modnas/ext_requirements.txt deleted file mode 100644 index eca67a4..0000000 --- a/vega/algorithms/nas/modnas/ext_requirements.txt +++ /dev/null @@ -1,19 +0,0 @@ -# Plotting -matplotlib - -# Tensorboard -tensorboardX - - -# Bayesian optimization -scikit-optimize - -# Score model -xgboost -scikit-learn - -# Distributed -rpyc - -# NASBench -tensorflow diff --git a/vega/algorithms/nas/modnas/metrics/__init__.py b/vega/algorithms/nas/modnas/metrics/__init__.py index d930a14..ed41859 100644 --- a/vega/algorithms/nas/modnas/metrics/__init__.py +++ b/vega/algorithms/nas/modnas/metrics/__init__.py @@ -1,17 +1,23 @@ # -*- coding:utf-8 -*- # Copyright (C) 2020. Huawei Technologies Co., Ltd. All rights reserved. -# This program is free software; you can redistribute it and/or modify -# it under the terms of the MIT License. -# This program is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# MIT License for more details. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +from typing import Dict, Optional, Any from modnas.registry.metrics import build from modnas.registry import SPEC_TYPE from .base import MetricsBase -from typing import Dict, Optional, Any def build_metrics_all(mt_configs: Optional[SPEC_TYPE], estim: Optional[Any] = None) -> Dict[str, MetricsBase]: diff --git a/vega/algorithms/nas/modnas/metrics/base.py b/vega/algorithms/nas/modnas/metrics/base.py index 173bff6..2df7857 100644 --- a/vega/algorithms/nas/modnas/metrics/base.py +++ b/vega/algorithms/nas/modnas/metrics/base.py @@ -1,16 +1,22 @@ # -*- coding:utf-8 -*- # Copyright (C) 2020. Huawei Technologies Co., Ltd. All rights reserved. -# This program is free software; you can redistribute it and/or modify -# it under the terms of the MIT License. -# This program is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# MIT License for more details. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
"""Implementation of Metrics interface.""" -from modnas.utils.logging import get_logger from typing import Any +from modnas.utils.logging import get_logger class MetricsBase(): diff --git a/vega/algorithms/nas/modnas/metrics/predefined/agg.py b/vega/algorithms/nas/modnas/metrics/predefined/agg.py index 1440e63..74b9a59 100644 --- a/vega/algorithms/nas/modnas/metrics/predefined/agg.py +++ b/vega/algorithms/nas/modnas/metrics/predefined/agg.py @@ -1,18 +1,24 @@ # -*- coding:utf-8 -*- # Copyright (C) 2020. Huawei Technologies Co., Ltd. All rights reserved. -# This program is free software; you can redistribute it and/or modify -# it under the terms of the MIT License. -# This program is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# MIT License for more details. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. """Aggregate metrics.""" from functools import reduce -from ..base import MetricsBase -from modnas.registry.metrics import register, build from typing import Dict, Any +from modnas.registry.metrics import register, build +from ..base import MetricsBase @register diff --git a/vega/algorithms/nas/modnas/metrics/predefined/estim.py b/vega/algorithms/nas/modnas/metrics/predefined/estim.py index e3fecf6..89840cc 100644 --- a/vega/algorithms/nas/modnas/metrics/predefined/estim.py +++ b/vega/algorithms/nas/modnas/metrics/predefined/estim.py @@ -1,16 +1,22 @@ # -*- coding:utf-8 -*- # Copyright (C) 2020. Huawei Technologies Co., Ltd. All rights reserved. -# This program is free software; you can redistribute it and/or modify -# it under the terms of the MIT License. -# This program is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# MIT License for more details. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. """Estimator-based metrics.""" -from ..base import MetricsBase from modnas.registry.metrics import register +from ..base import MetricsBase @register diff --git a/vega/algorithms/nas/modnas/metrics/predefined/stats.py b/vega/algorithms/nas/modnas/metrics/predefined/stats.py index ad507cf..25e023f 100644 --- a/vega/algorithms/nas/modnas/metrics/predefined/stats.py +++ b/vega/algorithms/nas/modnas/metrics/predefined/stats.py @@ -1,22 +1,27 @@ # -*- coding:utf-8 -*- # Copyright (C) 2020. Huawei Technologies Co., Ltd. All rights reserved. 
-# This program is free software; you can redistribute it and/or modify -# it under the terms of the MIT License. -# This program is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# MIT License for more details. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. """Statistical metrics.""" - +from typing import List, Any, Optional import yaml -import pickle import numpy as np -from ..base import MetricsBase from modnas.registry.metrics import register, build from modnas.registry import SPEC_TYPE -from typing import List, Any, Optional +from vega.common import FileOps +from ..base import MetricsBase @register @@ -80,8 +85,7 @@ class StatsModelMetrics(MetricsBase): def __init__(self, model_path, head): super().__init__() - with open(model_path, 'rb') as f: - self.model = pickle.load(f) + self.model = FileOps.load_pickle(model_path) self.head = head def __call__(self, stats): diff --git a/vega/algorithms/nas/modnas/metrics/torch/rasp.py b/vega/algorithms/nas/modnas/metrics/torch/rasp.py index 28f042a..3088339 100644 --- a/vega/algorithms/nas/modnas/metrics/torch/rasp.py +++ b/vega/algorithms/nas/modnas/metrics/torch/rasp.py @@ -1,20 +1,27 @@ # -*- coding:utf-8 -*- # Copyright (C) 2020. Huawei Technologies Co., Ltd. All rights reserved. -# This program is free software; you can redistribute it and/or modify -# it under the terms of the MIT License. -# This program is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# MIT License for more details. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
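In the stats metrics hunk above, the raw open/pickle.load pair is replaced by vega's FileOps.load_pickle helper. The sketch below shows the shape of that refactor with a local stand-in loader; the real vega.common.FileOps.load_pickle may add path validation or other checks that are not reproduced here.

import pickle


def load_pickle(path):
    """Local stand-in for a centralized pickle loader (see caveat above)."""
    with open(path, 'rb') as f:
        return pickle.load(f)


class StatsModelMetricsSketch:
    """Illustrative shape of the refactored constructor, not the real class."""

    def __init__(self, model_path, head):
        # Before: the open/pickle.load boilerplate lived in every caller.
        # After:  a single helper owns deserialization.
        self.model = load_pickle(model_path)
        self.head = head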
"""RASP-based metrics.""" -from ..base import MetricsBase + +from typing import List, Any, Union +from torch.nn.modules.module import Module from modnas.registry.metrics import register, build from modnas.arch_space.mixed_ops import MixedOp from modnas.registry import SPEC_TYPE -from torch.nn.modules.module import Module -from typing import List, Any, Union +from ..base import MetricsBase try: import rasp @@ -122,7 +129,8 @@ def __call__(self, net: Module) -> Any: continue mixop_node = F.get_stats_node(m) self.excluded.add(mixop_node) - assert mixop_node['in_shape'] is not None + if mixop_node['in_shape'] is None: + raise ValueError('Inshape of mixop is None.') mixop_mt = 0 m_in, _ = mixop_node['in_shape'], mixop_node['out_shape'] for p, (pn, op) in zip(m.prob(), m.named_candidates()): diff --git a/vega/algorithms/nas/modnas/metrics/torch/traversal.py b/vega/algorithms/nas/modnas/metrics/torch/traversal.py index bb62a35..b8599db 100644 --- a/vega/algorithms/nas/modnas/metrics/torch/traversal.py +++ b/vega/algorithms/nas/modnas/metrics/torch/traversal.py @@ -1,17 +1,23 @@ # -*- coding:utf-8 -*- # Copyright (C) 2020. Huawei Technologies Co., Ltd. All rights reserved. -# This program is free software; you can redistribute it and/or modify -# it under the terms of the MIT License. -# This program is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# MIT License for more details. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. """Network module traversal metrics.""" -from ..base import MetricsBase from modnas.registry.metrics import register, build from modnas.arch_space.mixed_ops import MixedOp +from ..base import MetricsBase @register diff --git a/vega/algorithms/nas/modnas/optim/base.py b/vega/algorithms/nas/modnas/optim/base.py index 7d9c2e9..bf2fe64 100644 --- a/vega/algorithms/nas/modnas/optim/base.py +++ b/vega/algorithms/nas/modnas/optim/base.py @@ -1,22 +1,28 @@ # -*- coding:utf-8 -*- # Copyright (C) 2020. Huawei Technologies Co., Ltd. All rights reserved. -# This program is free software; you can redistribute it and/or modify -# it under the terms of the MIT License. -# This program is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# MIT License for more details. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
"""Basic Optimizer classes.""" import random +from typing import Any, Dict, List, Optional +from collections import OrderedDict from modnas import backend from modnas.core.param_space import ParamSpace from modnas.core.event import event_hooked_subclass from modnas.utils.logging import get_logger -from collections import OrderedDict from modnas.estim.base import EstimBase -from typing import Any, Dict, List, Optional @event_hooked_subclass diff --git a/vega/algorithms/nas/modnas/optim/model_optim/base.py b/vega/algorithms/nas/modnas/optim/model_optim/base.py index a9bc2fb..cd87488 100644 --- a/vega/algorithms/nas/modnas/optim/model_optim/base.py +++ b/vega/algorithms/nas/modnas/optim/model_optim/base.py @@ -1,12 +1,18 @@ # -*- coding:utf-8 -*- # Copyright (C) 2020. Huawei Technologies Co., Ltd. All rights reserved. -# This program is free software; you can redistribute it and/or modify -# it under the terms of the MIT License. -# This program is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# MIT License for more details. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. """Score model optimum finder.""" import random diff --git a/vega/algorithms/nas/modnas/optim/model_optim/sa.py b/vega/algorithms/nas/modnas/optim/model_optim/sa.py index d3e2972..c3b1b60 100644 --- a/vega/algorithms/nas/modnas/optim/model_optim/sa.py +++ b/vega/algorithms/nas/modnas/optim/model_optim/sa.py @@ -1,22 +1,28 @@ # -*- coding:utf-8 -*- # Copyright (C) 2020. Huawei Technologies Co., Ltd. All rights reserved. -# This program is free software; you can redistribute it and/or modify -# it under the terms of the MIT License. -# This program is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# MIT License for more details. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
"""Simulated annealing model optimum finder.""" import heapq import random +from collections import OrderedDict +from typing import Any, List, Set, Union import numpy as np -from .base import ModelOptim from modnas.registry.model_optim import register from modnas.utils.logging import get_logger -from collections import OrderedDict -from typing import Any, List, Set, Union +from .base import ModelOptim logger = get_logger('model_optim') diff --git a/vega/algorithms/nas/modnas/optim/model_optim/sampling.py b/vega/algorithms/nas/modnas/optim/model_optim/sampling.py index f310565..dc778e7 100644 --- a/vega/algorithms/nas/modnas/optim/model_optim/sampling.py +++ b/vega/algorithms/nas/modnas/optim/model_optim/sampling.py @@ -1,16 +1,22 @@ # -*- coding:utf-8 -*- # Copyright (C) 2020. Huawei Technologies Co., Ltd. All rights reserved. -# This program is free software; you can redistribute it and/or modify -# it under the terms of the MIT License. -# This program is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# MIT License for more details. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. """Random sampling model optimum finder.""" -from .base import ModelOptim from modnas.registry.model_optim import register +from .base import ModelOptim @register diff --git a/vega/algorithms/nas/modnas/optim/predefined/genetic.py b/vega/algorithms/nas/modnas/optim/predefined/genetic.py index c849941..e5383bf 100644 --- a/vega/algorithms/nas/modnas/optim/predefined/genetic.py +++ b/vega/algorithms/nas/modnas/optim/predefined/genetic.py @@ -1,22 +1,28 @@ # -*- coding:utf-8 -*- # Copyright (C) 2020. Huawei Technologies Co., Ltd. All rights reserved. -# This program is free software; you can redistribute it and/or modify -# it under the terms of the MIT License. -# This program is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# MIT License for more details. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
"""Genetic search algorithms.""" -import numpy as np import random -from ..base import CategoricalSpaceOptim +from collections import OrderedDict +from typing import Callable, Dict, List, Union, Optional +import numpy as np from modnas.registry.optim import register from modnas.core.param_space import ParamSpace from modnas.estim.base import EstimBase -from collections import OrderedDict -from typing import Callable, Dict, List, Union, Optional +from ..base import CategoricalSpaceOptim class GeneticOptim(CategoricalSpaceOptim): diff --git a/vega/algorithms/nas/modnas/optim/predefined/gridsearch.py b/vega/algorithms/nas/modnas/optim/predefined/gridsearch.py index 82ac3c7..5ee5a7d 100644 --- a/vega/algorithms/nas/modnas/optim/predefined/gridsearch.py +++ b/vega/algorithms/nas/modnas/optim/predefined/gridsearch.py @@ -1,21 +1,27 @@ # -*- coding:utf-8 -*- # Copyright (C) 2020. Huawei Technologies Co., Ltd. All rights reserved. -# This program is free software; you can redistribute it and/or modify -# it under the terms of the MIT License. -# This program is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# MIT License for more details. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. """Basic categorical Optimizers.""" import time import random -from ..base import CategoricalSpaceOptim -from modnas.registry.optim import register -from modnas.core.param_space import ParamSpace from collections import OrderedDict from typing import Optional +from modnas.registry.optim import register +from modnas.core.param_space import ParamSpace +from ..base import CategoricalSpaceOptim @register diff --git a/vega/algorithms/nas/modnas/optim/predefined/model_based.py b/vega/algorithms/nas/modnas/optim/predefined/model_based.py index 0f987d8..fd67215 100644 --- a/vega/algorithms/nas/modnas/optim/predefined/model_based.py +++ b/vega/algorithms/nas/modnas/optim/predefined/model_based.py @@ -1,23 +1,29 @@ # -*- coding:utf-8 -*- # Copyright (C) 2020. Huawei Technologies Co., Ltd. All rights reserved. -# This program is free software; you can redistribute it and/or modify -# it under the terms of the MIT License. -# This program is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# MIT License for more details. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
"""Model-based Optimizer.""" -from ..base import CategoricalSpaceOptim +from typing import Optional +from collections import OrderedDict from modnas.registry.score_model import build as build_score_model from modnas.registry.model_optim import build as build_model_optim from modnas.registry.optim import register from modnas.registry import SPEC_TYPE -from collections import OrderedDict from modnas.core.param_space import ParamSpace from modnas.estim.base import EstimBase -from typing import Optional +from ..base import CategoricalSpaceOptim @register diff --git a/vega/algorithms/nas/modnas/optim/score_model/base.py b/vega/algorithms/nas/modnas/optim/score_model/base.py index 0625be2..3e4b535 100644 --- a/vega/algorithms/nas/modnas/optim/score_model/base.py +++ b/vega/algorithms/nas/modnas/optim/score_model/base.py @@ -1,18 +1,25 @@ # -*- coding:utf-8 -*- # Copyright (C) 2020. Huawei Technologies Co., Ltd. All rights reserved. -# This program is free software; you can redistribute it and/or modify -# it under the terms of the MIT License. -# This program is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# MIT License for more details. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. """Evaluation score prediction model.""" -import numpy as np + +from typing import List, Union from collections import OrderedDict +import numpy as np from numpy import ndarray -from typing import List, Union class ScoreModel(): diff --git a/vega/algorithms/nas/modnas/optim/score_model/sklearn.py b/vega/algorithms/nas/modnas/optim/score_model/sklearn.py index 179d5d0..e179fcc 100644 --- a/vega/algorithms/nas/modnas/optim/score_model/sklearn.py +++ b/vega/algorithms/nas/modnas/optim/score_model/sklearn.py @@ -1,33 +1,42 @@ # -*- coding:utf-8 -*- # Copyright (C) 2020. Huawei Technologies Co., Ltd. All rights reserved. -# This program is free software; you can redistribute it and/or modify -# it under the terms of the MIT License. -# This program is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# MIT License for more details. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
"""Scikit-learn score prediction model.""" + import importlib +from collections import OrderedDict +from typing import List +from numpy import ndarray import numpy as np try: import sklearn except ImportError: sklearn = None -from .base import ScoreModel from modnas.registry.score_model import register -from collections import OrderedDict -from numpy import ndarray -from typing import List +from .base import ScoreModel @register class SKLearnScoreModel(ScoreModel): """Scikit-learn score prediction model class.""" - def __init__(self, space, model_cls, module, model_kwargs={}): + def __init__(self, space, model_cls, module, model_kwargs=None): super().__init__(space) + if model_kwargs is None: + model_kwargs = {} if sklearn is None: raise RuntimeError('scikit-learn is not installed') module = importlib.import_module(module) diff --git a/vega/algorithms/nas/modnas/optim/score_model/xgboost.py b/vega/algorithms/nas/modnas/optim/score_model/xgboost.py index d50ca48..0c5c343 100644 --- a/vega/algorithms/nas/modnas/optim/score_model/xgboost.py +++ b/vega/algorithms/nas/modnas/optim/score_model/xgboost.py @@ -1,24 +1,31 @@ # -*- coding:utf-8 -*- # Copyright (C) 2020. Huawei Technologies Co., Ltd. All rights reserved. -# This program is free software; you can redistribute it and/or modify -# it under the terms of the MIT License. -# This program is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# MIT License for more details. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. """XGBoost score prediction model.""" + +from collections import OrderedDict +from typing import List +from numpy import ndarray import numpy as np try: import xgboost as xgb except ImportError: xgb = None -from .base import ScoreModel from modnas.registry.score_model import register -from collections import OrderedDict -from numpy import ndarray -from typing import List +from .base import ScoreModel xgb_params_reg = { @@ -48,8 +55,10 @@ class XGBoostScoreModel(ScoreModel): """XGBoost score prediction model class.""" - def __init__(self, space, loss_type='reg', xgb_kwargs={}): + def __init__(self, space, loss_type='reg', xgb_kwargs=None): super().__init__(space) + if xgb_kwargs is None: + xgb_kwargs = {} if xgb is None: raise RuntimeError('xgboost is not installed') xgb_params = xgb_params_rank if loss_type == 'rank' else xgb_params_reg diff --git a/vega/algorithms/nas/modnas/optim/torch/gradient_based.py b/vega/algorithms/nas/modnas/optim/torch/gradient_based.py index cf82900..9defe9c 100644 --- a/vega/algorithms/nas/modnas/optim/torch/gradient_based.py +++ b/vega/algorithms/nas/modnas/optim/torch/gradient_based.py @@ -1,26 +1,33 @@ # -*- coding:utf-8 -*- # Copyright (C) 2020. Huawei Technologies Co., Ltd. All rights reserved. -# This program is free software; you can redistribute it and/or modify -# it under the terms of the MIT License. 
-# This program is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# MIT License for more details. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. """Optimizer operating on tensor parameters.""" + import math import copy +from typing import Any, List, Optional, Tuple, Dict import torch -from ..base import GradientBasedOptim +from torch import Tensor +from torch.nn.modules.module import Module +from torch.optim.optimizer import Optimizer from modnas.core.param_space import ParamSpace from modnas.arch_space.mixed_ops import MixedOp from modnas.registry.optim import register from modnas.estim.base import EstimBase -from torch import Tensor -from torch.nn.modules.module import Module -from torch.optim.optimizer import Optimizer -from typing import Any, List, Optional, Tuple, Dict +from ..base import GradientBasedOptim OPTIM_CONF_TYPE = Optional[Dict[str, Any]] diff --git a/vega/algorithms/nas/modnas/registry/__init__.py b/vega/algorithms/nas/modnas/registry/__init__.py index 6bad460..9504e4e 100644 --- a/vega/algorithms/nas/modnas/registry/__init__.py +++ b/vega/algorithms/nas/modnas/registry/__init__.py @@ -1,23 +1,29 @@ # -*- coding:utf-8 -*- # Copyright (C) 2020. Huawei Technologies Co., Ltd. All rights reserved. -# This program is free software; you can redistribute it and/or modify -# it under the terms of the MIT License. -# This program is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# MIT License for more details. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. """Registry for framework components.""" import sys +import types import importlib.util from importlib.abc import Loader, MetaPathFinder from importlib.machinery import ModuleSpec from functools import partial -from .registry import registry from typing import Any, Callable, Dict, List, Optional, Tuple, Sequence, Union from types import ModuleType -import types +from .registry import registry SPEC_TYPE = Union[str, Tuple[str, ...], List[Any], Dict[str, Any]] diff --git a/vega/algorithms/nas/modnas/registry/registry.py b/vega/algorithms/nas/modnas/registry/registry.py index 4ddfbd6..bab93f4 100644 --- a/vega/algorithms/nas/modnas/registry/registry.py +++ b/vega/algorithms/nas/modnas/registry/registry.py @@ -1,12 +1,18 @@ # -*- coding:utf-8 -*- # Copyright (C) 2020. 
Huawei Technologies Co., Ltd. All rights reserved. -# This program is free software; you can redistribute it and/or modify -# it under the terms of the MIT License. -# This program is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# MIT License for more details. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. """Default registry.""" import logging diff --git a/vega/algorithms/nas/modnas/registry/vega_registry.py b/vega/algorithms/nas/modnas/registry/vega_registry.py index 49eef19..d90618e 100644 --- a/vega/algorithms/nas/modnas/registry/vega_registry.py +++ b/vega/algorithms/nas/modnas/registry/vega_registry.py @@ -1,12 +1,18 @@ # -*- coding:utf-8 -*- # Copyright (C) 2020. Huawei Technologies Co., Ltd. All rights reserved. -# This program is free software; you can redistribute it and/or modify -# it under the terms of the MIT License. -# This program is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# MIT License for more details. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. """VEGA Registry.""" diff --git a/vega/algorithms/nas/modnas/trainer/base.py b/vega/algorithms/nas/modnas/trainer/base.py index 5b21d22..541212d 100644 --- a/vega/algorithms/nas/modnas/trainer/base.py +++ b/vega/algorithms/nas/modnas/trainer/base.py @@ -1,12 +1,18 @@ # -*- coding:utf-8 -*- # Copyright (C) 2020. Huawei Technologies Co., Ltd. All rights reserved. -# This program is free software; you can redistribute it and/or modify -# it under the terms of the MIT License. -# This program is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# MIT License for more details. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
"""Base Trainer.""" from modnas.utils.logging import get_logger diff --git a/vega/algorithms/nas/modnas/trainer/torch/default.py b/vega/algorithms/nas/modnas/trainer/torch/default.py index c0376c2..afb8066 100644 --- a/vega/algorithms/nas/modnas/trainer/torch/default.py +++ b/vega/algorithms/nas/modnas/trainer/torch/default.py @@ -1,24 +1,31 @@ # -*- coding:utf-8 -*- # Copyright (C) 2020. Huawei Technologies Co., Ltd. All rights reserved. -# This program is free software; you can redistribute it and/or modify -# it under the terms of the MIT License. -# This program is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# MIT License for more details. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. """Default Trainer.""" + +from typing import Dict, Optional, Any import torch import torch.nn as nn -from modnas import backend -from ..base import TrainerBase -from modnas.registry.trainer import register -from modnas.estim.base import EstimBase from torch import Tensor from torch.nn.modules.module import Module -from typing import Dict, Optional, Any from modnas.registry import SPEC_TYPE +from modnas import backend +from modnas.registry.trainer import register +from modnas.estim.base import EstimBase +from ..base import TrainerBase @register diff --git a/vega/algorithms/nas/modnas/trainer/torch/image_cls.py b/vega/algorithms/nas/modnas/trainer/torch/image_cls.py index 5ee3fb3..493e1c6 100644 --- a/vega/algorithms/nas/modnas/trainer/torch/image_cls.py +++ b/vega/algorithms/nas/modnas/trainer/torch/image_cls.py @@ -1,19 +1,25 @@ # -*- coding:utf-8 -*- # Copyright (C) 2020. Huawei Technologies Co., Ltd. All rights reserved. -# This program is free software; you can redistribute it and/or modify -# it under the terms of the MIT License. -# This program is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# MIT License for more details. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
"""Image classification Trainer.""" import torch import torch.nn as nn from modnas import backend -from ..base import TrainerBase from modnas.registry.trainer import register +from ..base import TrainerBase def accuracy(output, target, topk=(1, )): diff --git a/vega/algorithms/nas/modnas/utils/__init__.py b/vega/algorithms/nas/modnas/utils/__init__.py index ec7e35f..fdbb54f 100644 --- a/vega/algorithms/nas/modnas/utils/__init__.py +++ b/vega/algorithms/nas/modnas/utils/__init__.py @@ -1,29 +1,36 @@ # -*- coding:utf-8 -*- # Copyright (C) 2020. Huawei Technologies Co., Ltd. All rights reserved. -# This program is free software; you can redistribute it and/or modify -# it under the terms of the MIT License. -# This program is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# MIT License for more details. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. import sys +import re +import os import time import inspect import importlib -import numpy as np import hashlib from functools import partial +from typing import Callable, Dict, List, Optional, Union, Any +import numpy as np from modnas.version import __version__ -from .logging import get_logger from modnas import backend as be -from typing import Callable, Dict, List, Optional, Union, Any - try: from tensorboardX import SummaryWriter except ImportError: SummaryWriter = None +from .logging import get_logger logger = get_logger('utils') @@ -39,18 +46,10 @@ def import_file(path, name=None): return module -def exec_file(path): - """Execute file and return globals.""" - with open(path, 'rb') as fp: - code = compile(fp.read(), path, 'exec') - globs = { - '__file__': path, - '__name__': '__main__', - '__package__': None, - '__cached__': None, - } - exec(code, globs, None) - return globs +def check_value(value, pattern): + """Check value.""" + if isinstance(value, str) and len(re.compile(pattern).findall(value)) > 0: + raise ValueError(f"{value} contains invalid characters.") def import_modules(modules: List[str]) -> None: @@ -77,7 +76,8 @@ def import_modules(modules: List[str]) -> None: def get_exp_name(config): """Return experiment name.""" - return '{}.{}'.format(time.strftime('%Y%m%d', time.localtime()), hashlib.sha1(str(config).encode()).hexdigest()[:4]) + return '{}.{}'.format(time.strftime('%Y%m%d', time.localtime()), + hashlib.sha256(str(config).encode()).hexdigest()[:4]) def env_info() -> str: @@ -181,13 +181,18 @@ def copy_members( def get_same_padding(kernel_size: int) -> int: """Return SAME padding size for convolutions.""" if isinstance(kernel_size, tuple): - assert len(kernel_size) == 2, 'invalid kernel size: %s' % kernel_size + if len(kernel_size) != 2: + raise ValueError('invalid kernel size: %s' % kernel_size) p1 = get_same_padding(kernel_size[0]) p2 = get_same_padding(kernel_size[1]) return p1, p2 - assert isinstance(kernel_size, int), 'kernel size should be either `int` or `tuple`' - assert kernel_size % 2 > 
0, 'kernel size should be odd number' - return kernel_size // 2 + if isinstance(kernel_size, int): + if kernel_size % 2 > 0: + return kernel_size // 2 + else: + raise ValueError('kernel size should be odd number') + else: + raise ValueError('kernel size should be either `int` or `tuple`') class AverageMeter(): diff --git a/vega/algorithms/nas/modnas/utils/config.py b/vega/algorithms/nas/modnas/utils/config.py index 9a080a1..ffdbd83 100644 --- a/vega/algorithms/nas/modnas/utils/config.py +++ b/vega/algorithms/nas/modnas/utils/config.py @@ -1,19 +1,25 @@ # -*- coding:utf-8 -*- # Copyright (C) 2020. Huawei Technologies Co., Ltd. All rights reserved. -# This program is free software; you can redistribute it and/or modify -# it under the terms of the MIT License. -# This program is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# MIT License for more details. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. """Dictionary based configuration.""" -# modified from https://github.com/HarryVolek/PyTorch_Speaker_Verification -import yaml + +from typing import Dict, Optional, Any import copy import logging -from typing import Dict, Optional, Any +import yaml logger = logging.getLogger('modnas.config') diff --git a/vega/algorithms/nas/modnas/utils/exp_manager.py b/vega/algorithms/nas/modnas/utils/exp_manager.py index 41890fc..def6fd7 100644 --- a/vega/algorithms/nas/modnas/utils/exp_manager.py +++ b/vega/algorithms/nas/modnas/utils/exp_manager.py @@ -1,18 +1,24 @@ # -*- coding:utf-8 -*- # Copyright (C) 2020. Huawei Technologies Co., Ltd. All rights reserved. -# This program is free software; you can redistribute it and/or modify -# it under the terms of the MIT License. -# This program is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# MIT License for more details. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
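The utils hunk above makes two substantive changes beyond import reordering: exec_file, which compiled and executed an arbitrary file, is dropped in favour of a regex-based check_value validator, and get_same_padding is rewritten to raise ValueError instead of asserting. Because the refactored get_same_padding is split across the wrapped diff lines above, here it is reassembled as plain code together with check_value; the usage lines at the bottom, including the example pattern, are illustrative and not part of the diff.

import re


def check_value(value, pattern):
    """Reject string values that contain characters matched by `pattern`."""
    if isinstance(value, str) and len(re.compile(pattern).findall(value)) > 0:
        raise ValueError(f"{value} contains invalid characters.")


def get_same_padding(kernel_size):
    """Return SAME padding size for convolutions (odd int or 2-tuple of odd ints)."""
    if isinstance(kernel_size, tuple):
        if len(kernel_size) != 2:
            raise ValueError('invalid kernel size: %s' % kernel_size)
        p1 = get_same_padding(kernel_size[0])
        p2 = get_same_padding(kernel_size[1])
        return p1, p2
    if isinstance(kernel_size, int):
        if kernel_size % 2 > 0:
            return kernel_size // 2
        else:
            raise ValueError('kernel size should be odd number')
    else:
        raise ValueError('kernel size should be either `int` or `tuple`')


print(get_same_padding(3))        # 1
print(get_same_padding((3, 5)))   # (1, 2)
check_value('resnet_20', r'[^A-Za-z0-9_.-]')  # passes silently for safe names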
"""Experiment file manager.""" import os import time -from .logging import get_logger from typing import Optional +from .logging import get_logger class ExpManager(): diff --git a/vega/algorithms/nas/modnas/utils/logging.py b/vega/algorithms/nas/modnas/utils/logging.py index 7ad753d..9eb2c69 100644 --- a/vega/algorithms/nas/modnas/utils/logging.py +++ b/vega/algorithms/nas/modnas/utils/logging.py @@ -1,12 +1,18 @@ # -*- coding:utf-8 -*- # Copyright (C) 2020. Huawei Technologies Co., Ltd. All rights reserved. -# This program is free software; you can redistribute it and/or modify -# it under the terms of the MIT License. -# This program is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# MIT License for more details. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. """Manage logging states and loggers.""" import os @@ -14,9 +20,9 @@ import copy import logging import logging.config -from modnas.utils.config import merge_config from logging import Logger from typing import Optional, Dict, Any +from modnas.utils.config import merge_config DEFAULT_LOGGING_CONF = { diff --git a/vega/algorithms/nas/modnas/utils/predefined.py b/vega/algorithms/nas/modnas/utils/predefined.py index ea70d38..c11dcd2 100644 --- a/vega/algorithms/nas/modnas/utils/predefined.py +++ b/vega/algorithms/nas/modnas/utils/predefined.py @@ -1,12 +1,18 @@ # -*- coding:utf-8 -*- # Copyright (C) 2020. Huawei Technologies Co., Ltd. All rights reserved. -# This program is free software; you can redistribute it and/or modify -# it under the terms of the MIT License. -# This program is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# MIT License for more details. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. """Predefined components.""" import modnas.arch_space.predefined diff --git a/vega/algorithms/nas/modnas/utils/wrapper.py b/vega/algorithms/nas/modnas/utils/wrapper.py index 37bf403..6c9583a 100644 --- a/vega/algorithms/nas/modnas/utils/wrapper.py +++ b/vega/algorithms/nas/modnas/utils/wrapper.py @@ -1,19 +1,23 @@ # -*- coding:utf-8 -*- # Copyright (C) 2020. Huawei Technologies Co., Ltd. All rights reserved. -# This program is free software; you can redistribute it and/or modify -# it under the terms of the MIT License. 
-# This program is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# MIT License for more details. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. """Wrapper for routine initialization and execution.""" import argparse from collections import OrderedDict from functools import partial -from .exp_manager import ExpManager -from .config import Config from modnas.core.event import EventManager from modnas.core.param_space import ParamSpace from modnas.registry.construct import build as build_con @@ -25,8 +29,10 @@ from modnas.registry import parse_spec, to_spec from modnas.utils.config import merge_config from modnas import utils -from .logging import configure_logging, get_logger from modnas import backend as be +from .exp_manager import ExpManager +from .config import Config +from .logging import configure_logging, get_logger logger = get_logger() diff --git a/vega/algorithms/nas/modnas/vega_requirements.txt b/vega/algorithms/nas/modnas/vega_requirements.txt new file mode 100644 index 0000000..4a1fe4b --- /dev/null +++ b/vega/algorithms/nas/modnas/vega_requirements.txt @@ -0,0 +1,7 @@ +pandas +scikit-learn +scipy +pyyaml +zmq +requests +psutil diff --git a/vega/algorithms/nas/opt_nas/ops_nas.py b/vega/algorithms/nas/opt_nas/ops_nas.py index 1f3f122..5df405c 100644 --- a/vega/algorithms/nas/opt_nas/ops_nas.py +++ b/vega/algorithms/nas/opt_nas/ops_nas.py @@ -1,12 +1,18 @@ # -*- coding:utf-8 -*- # Copyright (C) 2020. Huawei Technologies Co., Ltd. All rights reserved. -# This program is free software; you can redistribute it and/or modify -# it under the terms of the MIT License. -# This program is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# MIT License for more details. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. """This is Operator SearchSpace.""" from vega.common import ClassFactory, ClassType, SearchableRegister, Searchable, space, change_space from vega.core.search_space import SearchSpace diff --git a/vega/algorithms/nas/segmentation_ea/conf.py b/vega/algorithms/nas/segmentation_ea/conf.py index 30e5ba4..7e2bf89 100644 --- a/vega/algorithms/nas/segmentation_ea/conf.py +++ b/vega/algorithms/nas/segmentation_ea/conf.py @@ -1,12 +1,18 @@ # -*- coding:utf-8 -*- # Copyright (C) 2020. Huawei Technologies Co., Ltd. All rights reserved. 
-# This program is free software; you can redistribute it and/or modify -# it under the terms of the MIT License. -# This program is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# MIT License for more details. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. """Defined Configs.""" from vega.common import ConfigSerializable diff --git a/vega/algorithms/nas/segmentation_ea/segmentation_codec.py b/vega/algorithms/nas/segmentation_ea/segmentation_codec.py index 5cb2915..f0f8095 100644 --- a/vega/algorithms/nas/segmentation_ea/segmentation_codec.py +++ b/vega/algorithms/nas/segmentation_ea/segmentation_codec.py @@ -1,12 +1,18 @@ # -*- coding: utf-8 -*- # Copyright (C) 2020. Huawei Technologies Co., Ltd. All rights reserved. -# This program is free software; you can redistribute it and/or modify -# it under the terms of the MIT License. -# This program is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# MIT License for more details. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. """Codec of Prune EA.""" import re diff --git a/vega/algorithms/nas/segmentation_ea/segmentation_ea_trainercallback.py b/vega/algorithms/nas/segmentation_ea/segmentation_ea_trainercallback.py index 47caa12..c20f768 100644 --- a/vega/algorithms/nas/segmentation_ea/segmentation_ea_trainercallback.py +++ b/vega/algorithms/nas/segmentation_ea/segmentation_ea_trainercallback.py @@ -1,17 +1,23 @@ # -*- coding:utf-8 -*- # Copyright (C) 2020. Huawei Technologies Co., Ltd. All rights reserved. -# This program is free software; you can redistribute it and/or modify -# it under the terms of the MIT License. -# This program is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# MIT License for more details. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+# See the License for the specific language governing permissions and +# limitations under the License. """The trainer program for SegmentationEA.""" -import vega import logging import torch +import vega from vega.common import ClassFactory, ClassType from vega.metrics import calc_model_flops_params from vega.trainer.callbacks import Callback diff --git a/vega/algorithms/nas/segmentation_ea/segmentation_mutate.py b/vega/algorithms/nas/segmentation_ea/segmentation_mutate.py index 751f571..ce5fa48 100644 --- a/vega/algorithms/nas/segmentation_ea/segmentation_mutate.py +++ b/vega/algorithms/nas/segmentation_ea/segmentation_mutate.py @@ -1,20 +1,26 @@ # -*- coding:utf-8 -*- # Copyright (C) 2020. Huawei Technologies Co., Ltd. All rights reserved. -# This program is free software; you can redistribute it and/or modify -# it under the terms of the MIT License. -# This program is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# MIT License for more details. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. """Mutate search algorithm used to search BiSeNet code.""" import random import copy import logging import numpy as np -from .conf import SegmentationConfig from vega.report import ReportServer +from .conf import SegmentationConfig class SegmentationMutate(object): diff --git a/vega/algorithms/nas/segmentation_ea/segmentation_nas.py b/vega/algorithms/nas/segmentation_ea/segmentation_nas.py index 7c37dc4..c41b186 100644 --- a/vega/algorithms/nas/segmentation_ea/segmentation_nas.py +++ b/vega/algorithms/nas/segmentation_ea/segmentation_nas.py @@ -1,21 +1,27 @@ # -*- coding:utf-8 -*- # Copyright (C) 2020. Huawei Technologies Co., Ltd. All rights reserved. -# This program is free software; you can redistribute it and/or modify -# it under the terms of the MIT License. -# This program is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# MIT License for more details. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. """Search algorithm used to search BiSeNet code. 
Include random search and mutate search.""" from copy import deepcopy -from .conf import SegmentationConfig -from .segmentation_random import SegmentationRandom -from .segmentation_mutate import SegmentationMutate from vega.common import ClassFactory, ClassType from vega.core.search_algs import SearchAlgorithm from vega.report import ReportServer +from .conf import SegmentationConfig +from .segmentation_random import SegmentationRandom +from .segmentation_mutate import SegmentationMutate @ClassFactory.register(ClassType.SEARCH_ALGORITHM) diff --git a/vega/algorithms/nas/segmentation_ea/segmentation_random.py b/vega/algorithms/nas/segmentation_ea/segmentation_random.py index dd990ec..44e687c 100644 --- a/vega/algorithms/nas/segmentation_ea/segmentation_random.py +++ b/vega/algorithms/nas/segmentation_ea/segmentation_random.py @@ -1,12 +1,18 @@ # -*- coding:utf-8 -*- # Copyright (C) 2020. Huawei Technologies Co., Ltd. All rights reserved. -# This program is free software; you can redistribute it and/or modify -# it under the terms of the MIT License. -# This program is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# MIT License for more details. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. """Random search algorithm used to search BiSeNet code.""" diff --git a/vega/algorithms/nas/sgas/__init__.py b/vega/algorithms/nas/sgas/__init__.py index 074a7c3..9641f91 100644 --- a/vega/algorithms/nas/sgas/__init__.py +++ b/vega/algorithms/nas/sgas/__init__.py @@ -1 +1 @@ -from .sgas_trainer_callback import * +from .sgas_trainer_callback import SGASTrainerCallback diff --git a/vega/algorithms/nas/sgas/sgas_trainer_callback.py b/vega/algorithms/nas/sgas/sgas_trainer_callback.py index 55b45ff..094dd34 100644 --- a/vega/algorithms/nas/sgas/sgas_trainer_callback.py +++ b/vega/algorithms/nas/sgas/sgas_trainer_callback.py @@ -1,12 +1,18 @@ # -*- coding: utf-8 -*- # Copyright (C) 2020. Huawei Technologies Co., Ltd. All rights reserved. -# This program is free software; you can redistribute it and/or modify -# it under the terms of the MIT License. -# This program is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# MIT License for more details. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
"""SGAS trainer.""" diff --git a/vega/algorithms/nas/sm_nas/smnas_codec.py b/vega/algorithms/nas/sm_nas/smnas_codec.py index 2f95f51..62a4776 100644 --- a/vega/algorithms/nas/sm_nas/smnas_codec.py +++ b/vega/algorithms/nas/sm_nas/smnas_codec.py @@ -1,12 +1,18 @@ # -*- coding:utf-8 -*- # Copyright (C) 2020. Huawei Technologies Co., Ltd. All rights reserved. -# This program is free software; you can redistribute it and/or modify -# it under the terms of the MIT License. -# This program is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# MIT License for more details. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. """Defined BackboneNasCodec.""" from vega.common import ClassType, ClassFactory diff --git a/vega/algorithms/nas/sp_nas/__init__.py b/vega/algorithms/nas/sp_nas/__init__.py index 640b29a..b868f6f 100644 --- a/vega/algorithms/nas/sp_nas/__init__.py +++ b/vega/algorithms/nas/sp_nas/__init__.py @@ -1,6 +1,6 @@ -from .spnas_s import * -from .spnas_p import * -from .reignition import ReignitionCallback import vega if vega.is_ms_backend(): from .spnas_trainer_callback import SpNasTrainerCallback +from .spnas_s import SpNasS +from .spnas_p import SpNasP +from .reignition import ReignitionCallback diff --git a/vega/algorithms/nas/sp_nas/conf.py b/vega/algorithms/nas/sp_nas/conf.py index 814048f..9dc8c79 100644 --- a/vega/algorithms/nas/sp_nas/conf.py +++ b/vega/algorithms/nas/sp_nas/conf.py @@ -1,12 +1,18 @@ # -*- coding:utf-8 -*- # Copyright (C) 2020. Huawei Technologies Co., Ltd. All rights reserved. -# This program is free software; you can redistribute it and/or modify -# it under the terms of the MIT License. -# This program is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# MIT License for more details. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. """Defined Configs.""" from vega.common import ConfigSerializable diff --git a/vega/algorithms/nas/sp_nas/reignition.py b/vega/algorithms/nas/sp_nas/reignition.py index 08964f0..91882b9 100644 --- a/vega/algorithms/nas/sp_nas/reignition.py +++ b/vega/algorithms/nas/sp_nas/reignition.py @@ -1,12 +1,18 @@ # -*- coding:utf-8 -*- # Copyright (C) 2020. Huawei Technologies Co., Ltd. All rights reserved. 
-# This program is free software; you can redistribute it and/or modify -# it under the terms of the MIT License. -# This program is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# MIT License for more details. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. """This is SPNAS Reignition Callback.""" import logging import copy diff --git a/vega/algorithms/nas/sp_nas/spnas_p.py b/vega/algorithms/nas/sp_nas/spnas_p.py index 8c71aaf..7690ce6 100644 --- a/vega/algorithms/nas/sp_nas/spnas_p.py +++ b/vega/algorithms/nas/sp_nas/spnas_p.py @@ -1,12 +1,28 @@ -"""The second stage of SMNAS.""" +# -*- coding:utf-8 -*- + +# Copyright (C) 2020. Huawei Technologies Co., Ltd. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""The second stage of SP-NAS.""" import logging import random +import numpy as np from vega.common import ClassFactory, ClassType from vega.core.search_algs import SearchAlgorithm -import numpy as np -from .conf import SpNasConfig from vega.report import ReportServer +from .conf import SpNasConfig @ClassFactory.register(ClassType.SEARCH_ALGORITHM) diff --git a/vega/algorithms/nas/sp_nas/spnas_s.py b/vega/algorithms/nas/sp_nas/spnas_s.py index e1af6c4..37b5ee4 100644 --- a/vega/algorithms/nas/sp_nas/spnas_s.py +++ b/vega/algorithms/nas/sp_nas/spnas_s.py @@ -1,12 +1,29 @@ -"""The second stage of SMNAS.""" +# -*- coding:utf-8 -*- + +# Copyright (C) 2020. Huawei Technologies Co., Ltd. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +"""The second stage of SP-NAS.""" + import copy import logging from random import SystemRandom +import numpy as np from vega.common import ClassFactory, ClassType from vega.core.search_algs import SearchAlgorithm -import numpy as np -from .conf import SpNasConfig from vega.report import ReportServer +from .conf import SpNasConfig @ClassFactory.register(ClassType.SEARCH_ALGORITHM) diff --git a/vega/algorithms/nas/sp_nas/spnas_trainer_callback.py b/vega/algorithms/nas/sp_nas/spnas_trainer_callback.py index db7f221..6360823 100644 --- a/vega/algorithms/nas/sp_nas/spnas_trainer_callback.py +++ b/vega/algorithms/nas/sp_nas/spnas_trainer_callback.py @@ -1,12 +1,18 @@ # -*- coding:utf-8 -*- # Copyright (C) 2020. Huawei Technologies Co., Ltd. All rights reserved. -# This program is free software; you can redistribute it and/or modify -# it under the terms of the MIT License. -# This program is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# MIT License for more details. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. """The trainer program for Auto Lane.""" @@ -15,54 +21,55 @@ import time import numpy as np from pycocotools.coco import COCO -from vega.common import ClassFactory, ClassType -from vega.trainer.trainer_ms import TrainerMs from mindspore.train.callback import ModelCheckpoint, CheckpointConfig, LossMonitor import mindspore.common.dtype as mstype from mindspore.train import Model as MsModel from mindspore import Tensor from mindspore.nn import SGD +from vega.common import ClassFactory, ClassType +from vega.trainer.trainer_ms import TrainerMs +from vega.datasets.conf.dataset import DatasetConfig from .src.model_utils.config import config from .src.dataset import data_to_mindrecord_byte_image, create_fasterrcnn_dataset from .src.lr_schedule import dynamic_lr from .src.network_define import WithLossCell, TrainOneStepCell, LossNet from .src.util import coco_eval, bbox2result_1image, results2json -from vega.datasets.conf.dataset import DatasetConfig logger = logging.getLogger(__name__) def valid(): """Construct the trainer of SpNas.""" - config = DatasetConfig().to_dict() - config = config['_class_data'].val + config_val = DatasetConfig().to_dict() + config_val = config_val['_class_data'].val prefix = "FasterRcnn_eval.mindrecord" - mindrecord_dir = config.mindrecord_dir + mindrecord_dir = config_val.mindrecord_dir mindrecord_file = os.path.join(mindrecord_dir, prefix) if not os.path.exists(mindrecord_file): if not os.path.isdir(mindrecord_dir): os.makedirs(mindrecord_dir) - if config.dataset == "coco": - if os.path.isdir(config.coco_root): - data_to_mindrecord_byte_image(config, "coco", False, prefix, file_num=1) + if config_val.dataset == "coco": + if os.path.isdir(config_val.coco_root): + data_to_mindrecord_byte_image(config_val, "coco", False, prefix, file_num=1) else: logging.info("coco_root not exits.") else: - if os.path.isdir(config.IMAGE_DIR) and 
os.path.exists(config.ANNO_PATH): - data_to_mindrecord_byte_image(config, "other", False, prefix, file_num=1) + if os.path.isdir(config_val.IMAGE_DIR) and os.path.exists(config_val.ANNO_PATH): + data_to_mindrecord_byte_image(config_val, "other", False, prefix, file_num=1) else: logging.info("IMAGE_DIR or ANNO_PATH not exits.") - dataset = create_fasterrcnn_dataset(config, mindrecord_file, batch_size=config.test_batch_size, is_training=False) + dataset = create_fasterrcnn_dataset(config_val, mindrecord_file, batch_size=config_val.test_batch_size, + is_training=False) return dataset def train(): """Train fasterrcnn dataset.""" - config = DatasetConfig().to_dict() - config = config['_class_data'].train + config_train = DatasetConfig().to_dict() + config_train = config_train['_class_data'].train prefix = "FasterRcnn.mindrecord" - mindrecord_dir = config.mindrecord_dir + mindrecord_dir = config_train.mindrecord_dir mindrecord_file = os.path.join(mindrecord_dir, prefix + "0") print("CHECKING MINDRECORD FILES ...") rank = int(os.getenv('RANK_ID', '0')) @@ -72,28 +79,28 @@ def train(): if not os.path.isdir(mindrecord_dir): os.makedirs(mindrecord_dir) if config.dataset == "coco": - if os.path.isdir(config.coco_root): - if not os.path.exists(config.coco_root): + if os.path.isdir(config_train.coco_root): + if not os.path.exists(config_train.coco_root): logging.info("Please make sure config:coco_root is valid.") - raise ValueError(config.coco_root) - data_to_mindrecord_byte_image(config, "coco", True, prefix) + raise ValueError(config_train.coco_root) + data_to_mindrecord_byte_image(config_train, "coco", True, prefix) else: logging.info("coco_root not exits.") else: - if os.path.isdir(config.image_dir) and os.path.exists(config.anno_path): - if not os.path.exists(config.image_dir): + if os.path.isdir(config_train.image_dir) and os.path.exists(config_train.anno_path): + if not os.path.exists(config_train.image_dir): logging.info("Please make sure config:image_dir is valid.") - raise ValueError(config.image_dir) - data_to_mindrecord_byte_image(config, "other", True, prefix) + raise ValueError(config_train.image_dir) + data_to_mindrecord_byte_image(config_train, "other", True, prefix) else: logging.info("image_dir or anno_path not exits.") while not os.path.exists(mindrecord_file + ".db"): time.sleep(5) - dataset = create_fasterrcnn_dataset(config, mindrecord_file, batch_size=config.batch_size, + dataset = create_fasterrcnn_dataset(config_train, mindrecord_file, batch_size=config_train.batch_size, device_num=device_num, rank_id=rank, - num_parallel_workers=config.num_parallel_workers, - python_multiprocessing=config.python_multiprocessing) + num_parallel_workers=config_train.num_parallel_workers, + python_multiprocessing=config_train.python_multiprocessing) return dataset @@ -160,17 +167,17 @@ def _valid_epoch(self): gt_labels = data['label'] gt_num = data['valid_num'] output = self.model(img_data, img_metas, gt_bboxes, gt_labels, gt_num) - all_bbox = output[0] - all_label = output[1] - all_mask = output[2] + all_output_bbox = output[0] + all_output_label = output[1] + all_output_mask = output[2] for j in range(config.test_batch_size): - all_bbox_squee = np.squeeze(all_bbox.asnumpy()[j, :, :]) - all_label_squee = np.squeeze(all_label.asnumpy()[j, :, :]) - all_mask_squee = np.squeeze(all_mask.asnumpy()[j, :, :]) + all_output_bbox_squee = np.squeeze(all_output_bbox.asnumpy()[j, :, :]) + all_output_label_squee = np.squeeze(all_output_label.asnumpy()[j, :, :]) + all_output_mask_squee = 
np.squeeze(all_output_mask.asnumpy()[j, :, :]) - all_bboxes_tmp_mask = all_bbox_squee[all_mask_squee, :] - all_labels_tmp_mask = all_label_squee[all_mask_squee] + all_bboxes_tmp_mask = all_output_bbox_squee[all_output_mask_squee, :] + all_labels_tmp_mask = all_output_label_squee[all_output_mask_squee] if all_bboxes_tmp_mask.shape[0] > max_num: inds = np.argsort(-all_bboxes_tmp_mask[:, -1]) diff --git a/vega/algorithms/nas/sp_nas/src/dataset.py b/vega/algorithms/nas/sp_nas/src/dataset.py index 441d9a5..593850d 100644 --- a/vega/algorithms/nas/sp_nas/src/dataset.py +++ b/vega/algorithms/nas/sp_nas/src/dataset.py @@ -1,12 +1,18 @@ -# -*- coding:utf-8 -*- +# Copyright 2020 Huawei Technologies Co., Ltd +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================ -# Copyright (C) 2020. Huawei Technologies Co., Ltd. All rights reserved. -# This program is free software; you can redistribute it and/or modify -# it under the terms of the MIT License. -# This program is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# MIT License for more details. """FasterRcnn dataset.""" from __future__ import division @@ -34,8 +40,8 @@ def bbox_overlaps(bboxes1, bboxes2, mode='iou'): Returns: ious(ndarray): shape (n, k) """ - assert mode in ['iou', 'iof'] - + if mode not in ['iou', 'iof']: + raise ValueError('Mode is wrong.') bboxes1 = bboxes1.astype(np.float32) bboxes2 = bboxes2.astype(np.float32) rows = bboxes1.shape[0] @@ -170,13 +176,14 @@ def rescale_column(img, img_shape, gt_bboxes, gt_label, gt_num, config): pad_h = config.img_height - img_data.shape[0] pad_w = config.img_width - img_data.shape[1] - assert ((pad_h >= 0) and (pad_w >= 0)) - - pad_img_data = np.zeros((config.img_height, config.img_width, 3)).astype(img_data.dtype) - pad_img_data[0:img_data.shape[0], 0:img_data.shape[1], :] = img_data + if ((pad_h >= 0) and (pad_w >= 0)): + pad_img_data = np.zeros((config.img_height, config.img_width, 3)).astype(img_data.dtype) + pad_img_data[0:img_data.shape[0], 0:img_data.shape[1], :] = img_data - img_shape = (config.img_height, config.img_width, 1.0) - img_shape = np.asarray(img_shape, dtype=np.float32) + img_shape = (config.img_height, config.img_width, 1.0) + img_shape = np.asarray(img_shape, dtype=np.float32) + else: + raise ValueError('pad_h and pad_w are wrong.') return (pad_img_data, img_shape, gt_bboxes, gt_label, gt_num) @@ -190,14 +197,15 @@ def rescale_column_test(img, img_shape, gt_bboxes, gt_label, gt_num, config): pad_h = config.img_height - img_data.shape[0] pad_w = config.img_width - img_data.shape[1] - assert ((pad_h >= 0) and (pad_w >= 0)) - - pad_img_data = np.zeros((config.img_height, config.img_width, 3)).astype(img_data.dtype) - pad_img_data[0:img_data.shape[0], 0:img_data.shape[1], :] = img_data + if ((pad_h >= 0) and (pad_w >= 0)): - img_shape = np.append(img_shape, (scale_factor, 
scale_factor)) - img_shape = np.asarray(img_shape, dtype=np.float32) + pad_img_data = np.zeros((config.img_height, config.img_width, 3)).astype(img_data.dtype) + pad_img_data[0:img_data.shape[0], 0:img_data.shape[1], :] = img_data + img_shape = np.append(img_shape, (scale_factor, scale_factor)) + img_shape = np.asarray(img_shape, dtype=np.float32) + else: + raise ValueError('pad_h and pad_w are wrong.') return (pad_img_data, img_shape, gt_bboxes, gt_label, gt_num) diff --git a/vega/algorithms/nas/sp_nas/src/lr_schedule.py b/vega/algorithms/nas/sp_nas/src/lr_schedule.py index 2832211..4bdb7f6 100644 --- a/vega/algorithms/nas/sp_nas/src/lr_schedule.py +++ b/vega/algorithms/nas/sp_nas/src/lr_schedule.py @@ -1,12 +1,18 @@ # -*- coding:utf-8 -*- # Copyright (C) 2020. Huawei Technologies Co., Ltd. All rights reserved. -# This program is free software; you can redistribute it and/or modify -# it under the terms of the MIT License. -# This program is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# MIT License for more details. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. """Lr generator for fasterrcnn.""" import math diff --git a/vega/algorithms/nas/sp_nas/src/model_utils/config.py b/vega/algorithms/nas/sp_nas/src/model_utils/config.py index 84f88de..a8f6958 100644 --- a/vega/algorithms/nas/sp_nas/src/model_utils/config.py +++ b/vega/algorithms/nas/sp_nas/src/model_utils/config.py @@ -1,12 +1,18 @@ # -*- coding:utf-8 -*- # Copyright (C) 2020. Huawei Technologies Co., Ltd. All rights reserved. -# This program is free software; you can redistribute it and/or modify -# it under the terms of the MIT License. -# This program is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# MIT License for more details. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. """Parse arguments.""" diff --git a/vega/algorithms/nas/sp_nas/src/network_define.py b/vega/algorithms/nas/sp_nas/src/network_define.py index 63bb5a4..48f29dd 100644 --- a/vega/algorithms/nas/sp_nas/src/network_define.py +++ b/vega/algorithms/nas/sp_nas/src/network_define.py @@ -1,12 +1,17 @@ -# -*- coding:utf-8 -*- - -# Copyright (C) 2020. Huawei Technologies Co., Ltd. All rights reserved. 
-# This program is free software; you can redistribute it and/or modify -# it under the terms of the MIT License. -# This program is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# MIT License for more details. +# Copyright 2020 Huawei Technologies Co., Ltd +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================ """FasterRcnn training network wrapper.""" import time diff --git a/vega/algorithms/nas/sp_nas/src/util.py b/vega/algorithms/nas/sp_nas/src/util.py index d6872b2..cda614d 100644 --- a/vega/algorithms/nas/sp_nas/src/util.py +++ b/vega/algorithms/nas/sp_nas/src/util.py @@ -1,12 +1,17 @@ -# -*- coding:utf-8 -*- - -# Copyright (C) 2020. Huawei Technologies Co., Ltd. All rights reserved. -# This program is free software; you can redistribute it and/or modify -# it under the terms of the MIT License. -# This program is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# MIT License for more details. +# Copyright 2020 Huawei Technologies Co., Ltd +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# ============================================================================ """Coco eval for fasterrcnn.""" import json import numpy as np @@ -39,61 +44,63 @@ def coco_eval(result_files, result_types, coco, max_dets=(100, 300, 1000), singl if mmcv.is_str(coco): coco = COCO(coco) - assert isinstance(coco, COCO) - - for res_type in result_types: - result_file = result_files[res_type] - assert result_file.endswith('.json') - - coco_dets = coco.loadRes(result_file) - gt_img_ids = coco.getImgIds() - det_img_ids = coco_dets.getImgIds() - iou_type = 'bbox' if res_type == 'proposal' else res_type - cocoEval = COCOeval(coco, coco_dets, iou_type) - if res_type == 'proposal': - cocoEval.params.useCats = 0 - cocoEval.params.maxDets = list(max_dets) - - tgt_ids = gt_img_ids if not single_result else det_img_ids - - if single_result: - res_dict = dict() - for id_i in tgt_ids: + if isinstance(coco, COCO): + + for res_type in result_types: + result_file = result_files[res_type] + if result_file.endswith('.json'): + + coco_dets = coco.loadRes(result_file) + gt_img_ids = coco.getImgIds() + det_img_ids = coco_dets.getImgIds() + iou_type = 'bbox' if res_type == 'proposal' else res_type + cocoEval = COCOeval(coco, coco_dets, iou_type) + if res_type == 'proposal': + cocoEval.params.useCats = 0 + cocoEval.params.maxDets = list(max_dets) + + tgt_ids = gt_img_ids if not single_result else det_img_ids + + if single_result: + res_dict = dict() + for id_i in tgt_ids: + cocoEval = COCOeval(coco, coco_dets, iou_type) + if res_type == 'proposal': + cocoEval.params.useCats = 0 + cocoEval.params.maxDets = list(max_dets) + + cocoEval.params.imgIds = [id_i] + cocoEval.evaluate() + cocoEval.accumulate() + cocoEval.summarize() + res_dict.update({coco.imgs[id_i]['file_name']: cocoEval.stats[1]}) + cocoEval = COCOeval(coco, coco_dets, iou_type) if res_type == 'proposal': cocoEval.params.useCats = 0 cocoEval.params.maxDets = list(max_dets) - cocoEval.params.imgIds = [id_i] + cocoEval.params.imgIds = tgt_ids cocoEval.evaluate() cocoEval.accumulate() cocoEval.summarize() - res_dict.update({coco.imgs[id_i]['file_name']: cocoEval.stats[1]}) - - cocoEval = COCOeval(coco, coco_dets, iou_type) - if res_type == 'proposal': - cocoEval.params.useCats = 0 - cocoEval.params.maxDets = list(max_dets) - - cocoEval.params.imgIds = tgt_ids - cocoEval.evaluate() - cocoEval.accumulate() - cocoEval.summarize() - - summary_metrics = { - 'Precision/mAP': cocoEval.stats[0], - 'Precision/mAP@.50IOU': cocoEval.stats[1], - 'Precision/mAP@.75IOU': cocoEval.stats[2], - 'Precision/mAP (small)': cocoEval.stats[3], - 'Precision/mAP (medium)': cocoEval.stats[4], - 'Precision/mAP (large)': cocoEval.stats[5], - 'Recall/AR@1': cocoEval.stats[6], - 'Recall/AR@10': cocoEval.stats[7], - 'Recall/AR@100': cocoEval.stats[8], - 'Recall/AR@100 (small)': cocoEval.stats[9], - 'Recall/AR@100 (medium)': cocoEval.stats[10], - 'Recall/AR@100 (large)': cocoEval.stats[11], - } + + summary_metrics = { + 'Precision/mAP': cocoEval.stats[0], + 'Precision/mAP@.50IOU': cocoEval.stats[1], + 'Precision/mAP@.75IOU': cocoEval.stats[2], + 'Precision/mAP (small)': cocoEval.stats[3], + 'Precision/mAP (medium)': cocoEval.stats[4], + 'Precision/mAP (large)': cocoEval.stats[5], + 'Recall/AR@1': cocoEval.stats[6], + 'Recall/AR@10': cocoEval.stats[7], + 'Recall/AR@100': cocoEval.stats[8], + 'Recall/AR@100 (small)': cocoEval.stats[9], + 'Recall/AR@100 (medium)': cocoEval.stats[10], + 'Recall/AR@100 (large)': cocoEval.stats[11], + } + else: + raise ValueError('Type of coco is wrong.') return 
summary_metrics diff --git a/vega/algorithms/nas/sr_ea/conf.py b/vega/algorithms/nas/sr_ea/conf.py index ac8b36b..5c493f8 100644 --- a/vega/algorithms/nas/sr_ea/conf.py +++ b/vega/algorithms/nas/sr_ea/conf.py @@ -1,12 +1,18 @@ # -*- coding:utf-8 -*- # Copyright (C) 2020. Huawei Technologies Co., Ltd. All rights reserved. -# This program is free software; you can redistribute it and/or modify -# it under the terms of the MIT License. -# This program is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# MIT License for more details. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. """Defined Configs.""" from vega.common import ConfigSerializable diff --git a/vega/algorithms/nas/sr_ea/sr_ea_codec.py b/vega/algorithms/nas/sr_ea/sr_ea_codec.py index e62b126..3a9224a 100644 --- a/vega/algorithms/nas/sr_ea/sr_ea_codec.py +++ b/vega/algorithms/nas/sr_ea/sr_ea_codec.py @@ -1,12 +1,18 @@ # -*- coding:utf-8 -*- # Copyright (C) 2020. Huawei Technologies Co., Ltd. All rights reserved. -# This program is free software; you can redistribute it and/or modify -# it under the terms of the MIT License. -# This program is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# MIT License for more details. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. """Encode and decode the model config.""" from vega.core.search_algs.codec import Codec diff --git a/vega/algorithms/nas/sr_ea/sr_mutate.py b/vega/algorithms/nas/sr_ea/sr_mutate.py index b9b9a86..3291b75 100644 --- a/vega/algorithms/nas/sr_ea/sr_mutate.py +++ b/vega/algorithms/nas/sr_ea/sr_mutate.py @@ -1,22 +1,27 @@ # -*- coding:utf-8 -*- # Copyright (C) 2020. Huawei Technologies Co., Ltd. All rights reserved. -# This program is free software; you can redistribute it and/or modify -# it under the terms of the MIT License. -# This program is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# MIT License for more details. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. """Mutate part of SR_EA algorithm.""" import logging import random from copy import deepcopy - -from .conf import SRConfig from vega.common import ClassFactory, ClassType from vega.report import ReportServer from vega.core.search_algs import SearchAlgorithm +from .conf import SRConfig @ClassFactory.register(ClassType.SEARCH_ALGORITHM) diff --git a/vega/algorithms/nas/sr_ea/sr_random.py b/vega/algorithms/nas/sr_ea/sr_random.py index c643913..5c35920 100644 --- a/vega/algorithms/nas/sr_ea/sr_random.py +++ b/vega/algorithms/nas/sr_ea/sr_random.py @@ -1,12 +1,18 @@ # -*- coding:utf-8 -*- # Copyright (C) 2020. Huawei Technologies Co., Ltd. All rights reserved. -# This program is free software; you can redistribute it and/or modify -# it under the terms of the MIT License. -# This program is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# MIT License for more details. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. """Random search algorithm for SR_EA.""" import random diff --git a/vega/algorithms/nlp/__init__.py b/vega/algorithms/nlp/__init__.py deleted file mode 100644 index 6786090..0000000 --- a/vega/algorithms/nlp/__init__.py +++ /dev/null @@ -1,7 +0,0 @@ -from vega.common.class_factory import ClassFactory - - -ClassFactory.lazy_register("vega.algorithms.nlp", { - "bert_trainer_callback": ["BertTrainerCallback"], - "src.bert_for_pre_training": ["BertNetworkWithLoss"], -}) diff --git a/vega/algorithms/nlp/bert_trainer_callback.py b/vega/algorithms/nlp/bert_trainer_callback.py deleted file mode 100644 index d8c64d5..0000000 --- a/vega/algorithms/nlp/bert_trainer_callback.py +++ /dev/null @@ -1,287 +0,0 @@ -# -*- coding:utf-8 -*- - -# Copyright (C) 2020. Huawei Technologies Co., Ltd. All rights reserved. -# This program is free software; you can redistribute it and/or modify -# it under the terms of the MIT License. -# This program is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# MIT License for more details. 
- -"""The trainer program for Auto Lane.""" - -import logging -import os -from vega.common import ClassFactory, ClassType -from vega.trainer.trainer_ms import TrainerMs -from mindspore.train.callback import ModelCheckpoint, CheckpointConfig, LossMonitor -from mindspore.train import Model as MsModel -from mindspore.train.train_thor import ConvertModelUtils -from mindspore import context -from mindspore.nn.optim import Lamb, Momentum, AdamWeightDecay, thor -from mindspore.nn.wrap.loss_scale import DynamicLossScaleUpdateCell -import mindspore.dataset as de -import mindspore.dataset.transforms.c_transforms as C -import mindspore.nn as nn -import mindspore.common.dtype as mstype -from mindspore.ops import operations as P -from mindspore.common.parameter import Parameter -from mindspore.common.tensor import Tensor -from mindspore.train.serialization import load_checkpoint, load_param_into_net -from mindspore.nn.metrics import Metric -from .src import BertNetworkWithLoss, BertTrainOneStepCell, BertTrainOneStepWithLossScaleCell, \ - BertTrainAccumulationAllReduceEachWithLossScaleCell, \ - BertTrainAccumulationAllReducePostWithLossScaleCell, \ - BertTrainOneStepWithLossScaleCellForAdam, \ - AdamWeightDecayForBert, AdamWeightDecayOp -from .src.dataset import create_bert_dataset -from .src.utils import LossCallBack, BertLearningRate -from .src import BertModel, GetMaskedLMOutput - -logger = logging.getLogger(__name__) - - -class myMetric(Metric): - """Self-defined Metric as a callback.""" - - def __init__(self): - super(myMetric, self).__init__() - self.clear() - - def clear(self): - """Construct the trainer of Bert.""" - self.total_num = 0 - self.acc_num = 0 - - def update(self, *inputs): - """Construct the trainer of Bert.""" - total_num = self._convert_data(inputs[0]) - acc_num = self._convert_data(inputs[1]) - self.total_num = total_num - self.acc_num = acc_num - - def eval(self): - """Construct the trainer of Bert.""" - return self.acc_num / self.total_num - - -class GetLogProbs(nn.Cell): - """Get MaskedLM prediction scores.""" - - def __init__(self, config): - super(GetLogProbs, self).__init__() - self.bert = BertModel(config, False) - self.cls1 = GetMaskedLMOutput(config) - - def construct(self, input_ids, input_mask, token_type_id, masked_pos): - """Construct the trainer of Bert.""" - sequence_output, _, embedding_table = self.bert(input_ids, token_type_id, input_mask) - prediction_scores = self.cls1(sequence_output, embedding_table, masked_pos) - return prediction_scores - - -class BertPretrainEva(nn.Cell): - """Evaluate MaskedLM prediction scores.""" - - def __init__(self, config): - super(BertPretrainEva, self).__init__() - self.bert = GetLogProbs(config) - self.argmax = P.Argmax(axis=-1, output_type=mstype.int32) - self.equal = P.Equal() - self.mean = P.ReduceMean() - self.sum = P.ReduceSum() - self.total = Parameter(Tensor([0], mstype.float32)) - self.acc = Parameter(Tensor([0], mstype.float32)) - self.reshape = P.Reshape() - self.shape = P.Shape() - self.cast = P.Cast() - - def construct(self, input_ids, input_mask, token_type_id, masked_pos, masked_ids, masked_weights, nsp_label): - """Calculate prediction scores.""" - bs, _ = self.shape(input_ids) - probs = self.bert(input_ids, input_mask, token_type_id, masked_pos) - index = self.argmax(probs) - index = self.reshape(index, (bs, -1)) - eval_acc = self.equal(index, masked_ids) - eval_acc1 = self.cast(eval_acc, mstype.float32) - real_acc = eval_acc1 * masked_weights - acc = self.sum(real_acc) - total = self.sum(masked_weights) - 
self.total += total - self.acc += acc - return acc, self.total, self.acc - - -def get_enwiki_512_dataset(batch_size=1, repeat_count=1, distribute_file=''): - """Get enwiki dataset when seq_length is 512.""" - from .src.model_utils.config import config as cfg, bert_net_cfg - ds = de.TFRecordDataset([cfg.data_file], cfg.schema_file, columns_list=["input_ids", "input_mask", "segment_ids", - "masked_lm_positions", "masked_lm_ids", - "masked_lm_weights", - "next_sentence_labels"]) - type_cast_op = C.TypeCast(mstype.int32) - ds = ds.map(operations=type_cast_op, input_columns="segment_ids") - ds = ds.map(operations=type_cast_op, input_columns="input_mask") - ds = ds.map(operations=type_cast_op, input_columns="input_ids") - ds = ds.map(operations=type_cast_op, input_columns="masked_lm_ids") - ds = ds.map(operations=type_cast_op, input_columns="masked_lm_positions") - ds = ds.map(operations=type_cast_op, input_columns="next_sentence_labels") - ds = ds.repeat(repeat_count) - - # apply batch operations - ds = ds.batch(batch_size, drop_remainder=True) - return ds - - -def bert_predict(): - """Predict function.""" - from .src.model_utils.config import config as cfg, bert_net_cfg - devid = int(os.getenv('DEVICE_ID')) - context.set_context(mode=context.GRAPH_MODE, device_target="Ascend", device_id=devid) - dataset = get_enwiki_512_dataset(cfg.batch_size, 1) - net_for_pretraining = BertPretrainEva(bert_net_cfg) - net_for_pretraining.set_train(False) - param_dict = load_checkpoint(cfg.finetune_ckpt) - load_param_into_net(net_for_pretraining, param_dict) - model = MsModel(net_for_pretraining) - return model, dataset, net_for_pretraining - - -def _get_optimizer(args_opt, network): - """Get bert optimizer, support Lamb, Momentum, AdamWeightDecay.""" - from .src.model_utils.config import config as cfg, bert_net_cfg - if cfg.optimizer == 'Lamb': - lr_schedule = BertLearningRate(learning_rate=cfg.Lamb.learning_rate, - end_learning_rate=cfg.Lamb.end_learning_rate, - warmup_steps=cfg.Lamb.warmup_steps, - decay_steps=args_opt.train_steps, - power=cfg.Lamb.power) - params = network.trainable_params() - decay_params = list(filter(cfg.Lamb.decay_filter, params)) - other_params = list(filter(lambda x: not cfg.Lamb.decay_filter(x), params)) - group_params = [{'params': decay_params, 'weight_decay': cfg.Lamb.weight_decay}, - {'params': other_params}, - {'order_params': params}] - optimizer = Lamb(group_params, learning_rate=lr_schedule, eps=cfg.Lamb.eps) - elif cfg.optimizer == 'Momentum': - optimizer = Momentum(network.trainable_params(), learning_rate=cfg.Momentum.learning_rate, - momentum=cfg.Momentum.momentum) - elif cfg.optimizer == 'AdamWeightDecay': - lr_schedule = BertLearningRate(learning_rate=cfg.AdamWeightDecay.learning_rate, - end_learning_rate=cfg.AdamWeightDecay.end_learning_rate, - warmup_steps=cfg.AdamWeightDecay.warmup_steps, - decay_steps=args_opt.train_steps, - power=cfg.AdamWeightDecay.power) - params = network.trainable_params() - decay_params = list(filter(cfg.AdamWeightDecay.decay_filter, params)) - other_params = list(filter(lambda x: not cfg.AdamWeightDecay.decay_filter(x), params)) - group_params = [{'params': decay_params, 'weight_decay': cfg.AdamWeightDecay.weight_decay}, - {'params': other_params, 'weight_decay': 0.0}, - {'order_params': params}] - if args_opt.enable_lossscale == "true" and args_opt.device_target == 'GPU': - optimizer = AdamWeightDecayForBert(group_params, learning_rate=lr_schedule, eps=cfg.AdamWeightDecay.eps) - elif context.get_context("mode") == context.PYNATIVE_MODE and 
args_opt.device_target == 'GPU': - optimizer = AdamWeightDecayOp(group_params, learning_rate=lr_schedule, eps=cfg.AdamWeightDecay.eps) - else: - optimizer = AdamWeightDecay(group_params, learning_rate=lr_schedule, eps=cfg.AdamWeightDecay.eps) - elif cfg.optimizer == "Thor": - from .src.utils import get_bert_thor_lr, get_bert_thor_damping - lr = get_bert_thor_lr(cfg.Thor.lr_max, cfg.Thor.lr_min, cfg.Thor.lr_power, cfg.Thor.lr_total_steps) - damping = get_bert_thor_damping(cfg.Thor.damping_max, cfg.Thor.damping_min, cfg.Thor.damping_power, - cfg.Thor.damping_total_steps) - split_indices = None - if bert_net_cfg.num_hidden_layers == 12 and not bert_net_cfg.use_relative_positions: - split_indices = [28, 55, 77] - elif bert_net_cfg.num_hidden_layers == 24 and not bert_net_cfg.use_relative_positions: - split_indices = [38, 93, 149] - optimizer = thor(network, lr, damping, cfg.Thor.momentum, - cfg.Thor.weight_decay, cfg.Thor.loss_scale, cfg.batch_size, - decay_filter=lambda x: 'layernorm' not in x.name.lower() and 'bias' not in x.name.lower(), - split_indices=split_indices, enable_clip_grad=True, frequency=cfg.Thor.frequency) - else: - raise ValueError("Don't support optimizer {}, only support [Lamb, Momentum, AdamWeightDecay, Thor]". - format(cfg.optimizer)) - return optimizer - - -@ClassFactory.register(ClassType.TRAINER) -class BertTrainerCallback(TrainerMs): - """Construct the trainer of Bert.""" - - disable_callbacks = ['ProgressLogger'] - - def build(self): - """Construct the trainer of Bert.""" - logging.debug("Trainer Config: {}".format(self.config)) - self._init_hps() - self.do_validation = False - self.use_syncbn = self.config.syncbn - if not self.train_loader: - self.train_loader = create_bert_dataset(int(os.environ.get("RANK_SIZE", "1")), - int(os.environ.get("RANK_ID", "0")), True, - '/root/lzc/zhwiki/wikidata/new/', '', 32) - if not self.valid_loader: - self.valid_loader = create_bert_dataset(int(os.environ.get("RANK_SIZE", "1")), - int(os.environ.get("RANK_ID", "0")), True, - '/root/lzc/zhwiki/wikidata/new/', '', 32) - self.batch_num_train = self.train_loader.get_dataset_size() - self.batch_num_valid = self.valid_loader.get_dataset_size() - - def _train_epoch(self): - """Construct the trainer of Bert.""" - from .src.model_utils.config import config as cfg, bert_net_cfg - cfg.train_steps = cfg.epoch_size * self.train_loader.get_dataset_size() // cfg.accumulation_steps - optimizer = _get_optimizer(cfg, self.model) - - if cfg.enable_lossscale == "true": - update_cell = DynamicLossScaleUpdateCell(loss_scale_value=cfg.loss_scale_value, - scale_factor=cfg.scale_factor, - scale_window=cfg.scale_window) - accumulation_steps = cfg.accumulation_steps - enable_global_norm = cfg.enable_global_norm - if accumulation_steps <= 1: - if cfg.optimizer == 'AdamWeightDecay' and cfg.device_target == 'GPU': - net_with_grads = BertTrainOneStepWithLossScaleCellForAdam(self.model, optimizer=optimizer, - scale_update_cell=update_cell) - else: - net_with_grads = BertTrainOneStepWithLossScaleCell(self.model, optimizer=optimizer, - scale_update_cell=update_cell) - else: - allreduce_post = cfg.distribute == "false" or cfg.allreduce_post_accumulation == "true" - net_with_accumulation = (BertTrainAccumulationAllReducePostWithLossScaleCell if allreduce_post else - BertTrainAccumulationAllReduceEachWithLossScaleCell) - net_with_grads = net_with_accumulation(self.model, optimizer=optimizer, - scale_update_cell=update_cell, - accumulation_steps=accumulation_steps, - enable_global_norm=enable_global_norm) - else: - 
net_with_grads = BertTrainOneStepCell(self.model, optimizer=optimizer, enable_clip_grad=True) - if cfg.optimizer == "Thor": - net_with_grads = BertTrainOneStepCell(self.model, optimizer=optimizer, sens=cfg.Thor.loss_scale, - enable_clip_grad=False) - - config_ck = CheckpointConfig(save_checkpoint_steps=self.config.save_steps, keep_checkpoint_max=1) - save_path = self.get_local_worker_path(self.step_name, self.worker_id) - ckpoint_cb = ModelCheckpoint(config=config_ck, directory=save_path) - loss_cb = LossMonitor() - callback_list = [ckpoint_cb, loss_cb] - model = MsModel(net_with_grads) - self.ms_model = ConvertModelUtils().convert_to_thor_model(model, network=net_with_grads, optimizer=optimizer) - try: - self.ms_model.train(epoch=self.epochs, - train_dataset=self.train_loader, - callbacks=callback_list, - dataset_sink_mode=False) - except RuntimeError as e: - logging.warning(f"failed to train the model, skip it, message: {str(e)}") - - def _valid_epoch(self): - """Construct the trainer of Bert.""" - _, dataset, net_for_pretraining = bert_predict() - net = MsModel(net_for_pretraining, eval_network=net_for_pretraining, eval_indexes=[0, 1, 2], - metrics={'name': myMetric()}) - res = net.eval(dataset, dataset_sink_mode=False) - logging.info('Accuracy is: {}'.format(res)) - valid_logs = dict() - valid_logs['cur_valid_perfs'] = res - self.callbacks.after_valid(valid_logs) diff --git a/vega/algorithms/nlp/src/CRF.py b/vega/algorithms/nlp/src/CRF.py deleted file mode 100644 index 0f9e590..0000000 --- a/vega/algorithms/nlp/src/CRF.py +++ /dev/null @@ -1,170 +0,0 @@ -# Copyright 2020 Huawei Technologies Co., Ltd -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# ============================================================================ - -"""CRF script.""" - -import numpy as np -import mindspore.nn as nn -from mindspore.ops import operations as P -from mindspore.common.tensor import Tensor -from mindspore.common.parameter import Parameter -import mindspore.common.dtype as mstype - - -class CRF(nn.Cell): - """ - Condit Random Field. - - Args: - tag_to_index: The dict for tag to index mapping with extra "" and ""sign. - batch_size: Batch size, i.e., the length of the first dimension. - seq_length: Sequence length, i.e., the length of the second dimension. - is_training: Specifies whether to use training mode. - Returns: - Training mode: Tensor, total loss. - Evaluation mode: Tuple, the index for each step with the highest score; Tuple, the index for the last - step with the highest score. 
- """ - - def __init__(self, tag_to_index, batch_size=1, seq_length=128, is_training=True): - - super(CRF, self).__init__() - self.target_size = len(tag_to_index) - self.is_training = is_training - self.tag_to_index = tag_to_index - self.batch_size = batch_size - self.seq_length = seq_length - self.START_TAG = "" - self.STOP_TAG = "" - self.START_VALUE = Tensor(self.target_size - 2, dtype=mstype.int32) - self.STOP_VALUE = Tensor(self.target_size - 1, dtype=mstype.int32) - transitions = np.random.normal(size=(self.target_size, self.target_size)).astype(np.float32) - transitions[tag_to_index[self.START_TAG], :] = -10000 - transitions[:, tag_to_index[self.STOP_TAG]] = -10000 - self.transitions = Parameter(Tensor(transitions)) - self.cat = P.Concat(axis=-1) - self.argmax = P.ArgMaxWithValue(axis=-1) - self.log = P.Log() - self.exp = P.Exp() - self.sum = P.ReduceSum() - self.tile = P.Tile() - self.reduce_sum = P.ReduceSum(keep_dims=True) - self.reshape = P.Reshape() - self.expand = P.ExpandDims() - self.mean = P.ReduceMean() - init_alphas = np.ones(shape=(self.batch_size, self.target_size)) * -10000.0 - init_alphas[:, self.tag_to_index[self.START_TAG]] = 0. - self.init_alphas = Tensor(init_alphas, dtype=mstype.float32) - self.cast = P.Cast() - self.reduce_max = P.ReduceMax(keep_dims=True) - self.on_value = Tensor(1.0, dtype=mstype.float32) - self.off_value = Tensor(0.0, dtype=mstype.float32) - self.onehot = P.OneHot() - - def log_sum_exp(self, logits): - """Compute the log_sum_exp score for Normalization factor.""" - max_score = self.reduce_max(logits, -1) # 16 5 5 - score = self.log(self.reduce_sum(self.exp(logits - max_score), -1)) - score = max_score + score - return score - - def _realpath_score(self, features, label): - """Compute the emission and transition score for the real path.""" - label = label * 1 - concat_A = self.tile(self.reshape(self.START_VALUE, (1,)), (self.batch_size,)) - concat_A = self.reshape(concat_A, (self.batch_size, 1)) - labels = self.cat((concat_A, label)) - onehot_label = self.onehot(label, self.target_size, self.on_value, self.off_value) - emits = features * onehot_label - labels = self.onehot(labels, self.target_size, self.on_value, self.off_value) - label1 = labels[:, 1:, :] - label2 = labels[:, :self.seq_length, :] - label1 = self.expand(label1, 3) - label2 = self.expand(label2, 2) - label_trans = label1 * label2 - transitions = self.expand(self.expand(self.transitions, 0), 0) - trans = transitions * label_trans - score = self.sum(emits, (1, 2)) + self.sum(trans, (1, 2, 3)) - stop_value_index = labels[:, (self.seq_length - 1):self.seq_length, :] - stop_value = self.transitions[(self.target_size - 1):self.target_size, :] - stop_score = stop_value * self.reshape(stop_value_index, (self.batch_size, self.target_size)) - score = score + self.sum(stop_score, 1) - score = self.reshape(score, (self.batch_size, -1)) - return score - - def _normalization_factor(self, features): - """Compute the total score for all the paths.""" - forward_var = self.init_alphas - forward_var = self.expand(forward_var, 1) - for idx in range(self.seq_length): - feat = features[:, idx:(idx + 1), :] - emit_score = self.reshape(feat, (self.batch_size, self.target_size, 1)) - next_tag_var = emit_score + self.transitions + forward_var - forward_var = self.log_sum_exp(next_tag_var) - forward_var = self.reshape(forward_var, (self.batch_size, 1, self.target_size)) - terminal_var = forward_var + self.reshape(self.transitions[(self.target_size - 1):self.target_size, :], (1, -1)) - alpha = 
self.log_sum_exp(terminal_var) - alpha = self.reshape(alpha, (self.batch_size, -1)) - return alpha - - def _decoder(self, features): - """Viterbi decode for evaluation.""" - backpointers = () - forward_var = self.init_alphas - for idx in range(self.seq_length): - feat = features[:, idx:(idx + 1), :] - feat = self.reshape(feat, (self.batch_size, self.target_size)) - bptrs_t = () - - next_tag_var = self.expand(forward_var, 1) + self.transitions - best_tag_id, best_tag_value = self.argmax(next_tag_var) - bptrs_t += (best_tag_id,) - forward_var = best_tag_value + feat - - backpointers += (bptrs_t,) - terminal_var = forward_var + self.reshape(self.transitions[(self.target_size - 1):self.target_size, :], (1, -1)) - best_tag_id, _ = self.argmax(terminal_var) - return backpointers, best_tag_id - - def construct(self, features, label): - """Construct the trainer of Bert.""" - if self.is_training: - forward_score = self._normalization_factor(features) - gold_score = self._realpath_score(features, label) - return_value = self.mean(forward_score - gold_score) - else: - path_list, tag = self._decoder(features) - return_value = path_list, tag - return return_value - - -def postprocess(backpointers, best_tag_id): - """Do postprocess.""" - best_tag_id = best_tag_id.asnumpy() - batch_size = len(best_tag_id) - best_path = [] - for i in range(batch_size): - best_path.append([]) - best_local_id = best_tag_id[i] - best_path[-1].append(best_local_id) - for bptrs_t in reversed(backpointers): - bptrs_t = bptrs_t[0].asnumpy() - local_idx = bptrs_t[i] - best_local_id = local_idx[best_local_id] - best_path[-1].append(best_local_id) - # Pop off the start tag (we dont want to return that to the caller) - best_path[-1].pop() - best_path[-1].reverse() - return best_path diff --git a/vega/algorithms/nlp/src/__init__.py b/vega/algorithms/nlp/src/__init__.py deleted file mode 100644 index 0e3f1ab..0000000 --- a/vega/algorithms/nlp/src/__init__.py +++ /dev/null @@ -1,37 +0,0 @@ -# Copyright 2020 Huawei Technologies Co., Ltd -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
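# Illustrative sketch (hypothetical names, plain NumPy): the Viterbi decode and
# `postprocess` deleted above keep, for every step, the best previous tag for each
# current tag (the "backpointers"), then walk them backwards from the best final tag
# to recover the highest-scoring path.
import numpy as np

def viterbi_decode(emissions, transitions):
    seq_len, num_tags = emissions.shape
    score = emissions[0].copy()
    backpointers = []
    for t in range(1, seq_len):
        # candidate[i, j] = score of ending at tag j having come from tag i
        candidate = score[:, None] + transitions + emissions[t][None, :]
        backpointers.append(candidate.argmax(axis=0))
        score = candidate.max(axis=0)
    best_tag = int(score.argmax())
    best_path = [best_tag]
    for bp in reversed(backpointers):
        best_tag = int(bp[best_tag])
        best_path.append(best_tag)
    best_path.reverse()
    return best_path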
-# ============================================================================ -"""Bert Init.""" -from .bert_for_pre_training import BertNetworkWithLoss, BertPreTraining, \ - BertPretrainingLoss, GetMaskedLMOutput, GetNextSentenceOutput, \ - BertTrainOneStepCell, BertTrainOneStepWithLossScaleCell, \ - BertTrainAccumulationAllReduceEachWithLossScaleCell, \ - BertTrainAccumulationAllReducePostWithLossScaleCell, \ - BertTrainOneStepWithLossScaleCellForAdam -from .bert_model import BertAttention, BertConfig, BertEncoderCell, BertModel, \ - BertOutput, BertSelfAttention, BertTransformer, EmbeddingLookup, \ - EmbeddingPostprocessor, RelaPosEmbeddingsGenerator, RelaPosMatrixGenerator, \ - SaturateCast, CreateAttentionMaskFromInputMask -from .adam import AdamWeightDecayForBert, AdamWeightDecayOp -__all__ = [ - "BertNetworkWithLoss", "BertPreTraining", "BertPretrainingLoss", - "GetMaskedLMOutput", "GetNextSentenceOutput", "BertTrainOneStepCell", - "BertTrainOneStepWithLossScaleCell", "BertTrainAccumulationAllReduceEachWithLossScaleCell", - "BertTrainAccumulationAllReducePostWithLossScaleCell", - "BertAttention", "BertConfig", "BertEncoderCell", "BertModel", "BertOutput", - "BertSelfAttention", "BertTransformer", "EmbeddingLookup", - "EmbeddingPostprocessor", "RelaPosEmbeddingsGenerator", "AdamWeightDecayForBert", - "RelaPosMatrixGenerator", "SaturateCast", "CreateAttentionMaskFromInputMask", - "BertTrainOneStepWithLossScaleCellForAdam", "AdamWeightDecayOp" -] diff --git a/vega/algorithms/nlp/src/adam.py b/vega/algorithms/nlp/src/adam.py deleted file mode 100644 index 6b2c0f2..0000000 --- a/vega/algorithms/nlp/src/adam.py +++ /dev/null @@ -1,407 +0,0 @@ -# Copyright 2021 Huawei Technologies Co., Ltd -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# ============================================================================ -"""AdamWeightDecayForBert, a customized Adam for bert. 
Input: gradient, overflow flag.""" - -import numpy as np - -from mindspore.common import dtype as mstype -from mindspore.ops import operations as P -from mindspore.ops import composite as C -from mindspore.ops import functional as F -from mindspore.common.tensor import Tensor -from mindspore._checkparam import Validator as validator -from mindspore._checkparam import Rel -from mindspore.nn.optim.optimizer import Optimizer - -_adam_opt = C.MultitypeFuncGraph("adam_opt") -_scaler_one = Tensor(1, mstype.int32) -_scaler_ten = Tensor(10, mstype.float32) - - -@_adam_opt.register("Tensor", "Tensor", "Tensor", "Tensor", "Number", "Tensor", "Tensor", "Tensor", - "Tensor", "Bool", "Bool") -def _update_run_kernel(beta1, beta2, eps, lr, weight_decay, param, m, v, gradient, decay_flags, optim_filter): - """Update parameters by AdamWeightDecay op.""" - if optim_filter: - adam = P.AdamWeightDecay() - if decay_flags: - next_param = adam(param, m, v, lr, beta1, beta2, eps, Tensor(weight_decay, mstype.float32), gradient) - else: - next_param = adam(param, m, v, lr, beta1, beta2, eps, Tensor(0.0, mstype.float32), gradient) - return next_param - return gradient - - -@_adam_opt.register("Tensor", "Tensor", "Tensor", "Tensor", "Tensor", "Number", "Tensor", "Tensor", "Tensor", - "Tensor", "Bool", "Bool") -def _update_run_op(beta1, beta2, eps, lr, overflow, weight_decay, param, m, v, gradient, decay_flag, optim_filter): - """ - Update parameters. - - Args: - beta1 (Tensor): The exponential decay rate for the 1st moment estimations. Should be in range (0.0, 1.0). - beta2 (Tensor): The exponential decay rate for the 2nd moment estimations. Should be in range (0.0, 1.0). - eps (Tensor): Term added to the denominator to improve numerical stability. Should be greater than 0. - lr (Tensor): Learning rate. - overflow (Tensor): Whether overflow occurs. - weight_decay (Number): Weight decay. Should be equal to or greater than 0. - param (Tensor): Parameters. - m (Tensor): m value of parameters. - v (Tensor): v value of parameters. - gradient (Tensor): Gradient of parameters. - decay_flag (bool): Applies weight decay or not. - optim_filter (bool): Applies parameter update or not. - - Returns: - Tensor, the new value of v after updating. 
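# Illustrative sketch (hypothetical, plain NumPy; the overflow branch handled by the
# fused op above is omitted): one AdamWeightDecay update as described in the Args of
# `_update_run_op` -- no bias correction, with decoupled weight decay added on top of
# the Adam direction before the learning-rate step.
import numpy as np

def adamw_step(param, m, v, grad, lr=1e-3, beta1=0.9, beta2=0.999,
               eps=1e-6, weight_decay=0.01):
    m = beta1 * m + (1.0 - beta1) * grad
    v = beta2 * v + (1.0 - beta2) * grad ** 2
    update = m / (np.sqrt(v) + eps)
    update = update + weight_decay * param      # decoupled weight decay
    param = param - lr * update
    return param, m, v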
- """ - if optim_filter: - op_mul = P.Mul() - op_square = P.Square() - op_sqrt = P.Sqrt() - op_cast = P.Cast() - op_reshape = P.Reshape() - op_shape = P.Shape() - op_select = P.Select() - - param_fp32 = op_cast(param, mstype.float32) - m_fp32 = op_cast(m, mstype.float32) - v_fp32 = op_cast(v, mstype.float32) - gradient_fp32 = op_cast(gradient, mstype.float32) - - cond = op_cast(F.fill(mstype.int32, op_shape(m_fp32), 1) * op_reshape(overflow, (())), mstype.bool_) - next_m = op_mul(beta1, m_fp32) + op_select(cond, m_fp32, - op_mul(op_cast(F.tuple_to_array((1.0,)), mstype.float32) - beta1, - gradient_fp32)) - - next_v = op_mul(beta2, v_fp32) + op_select(cond, v_fp32, - op_mul(op_cast(F.tuple_to_array((1.0,)), mstype.float32) - beta2, - op_square(gradient_fp32))) - - update = next_m / (eps + op_sqrt(next_v)) - if decay_flag: - update = op_mul(weight_decay, param_fp32) + update - - update_with_lr = op_mul(lr, update) - zeros = F.fill(mstype.float32, op_shape(param_fp32), 0) - next_param = param_fp32 - op_select(cond, zeros, op_reshape(update_with_lr, op_shape(param_fp32))) - - next_param = F.depend(next_param, F.assign(param, op_cast(next_param, F.dtype(param)))) - next_param = F.depend(next_param, F.assign(m, op_cast(next_m, F.dtype(m)))) - next_param = F.depend(next_param, F.assign(v, op_cast(next_v, F.dtype(v)))) - - return op_cast(next_param, F.dtype(param)) - return gradient - - -@_adam_opt.register("Function", "Function", "Function", "Function", "Bool", "Bool", "Bool", "Tensor", "Tensor", - "Tensor", "Tensor", "Tensor", "Tensor", "RowTensor", "Tensor", "Tensor", "Tensor", "Bool", "Bool") -def _run_opt_with_sparse(opt, sparse_opt, push, pull, use_locking, use_nesterov, target, beta1_power, - beta2_power, beta1, beta2, eps, lr, gradient, param, m, v, ps_parameter, cache_enable): - """Apply sparse adam optimizer to the weight parameter when the gradient is sparse.""" - success = True - indices = gradient.indices - values = gradient.values - if ps_parameter and not cache_enable: - op_shape = P.Shape() - shapes = (op_shape(param), op_shape(m), op_shape(v), - op_shape(beta1_power), op_shape(beta2_power), op_shape(lr), op_shape(beta1), - op_shape(beta2), op_shape(eps), op_shape(values), op_shape(indices)) - success = F.depend(success, pull(push((beta1_power, beta2_power, lr, beta1, beta2, - eps, values, indices), shapes), param)) - return success - - if not target: - success = F.depend(success, sparse_opt(param, m, v, beta1_power, beta2_power, lr, beta1, beta2, - eps, values, indices)) - else: - op_mul = P.Mul() - op_square = P.Square() - op_sqrt = P.Sqrt() - scatter_add = P.ScatterAdd(use_locking) - - success = F.depend(success, F.assign(m, op_mul(beta1, m))) - success = F.depend(success, F.assign(v, op_mul(beta2, v))) - - grad_indices = gradient.indices - grad_value = gradient.values - - next_m = scatter_add(m, - grad_indices, - op_mul(F.tuple_to_array((1.0,)) - beta1, grad_value)) - - next_v = scatter_add(v, - grad_indices, - op_mul(F.tuple_to_array((1.0,)) - beta2, op_square(grad_value))) - - if use_nesterov: - m_temp = next_m * _scaler_ten - F.assign(m, op_mul(beta1, next_m)) - div_value = scatter_add(m, - op_mul(grad_indices, _scaler_one), - op_mul(F.tuple_to_array((1.0,)) - beta1, grad_value)) - param_update = div_value / (op_sqrt(next_v) + eps) - F.assign(m, m_temp / _scaler_ten) - else: - param_update = next_m / (op_sqrt(next_v) + eps) - - lr_t = lr * op_sqrt(1 - beta2_power) / (1 - beta1_power) - next_param = param - lr_t * param_update - - success = F.depend(success, F.assign(param, 
next_param)) - success = F.depend(success, F.assign(m, next_m)) - success = F.depend(success, F.assign(v, next_v)) - - return success - - -@_adam_opt.register("Function", "Function", "Function", "Function", "Bool", "Bool", "Bool", "Tensor", "Tensor", - "Tensor", "Tensor", "Tensor", "Tensor", "Tensor", "Tensor", "Tensor", "Tensor", "Bool", "Bool") -def _run_opt_with_one_number(opt, sparse_opt, push, pull, use_locking, use_nesterov, target, - beta1_power, beta2_power, beta1, beta2, eps, lr, gradient, param, - moment1, moment2, ps_parameter, cache_enable): - """Apply adam optimizer to the weight parameter using Tensor.""" - success = True - if ps_parameter and not cache_enable: - op_shape = P.Shape() - success = F.depend(success, pull(push((beta1_power, beta2_power, lr, beta1, beta2, eps, gradient), - (op_shape(param), op_shape(moment1), op_shape(moment2))), param)) - else: - success = F.depend(success, opt(param, moment1, moment2, beta1_power, beta2_power, lr, beta1, beta2, - eps, gradient)) - return success - - -@_adam_opt.register("Function", "Tensor", "Tensor", "Tensor", "Tensor", "Tensor", "Tensor", "Tensor", "Tensor", - "Tensor", "Tensor") -def _run_off_load_opt(opt, beta1_power, beta2_power, beta1, beta2, eps, lr, gradient, param, moment1, moment2): - """Apply AdamOffload optimizer to the weight parameter using Tensor.""" - success = True - delat_param = opt(moment1, moment2, beta1_power, beta2_power, lr, beta1, beta2, eps, gradient) - success = F.depend(success, F.assign_add(param, delat_param)) - return success - - -def _check_param_value(beta1, beta2, eps, prim_name): - """Check the type of inputs.""" - validator.check_value_type("beta1", beta1, [float], prim_name) - validator.check_value_type("beta2", beta2, [float], prim_name) - validator.check_value_type("eps", eps, [float], prim_name) - validator.check_float_range(beta1, 0.0, 1.0, Rel.INC_NEITHER, "beta1", prim_name) - validator.check_float_range(beta2, 0.0, 1.0, Rel.INC_NEITHER, "beta2", prim_name) - validator.check_positive_float(eps, "eps", prim_name) - - -class AdamWeightDecayForBert(Optimizer): - """ - Implement the Adam algorithm to fix the weight decay. - - Args: - params (Union[list[Parameter], list[dict]]): When the `params` is a list of `Parameter` which will be updated, - the element in `params` must be class `Parameter`. When the `params` is a list of `dict`, the "params", - "lr", "weight_decay" and "order_params" are the keys can be parsed. - - - params: Required. The value must be a list of `Parameter`. - - - lr: Optional. If "lr" is in the keys, the value of the corresponding learning rate will be used. - If not, the `learning_rate` in the API will be used. - - - weight_decay: Optional. If "weight_decay" is in the keys, the value of the corresponding weight decay - will be used. If not, the `weight_decay` in the API will be used. - - - order_params: Optional. If "order_params" is in the keys, the value must be the order of parameters and - the order will be followed in the optimizer. There are no other keys in the `dict` and the parameters - which in the 'order_params' must be in one of group parameters. - - learning_rate (Union[float, Tensor, Iterable, LearningRateSchedule]): A value or a graph for the learning rate. - When the learning_rate is an Iterable or a Tensor in a 1D dimension, use the dynamic learning rate, then - the i-th step will take the i-th value as the learning rate. 
When the learning_rate is LearningRateSchedule, - use dynamic learning rate, the i-th learning rate will be calculated during the process of training - according to the formula of LearningRateSchedule. When the learning_rate is a float or a Tensor in a zero - dimension, use fixed learning rate. Other cases are not supported. The float learning rate must be - equal to or greater than 0. If the type of `learning_rate` is int, it will be converted to float. - Default: 1e-3. - beta1 (float): The exponential decay rate for the 1st moment estimations. Default: 0.9. - Should be in range (0.0, 1.0). - beta2 (float): The exponential decay rate for the 2nd moment estimations. Default: 0.999. - Should be in range (0.0, 1.0). - eps (float): Term added to the denominator to improve numerical stability. Default: 1e-6. - Should be greater than 0. - weight_decay (float): Weight decay (L2 penalty). It must be equal to or greater than 0. Default: 0.0. - - Inputs: - - **gradients** (tuple[Tensor]) - The gradients of `params`, the shape is the same as `params`. - - **overflow** (tuple[Tensor]) - The overflow flag in dynamiclossscale. - - Outputs: - tuple[bool], all elements are True. - - Supported Platforms: - ``Ascend`` ``GPU`` - - Examples: - >>> net = Net() - >>> #1) All parameters use the same learning rate and weight decay - >>> optim = AdamWeightDecay(params=net.trainable_params()) - >>> - >>> #2) Use parameter groups and set different values - >>> conv_params = list(filter(lambda x: 'conv' in x.name, net.trainable_params())) - >>> no_conv_params = list(filter(lambda x: 'conv' not in x.name, net.trainable_params())) - >>> group_params = [{'params': conv_params, 'weight_decay': 0.01}, - ... {'params': no_conv_params, 'lr': 0.01}, - ... {'order_params': net.trainable_params()}] - >>> optim = AdamWeightDecay(group_params, learning_rate=0.1, weight_decay=0.0) - >>> # The conv_params's parameters will use default learning rate of 0.1 and weight decay of 0.01. - >>> # The no_conv_params's parameters will use learning rate of 0.01 and default weight decay of 0.0. - >>> # The final parameters order in which the optimizer will be followed is the value of 'order_params'. 
- >>> - >>> loss = nn.SoftmaxCrossEntropyWithLogits() - >>> model = Model(net, loss_fn=loss, optimizer=optim) - """ - - def __init__(self, params, learning_rate=1e-3, beta1=0.9, beta2=0.999, eps=1e-6, weight_decay=0.0): - super(AdamWeightDecayForBert, self).__init__(learning_rate, params, weight_decay) - _check_param_value(beta1, beta2, eps, self.cls_name) - self.beta1 = Tensor(np.array([beta1]).astype(np.float32)) - self.beta2 = Tensor(np.array([beta2]).astype(np.float32)) - self.eps = Tensor(np.array([eps]).astype(np.float32)) - self.moments1 = self.parameters.clone(prefix="adam_m", init='zeros') - self.moments2 = self.parameters.clone(prefix="adam_v", init='zeros') - self.hyper_map = C.HyperMap() - self.op_select = P.Select() - self.op_cast = P.Cast() - self.op_reshape = P.Reshape() - self.op_shape = P.Shape() - - def construct(self, gradients, overflow): - """Construct the trainer of Bert.""" - lr = self.get_lr() - cond = self.op_cast(F.fill(mstype.int32, self.op_shape(self.beta1), 1) - * self.op_reshape(overflow, (())), mstype.bool_) - beta1 = self.op_select(cond, self.op_cast(F.tuple_to_array((1.0,)), mstype.float32), self.beta1) - beta2 = self.op_select(cond, self.op_cast(F.tuple_to_array((1.0,)), mstype.float32), self.beta2) - if self.is_group: - if self.is_group_lr: - optim_result = self.hyper_map(F.partial(_adam_opt, self.beta1, self.beta2, self.eps), - lr, self.weight_decay, self.parameters, self.moments1, self.moments2, - gradients, self.decay_flags, self.optim_filter) - else: - optim_result = self.hyper_map(F.partial(_adam_opt, beta1, beta2, self.eps, lr, overflow), - self.weight_decay, self.parameters, self.moments1, self.moments2, - gradients, self.decay_flags, self.optim_filter) - else: - optim_result = self.hyper_map(F.partial(_adam_opt, self.beta1, self.beta2, self.eps, lr, self.weight_decay), - self.parameters, self.moments1, self.moments2, - gradients, self.decay_flags, self.optim_filter) - if self.use_parallel: - self.broadcast_params(optim_result) - return optim_result - - -class AdamWeightDecayOp(Optimizer): - """ - Implement the Adam algorithm to fix the weight decay. It is a complete operator, not a combination of other ops. - - Args: - params (Union[list[Parameter], list[dict]]): When the `params` is a list of `Parameter` which will be updated, - the element in `params` must be class `Parameter`. When the `params` is a list of `dict`, the "params", - "lr", "weight_decay" and "order_params" are the keys can be parsed. - - - params: Required. The value must be a list of `Parameter`. - - - lr: Optional. If "lr" is in the keys, the value of the corresponding learning rate will be used. - If not, the `learning_rate` in the API will be used. - - - weight_decay: Optional. If "weight_decay" is in the keys, the value of the corresponding weight decay - will be used. If not, the `weight_decay` in the API will be used. - - - order_params: Optional. If "order_params" is in the keys, the value must be the order of parameters and - the order will be followed in the optimizer. There are no other keys in the `dict` and the parameters - which in the 'order_params' must be in one of group parameters. - - learning_rate (Union[float, Tensor, Iterable, LearningRateSchedule]): A value or a graph for the learning rate. - When the learning_rate is an Iterable or a Tensor in a 1D dimension, use the dynamic learning rate, then - the i-th step will take the i-th value as the learning rate. 
When the learning_rate is LearningRateSchedule, - use dynamic learning rate, the i-th learning rate will be calculated during the process of training - according to the formula of LearningRateSchedule. When the learning_rate is a float or a Tensor in a zero - dimension, use fixed learning rate. Other cases are not supported. The float learning rate must be - equal to or greater than 0. If the type of `learning_rate` is int, it will be converted to float. - Default: 1e-3. - beta1 (float): The exponential decay rate for the 1st moment estimations. Default: 0.9. - Should be in range (0.0, 1.0). - beta2 (float): The exponential decay rate for the 2nd moment estimations. Default: 0.999. - Should be in range (0.0, 1.0). - eps (float): Term added to the denominator to improve numerical stability. Default: 1e-6. - Should be greater than 0. - weight_decay (float): Weight decay (L2 penalty). It must be equal to or greater than 0. Default: 0.0. - - Inputs: - - **gradients** (tuple[Tensor]) - The gradients of `params`, the shape is the same as `params`. - - Outputs: - tuple[bool], all elements are True. - - Supported Platforms: - ``GPU`` - - Examples: - >>> net = Net() - >>> #1) All parameters use the same learning rate and weight decay - >>> optim = AdamWeightDecayOp(params=net.trainable_params()) - >>> - >>> #2) Use parameter groups and set different values - >>> conv_params = list(filter(lambda x: 'conv' in x.name, net.trainable_params())) - >>> no_conv_params = list(filter(lambda x: 'conv' not in x.name, net.trainable_params())) - >>> group_params = [{'params': conv_params, 'weight_decay': 0.01}, - ... {'params': no_conv_params, 'lr': 0.01}, - ... {'order_params': net.trainable_params()}] - >>> optim = AdamWeightDecayOp(group_params, learning_rate=0.1, weight_decay=0.0) - >>> # The conv_params's parameters will use default learning rate of 0.1 and weight decay of 0.01. - >>> # The no_conv_params's parameters will use learning rate of 0.01 and default weight decay of 0.0. - >>> # The final parameters order in which the optimizer will be followed is the value of 'order_params'. 
- >>> - >>> loss = nn.SoftmaxCrossEntropyWithLogits() - >>> model = Model(net, loss_fn=loss, optimizer=optim) - """ - - def __init__(self, params, learning_rate=1e-3, beta1=0.9, beta2=0.999, eps=1e-6, weight_decay=0.0): - super(AdamWeightDecayOp, self).__init__(learning_rate, params, weight_decay) - _check_param_value(beta1, beta2, eps, self.cls_name) - self.beta1 = Tensor(np.array([beta1]).astype(np.float32)) - self.beta2 = Tensor(np.array([beta2]).astype(np.float32)) - self.eps = Tensor(np.array([eps]).astype(np.float32)) - self.moments1 = self.parameters.clone(prefix="adam_m", init='zeros') - self.moments2 = self.parameters.clone(prefix="adam_v", init='zeros') - self.hyper_map = C.HyperMap() - - def construct(self, gradients): - """Construct the trainer of Bert.""" - lr = self.get_lr() - if self.is_group: - if self.is_group_lr: - optim_result = self.hyper_map(F.partial(_adam_opt, self.beta1, self.beta2, self.eps), - lr, self.weight_decay, self.parameters, self.moments1, self.moments2, - gradients, self.decay_flags, self.optim_filter) - else: - optim_result = self.hyper_map(F.partial(_adam_opt, self.beta1, self.beta2, self.eps, lr), - self.weight_decay, self.parameters, self.moments1, self.moments2, - gradients, self.decay_flags, self.optim_filter) - else: - optim_result = self.hyper_map(F.partial(_adam_opt, self.beta1, self.beta2, self.eps, lr, self.weight_decay), - self.parameters, self.moments1, self.moments2, - gradients, self.decay_flags, self.optim_filter) - if self.use_parallel: - self.broadcast_params(optim_result) - return optim_result diff --git a/vega/algorithms/nlp/src/assessment_method.py b/vega/algorithms/nlp/src/assessment_method.py deleted file mode 100644 index 6557715..0000000 --- a/vega/algorithms/nlp/src/assessment_method.py +++ /dev/null @@ -1,152 +0,0 @@ -# Copyright 2020 Huawei Technologies Co., Ltd -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
-# ============================================================================ - -"""Bert evaluation assessment method script.""" - -import math -import numpy as np -from mindspore.nn.metrics import ConfusionMatrixMetric -from .CRF import postprocess - - -class Accuracy(): - """Calculate accuracy.""" - - def __init__(self): - self.acc_num = 0 - self.total_num = 0 - - def update(self, logits, labels): - """Construct the trainer of Bert.""" - labels = labels.asnumpy() - labels = np.reshape(labels, -1) - logits = logits.asnumpy() - logit_id = np.argmax(logits, axis=-1) - self.acc_num += np.sum(labels == logit_id) - self.total_num += len(labels) - - -class F1(): - """Calculate F1 score.""" - - def __init__(self, use_crf=False, num_labels=2, mode="Binary"): - self.TP = 0 - self.FP = 0 - self.FN = 0 - self.use_crf = use_crf - self.num_labels = num_labels - self.mode = mode - if self.mode.lower() not in ("binary", "multilabel"): - raise ValueError("Assessment mode not supported, support: [Binary, MultiLabel]") - if self.mode.lower() != "binary": - self.metric = ConfusionMatrixMetric(skip_channel=False, metric_name=("f1 score"), - calculation_method=False, decrease="mean") - - def update(self, logits, labels): - """Update F1 score.""" - labels = labels.asnumpy() - labels = np.reshape(labels, -1) - if self.use_crf: - backpointers, best_tag_id = logits - best_path = postprocess(backpointers, best_tag_id) - logit_id = [] - for ele in best_path: - logit_id.extend(ele) - else: - logits = logits.asnumpy() - logit_id = np.argmax(logits, axis=-1) - logit_id = np.reshape(logit_id, -1) - - if self.mode.lower() == "binary": - pos_eva = np.isin(logit_id, [i for i in range(1, self.num_labels)]) - pos_label = np.isin(labels, [i for i in range(1, self.num_labels)]) - self.TP += np.sum(pos_eva & pos_label) - self.FP += np.sum(pos_eva & (~pos_label)) - self.FN += np.sum((~pos_eva) & pos_label) - else: - target = np.zeros((len(labels), self.num_labels), dtype=np.int) - pred = np.zeros((len(logit_id), self.num_labels), dtype=np.int) - for i, label in enumerate(labels): - target[i][label] = 1 - for i, label in enumerate(logit_id): - pred[i][label] = 1 - self.metric.update(pred, target) - - def eval(self): - """Construct the trainer of Bert.""" - return self.metric.eval() - - -class MCC(): - """Calculate Matthews Correlation Coefficient.""" - - def __init__(self): - self.TP = 0 - self.FP = 0 - self.FN = 0 - self.TN = 0 - - def update(self, logits, labels): - """Construct the trainer of Bert.""" - labels = labels.asnumpy() - labels = np.reshape(labels, -1) - labels = labels.astype(np.bool) - logits = logits.asnumpy() - logit_id = np.argmax(logits, axis=-1) - logit_id = np.reshape(logit_id, -1) - logit_id = logit_id.astype(np.bool) - ornot = logit_id ^ labels - - self.TP += (~ornot & labels).sum() - self.FP += (ornot & ~labels).sum() - self.FN += (ornot & labels).sum() - self.TN += (~ornot & ~labels).sum() - - def cal(self): - """Construct the trainer of Bert.""" - mcc = (self.TP * self.TN - self.FP * self.FN) / math.sqrt((self.TP + self.FP) * (self.TP + self.FN) - * (self.TN + self.FP) * (self.TN + self.FN)) - return mcc - - -class Spearman_Correlation(): - """Calculate Spearman Correlation Coefficient.""" - - def __init__(self): - self.label = [] - self.logit = [] - - def update(self, logits, labels): - """Construct the trainer of Bert.""" - labels = labels.asnumpy() - labels = np.reshape(labels, -1) - logits = logits.asnumpy() - logits = np.reshape(logits, -1) - self.label.append(labels) - self.logit.append(logits) 
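# Illustrative sketch (hypothetical helpers): the binary F1 and MCC trackers above
# reduce to the usual formulas over the accumulated confusion counts TP/FP/FN/TN.
import math

def f1_from_counts(tp, fp, fn):
    precision = tp / (tp + fp) if tp + fp else 0.0
    recall = tp / (tp + fn) if tp + fn else 0.0
    return 2 * precision * recall / (precision + recall) if precision + recall else 0.0

def mcc_from_counts(tp, fp, fn, tn):
    denom = math.sqrt((tp + fp) * (tp + fn) * (tn + fp) * (tn + fn))
    return (tp * tn - fp * fn) / denom if denom else 0.0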
- - def cal(self): - """Calculate Spearman Correlation.""" - label = np.concatenate(self.label) - logit = np.concatenate(self.logit) - sort_label = label.argsort()[::-1] - sort_logit = logit.argsort()[::-1] - n = len(label) - d_acc = 0 - for i in range(n): - d = np.where(sort_label == i)[0] - np.where(sort_logit == i)[0] - d_acc += d ** 2 - ps = 1 - 6 * d_acc / n / (n ** 2 - 1) - return ps diff --git a/vega/algorithms/nlp/src/bert_for_finetune.py b/vega/algorithms/nlp/src/bert_for_finetune.py deleted file mode 100644 index 66eec16..0000000 --- a/vega/algorithms/nlp/src/bert_for_finetune.py +++ /dev/null @@ -1,342 +0,0 @@ -# Copyright 2020 Huawei Technologies Co., Ltd -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# ============================================================================ - -"""Bert for finetune script.""" - -import mindspore.nn as nn -from mindspore.ops import operations as P -from mindspore.ops import functional as F -from mindspore.ops import composite as C -from mindspore.common.tensor import Tensor -from mindspore.common.parameter import Parameter -from mindspore.common import dtype as mstype -from mindspore.nn.wrap.grad_reducer import DistributedGradReducer -from mindspore.context import ParallelMode -from mindspore.communication.management import get_group_size -from mindspore import context -from .bert_for_pre_training import clip_grad -from .finetune_eval_model import BertCLSModel, BertNERModel, BertSquadModel -from .utils import CrossEntropyCalculation - -GRADIENT_CLIP_TYPE = 1 -GRADIENT_CLIP_VALUE = 1.0 -grad_scale = C.MultitypeFuncGraph("grad_scale") -reciprocal = P.Reciprocal() - - -@grad_scale.register("Tensor", "Tensor") -def tensor_grad_scale(scale, grad): - """Construct the trainer of Bert.""" - return grad * reciprocal(scale) - - -_grad_overflow = C.MultitypeFuncGraph("_grad_overflow") -grad_overflow = P.FloatStatus() - - -@_grad_overflow.register("Tensor") -def _tensor_grad_overflow(grad): - """Construct the trainer of Bert.""" - return grad_overflow(grad) - - -class BertFinetuneCell(nn.Cell): - """ - Especially defined for finetuning where only four inputs tensor are needed. - - Args: - network (Cell): The training network. Note that loss function should have been added. - optimizer (Optimizer): Optimizer for updating the weights. - scale_update_cell (Cell): Cell to do the loss scale. Default: None. 
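# Illustrative sketch (hypothetical, framework-free): the finetune cell described above
# follows the usual mixed-precision recipe -- seed backprop with the loss scale, unscale
# the gradients, and skip the optimizer step whenever any gradient overflowed to inf/nan.
import numpy as np

def loss_scaled_step(grads_scaled, loss_scale, apply_update):
    grads = [g / loss_scale for g in grads_scaled]            # undo the loss scaling
    overflow = any(not np.all(np.isfinite(g)) for g in grads)
    if not overflow:
        apply_update(grads)                                    # optimizer(grads)
    return overflow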
- """ - - def __init__(self, network, optimizer, scale_update_cell=None): - - super(BertFinetuneCell, self).__init__(auto_prefix=False) - self.network = network - self.network.set_grad() - self.weights = optimizer.parameters - self.optimizer = optimizer - self.grad = C.GradOperation(get_by_list=True, - sens_param=True) - self.reducer_flag = False - self.allreduce = P.AllReduce() - self.parallel_mode = context.get_auto_parallel_context("parallel_mode") - if self.parallel_mode in [ParallelMode.DATA_PARALLEL, ParallelMode.HYBRID_PARALLEL]: - self.reducer_flag = True - self.grad_reducer = None - if self.reducer_flag: - mean = context.get_auto_parallel_context("gradients_mean") - degree = get_group_size() - self.grad_reducer = DistributedGradReducer(optimizer.parameters, mean, degree) - self.is_distributed = (self.parallel_mode != ParallelMode.STAND_ALONE) - self.cast = P.Cast() - self.gpu_target = False - if context.get_context("device_target") == "GPU": - self.gpu_target = True - self.float_status = P.FloatStatus() - self.addn = P.AddN() - self.reshape = P.Reshape() - else: - self.alloc_status = P.NPUAllocFloatStatus() - self.get_status = P.NPUGetFloatStatus() - self.clear_status = P.NPUClearFloatStatus() - self.reduce_sum = P.ReduceSum(keep_dims=False) - self.base = Tensor(1, mstype.float32) - self.less_equal = P.LessEqual() - self.hyper_map = C.HyperMap() - self.loss_scale = None - self.loss_scaling_manager = scale_update_cell - if scale_update_cell: - self.loss_scale = Parameter(Tensor(scale_update_cell.get_loss_scale(), dtype=mstype.float32)) - - def construct(self, - input_ids, - input_mask, - token_type_id, - label_ids, - sens=None): - """Bert Finetune.""" - weights = self.weights - init = False - loss = self.network(input_ids, - input_mask, - token_type_id, - label_ids) - if sens is None: - scaling_sens = self.loss_scale - else: - scaling_sens = sens - - if not self.gpu_target: - init = self.alloc_status() - init = F.depend(init, loss) - clear_status = self.clear_status(init) - scaling_sens = F.depend(scaling_sens, clear_status) - grads = self.grad(self.network, weights)(input_ids, - input_mask, - token_type_id, - label_ids, - self.cast(scaling_sens, - mstype.float32)) - grads = self.hyper_map(F.partial(grad_scale, scaling_sens), grads) - grads = self.hyper_map(F.partial(clip_grad, GRADIENT_CLIP_TYPE, GRADIENT_CLIP_VALUE), grads) - if self.reducer_flag: - grads = self.grad_reducer(grads) - if not self.gpu_target: - init = F.depend(init, grads) - get_status = self.get_status(init) - init = F.depend(init, get_status) - flag_sum = self.reduce_sum(init, (0,)) - else: - flag_sum = self.hyper_map(F.partial(_grad_overflow), grads) - flag_sum = self.addn(flag_sum) - flag_sum = self.reshape(flag_sum, (())) - if self.is_distributed: - flag_reduce = self.allreduce(flag_sum) - cond = self.less_equal(self.base, flag_reduce) - else: - cond = self.less_equal(self.base, flag_sum) - overflow = cond - if sens is None: - overflow = self.loss_scaling_manager(self.loss_scale, cond) - if overflow: - succ = False - else: - succ = self.optimizer(grads) - ret = (loss, cond) - return F.depend(ret, succ) - - -class BertSquadCell(nn.Cell): - """Specify defined for finetuning where only four inputs tensor are needed.""" - - def __init__(self, network, optimizer, scale_update_cell=None): - super(BertSquadCell, self).__init__(auto_prefix=False) - self.network = network - self.network.set_grad() - self.weights = optimizer.parameters - self.optimizer = optimizer - self.grad = C.GradOperation(get_by_list=True, 
sens_param=True) - self.reducer_flag = False - self.allreduce = P.AllReduce() - self.parallel_mode = context.get_auto_parallel_context("parallel_mode") - if self.parallel_mode in [ParallelMode.DATA_PARALLEL, ParallelMode.HYBRID_PARALLEL]: - self.reducer_flag = True - self.grad_reducer = None - if self.reducer_flag: - mean = context.get_auto_parallel_context("gradients_mean") - degree = get_group_size() - self.grad_reducer = DistributedGradReducer(optimizer.parameters, mean, degree) - self.is_distributed = (self.parallel_mode != ParallelMode.STAND_ALONE) - self.cast = P.Cast() - self.alloc_status = P.NPUAllocFloatStatus() - self.get_status = P.NPUGetFloatStatus() - self.clear_status = P.NPUClearFloatStatus() - self.reduce_sum = P.ReduceSum(keep_dims=False) - self.base = Tensor(1, mstype.float32) - self.less_equal = P.LessEqual() - self.hyper_map = C.HyperMap() - self.loss_scale = None - self.loss_scaling_manager = scale_update_cell - if scale_update_cell: - self.loss_scale = Parameter(Tensor(scale_update_cell.get_loss_scale(), dtype=mstype.float32)) - - def construct(self, - input_ids, - input_mask, - token_type_id, - start_position, - end_position, - unique_id, - is_impossible, - sens=None): - """Construct the trainer of Bert.""" - weights = self.weights - init = self.alloc_status() - loss = self.network(input_ids, - input_mask, - token_type_id, - start_position, - end_position, - unique_id, - is_impossible) - if sens is None: - scaling_sens = self.loss_scale - else: - scaling_sens = sens - init = F.depend(init, loss) - clear_status = self.clear_status(init) - scaling_sens = F.depend(scaling_sens, clear_status) - grads = self.grad(self.network, weights)(input_ids, - input_mask, - token_type_id, - start_position, - end_position, - unique_id, - is_impossible, - self.cast(scaling_sens, - mstype.float32)) - grads = self.hyper_map(F.partial(grad_scale, scaling_sens), grads) - grads = self.hyper_map(F.partial(clip_grad, GRADIENT_CLIP_TYPE, GRADIENT_CLIP_VALUE), grads) - if self.reducer_flag: - grads = self.grad_reducer(grads) - init = F.depend(init, grads) - get_status = self.get_status(init) - init = F.depend(init, get_status) - flag_sum = self.reduce_sum(init, (0,)) - if self.is_distributed: - flag_reduce = self.allreduce(flag_sum) - cond = self.less_equal(self.base, flag_reduce) - else: - cond = self.less_equal(self.base, flag_sum) - overflow = cond - if sens is None: - overflow = self.loss_scaling_manager(self.loss_scale, cond) - if overflow: - succ = False - else: - succ = self.optimizer(grads) - ret = (loss, cond) - return F.depend(ret, succ) - - -class BertCLS(nn.Cell): - """Train interface for classification finetuning task.""" - - def __init__(self, config, is_training, num_labels=2, dropout_prob=0.0, use_one_hot_embeddings=False, - assessment_method=""): - super(BertCLS, self).__init__() - self.bert = BertCLSModel(config, is_training, num_labels, dropout_prob, use_one_hot_embeddings, - assessment_method) - self.loss = CrossEntropyCalculation(is_training) - self.num_labels = num_labels - self.assessment_method = assessment_method - self.is_training = is_training - - def construct(self, input_ids, input_mask, token_type_id, label_ids): - """Construct the trainer of Bert.""" - logits = self.bert(input_ids, input_mask, token_type_id) - if self.assessment_method == "spearman_correlation": - if self.is_training: - loss = self.loss(logits, label_ids) - else: - loss = logits - else: - loss = self.loss(logits, label_ids, self.num_labels) - return loss - - -class BertNER(nn.Cell): - """Train 
interface for sequence labeling finetuning task.""" - - def __init__(self, config, batch_size, is_training, num_labels=11, use_crf=False, - tag_to_index=None, dropout_prob=0.0, use_one_hot_embeddings=False): - super(BertNER, self).__init__() - self.bert = BertNERModel(config, is_training, num_labels, use_crf, dropout_prob, use_one_hot_embeddings) - if use_crf: - if not tag_to_index: - raise Exception("The dict for tag-index mapping should be provided for CRF.") - from src.CRF import CRF - self.loss = CRF(tag_to_index, batch_size, config.seq_length, is_training) - else: - self.loss = CrossEntropyCalculation(is_training) - self.num_labels = num_labels - self.use_crf = use_crf - - def construct(self, input_ids, input_mask, token_type_id, label_ids): - """Construct the trainer of Bert.""" - logits = self.bert(input_ids, input_mask, token_type_id) - if self.use_crf: - loss = self.loss(logits, label_ids) - else: - loss = self.loss(logits, label_ids, self.num_labels) - return loss - - -class BertSquad(nn.Cell): - """Train interface for SQuAD finetuning task.""" - - def __init__(self, config, is_training, num_labels=2, dropout_prob=0.0, use_one_hot_embeddings=False): - super(BertSquad, self).__init__() - self.bert = BertSquadModel(config, is_training, num_labels, dropout_prob, use_one_hot_embeddings) - self.loss = CrossEntropyCalculation(is_training) - self.num_labels = num_labels - self.seq_length = config.seq_length - self.is_training = is_training - self.total_num = Parameter(Tensor([0], mstype.float32)) - self.start_num = Parameter(Tensor([0], mstype.float32)) - self.end_num = Parameter(Tensor([0], mstype.float32)) - self.sum = P.ReduceSum() - self.equal = P.Equal() - self.argmax = P.ArgMaxWithValue(axis=1) - self.squeeze = P.Squeeze(axis=-1) - - def construct(self, input_ids, input_mask, token_type_id, start_position, end_position, unique_id, is_impossible): - """Interface for SQuAD finetuning task.""" - logits = self.bert(input_ids, input_mask, token_type_id) - if self.is_training: - unstacked_logits_0 = self.squeeze(logits[:, :, 0:1]) - unstacked_logits_1 = self.squeeze(logits[:, :, 1:2]) - start_loss = self.loss(unstacked_logits_0, start_position, self.seq_length) - end_loss = self.loss(unstacked_logits_1, end_position, self.seq_length) - total_loss = (start_loss + end_loss) / 2.0 - else: - start_logits = self.squeeze(logits[:, :, 0:1]) - start_logits = start_logits + 100 * input_mask - end_logits = self.squeeze(logits[:, :, 1:2]) - end_logits = end_logits + 100 * input_mask - total_loss = (unique_id, start_logits, end_logits) - return total_loss diff --git a/vega/algorithms/nlp/src/bert_for_pre_training.py b/vega/algorithms/nlp/src/bert_for_pre_training.py deleted file mode 100644 index 6ec9b24..0000000 --- a/vega/algorithms/nlp/src/bert_for_pre_training.py +++ /dev/null @@ -1,860 +0,0 @@ -# Copyright 2020-2021 Huawei Technologies Co., Ltd -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
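# Illustrative sketch (hypothetical names, plain NumPy): the SQuAD head deleted above
# averages a start-position and an end-position cross-entropy over the batch.
import numpy as np

def log_softmax(x, axis=-1):
    x = x - x.max(axis=axis, keepdims=True)
    return x - np.log(np.exp(x).sum(axis=axis, keepdims=True))

def squad_span_loss(start_logits, end_logits, start_pos, end_pos):
    # logits: (batch, seq_len); positions: (batch,) token indices of the answer span
    batch = np.arange(len(start_pos))
    start_loss = -log_softmax(start_logits)[batch, start_pos].mean()
    end_loss = -log_softmax(end_logits)[batch, end_pos].mean()
    return (start_loss + end_loss) / 2.0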
-# ============================================================================ -"""Bert for pretraining.""" -import numpy as np - -import mindspore.nn as nn -from mindspore.common.initializer import initializer, TruncatedNormal -from mindspore.ops import operations as P -from mindspore.ops import functional as F -from mindspore.ops import composite as C -from mindspore.common.tensor import Tensor -from mindspore.common.parameter import Parameter -from mindspore.common import dtype as mstype -from mindspore.nn.wrap.grad_reducer import DistributedGradReducer -from mindspore.context import ParallelMode -from mindspore.communication.management import get_group_size -from mindspore import context -from vega.common import ClassFactory, ClassType -from vega.modules.module import Module -from .bert_model import BertModel - -GRADIENT_CLIP_TYPE = 1 -GRADIENT_CLIP_VALUE = 1.0 - -clip_grad = C.MultitypeFuncGraph("clip_grad") - - -@ClassFactory.register(ClassType.NETWORK) -class Bert(Module): - """ - Provide bert pre-training loss through network. - - Args: - config (BertConfig): The config of BertModel. - is_training (bool): Specifies whether to use the training mode. - use_one_hot_embeddings (bool): Specifies whether to use one-hot for embeddings. Default: False. - - Returns: - Tensor, the loss of the network. - """ - - def __init__(self): - from .model_utils.config import config as cfg, bert_net_cfg - super(Bert, self).__init__() - self.net = BertNetworkWithLoss(bert_net_cfg, True) - - def construct(self, - input_ids, - input_mask, - token_type_id, - next_sentence_labels, - masked_lm_positions, - masked_lm_ids, - masked_lm_weights): - """Get pre-training loss.""" - return self.net(input_ids, - input_mask, - token_type_id, - next_sentence_labels, - masked_lm_positions, - masked_lm_ids, - masked_lm_weights) - - -@clip_grad.register("Number", "Number", "Tensor") -def _clip_grad(clip_type, clip_value, grad): - """ - Clip gradients. - - Inputs: - clip_type (int): The way to clip, 0 for 'value', 1 for 'norm'. - clip_value (float): Specifies how much to clip. - grad (tuple[Tensor]): Gradients. - - Outputs: - tuple[Tensor], clipped gradients. - """ - if clip_type not in (0, 1): - return grad - dt = F.dtype(grad) - if clip_type == 0: - new_grad = C.clip_by_value(grad, F.cast(F.tuple_to_array((-clip_value,)), dt), - F.cast(F.tuple_to_array((clip_value,)), dt)) - else: - new_grad = nn.ClipByNorm()(grad, F.cast(F.tuple_to_array((clip_value,)), dt)) - return new_grad - - -class GetMaskedLMOutput(nn.Cell): - """ - Get masked lm output. - - Args: - config (BertConfig): The config of BertModel. - - Returns: - Tensor, masked lm output. 
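# Illustrative sketch (plain NumPy): the `_clip_grad` registration above either clips
# each gradient element-wise to [-clip_value, clip_value] (clip_type 0) or rescales the
# whole tensor so its L2 norm is at most clip_value (clip_type 1, the default used here).
import numpy as np

def clip_grad_np(grad, clip_type=1, clip_value=1.0):
    if clip_type not in (0, 1):
        return grad
    if clip_type == 0:
        return np.clip(grad, -clip_value, clip_value)
    norm = np.linalg.norm(grad)
    return grad if norm <= clip_value else grad * (clip_value / norm)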
- """ - - def __init__(self, config): - super(GetMaskedLMOutput, self).__init__() - self.width = config.hidden_size - self.reshape = P.Reshape() - self.gather = P.Gather() - - weight_init = TruncatedNormal(config.initializer_range) - self.dense = nn.Dense(self.width, - config.hidden_size, - weight_init=weight_init, - activation=config.hidden_act).to_float(config.compute_type) - self.layernorm = nn.LayerNorm((config.hidden_size,)).to_float(config.compute_type) - self.output_bias = Parameter( - initializer( - 'zero', - config.vocab_size)) - self.matmul = P.MatMul(transpose_b=True) - self.log_softmax = nn.LogSoftmax(axis=-1) - self.shape_flat_offsets = (-1, 1) - self.last_idx = (-1,) - self.shape_flat_sequence_tensor = (-1, self.width) - self.seq_length_tensor = Tensor(np.array((config.seq_length,)).astype(np.int32)) - self.cast = P.Cast() - self.compute_type = config.compute_type - self.dtype = config.dtype - - def construct(self, - input_tensor, - output_weights, - positions): - """Get output log_probs.""" - rng = F.tuple_to_array(F.make_range(P.Shape()(input_tensor)[0])) - flat_offsets = self.reshape(rng * self.seq_length_tensor, self.shape_flat_offsets) - flat_position = self.reshape(positions + flat_offsets, self.last_idx) - flat_sequence_tensor = self.reshape(input_tensor, self.shape_flat_sequence_tensor) - input_tensor = self.gather(flat_sequence_tensor, flat_position, 0) - input_tensor = self.cast(input_tensor, self.compute_type) - output_weights = self.cast(output_weights, self.compute_type) - input_tensor = self.dense(input_tensor) - input_tensor = self.layernorm(input_tensor) - logits = self.matmul(input_tensor, output_weights) - logits = self.cast(logits, self.dtype) - logits = logits + self.output_bias - log_probs = self.log_softmax(logits) - return log_probs - - -class GetNextSentenceOutput(nn.Cell): - """ - Get next sentence output. - - Args: - config (BertConfig): The config of Bert. - - Returns: - Tensor, next sentence output. - """ - - def __init__(self, config): - super(GetNextSentenceOutput, self).__init__() - self.log_softmax = P.LogSoftmax() - weight_init = TruncatedNormal(config.initializer_range) - self.dense = nn.Dense(config.hidden_size, 2, - weight_init=weight_init, has_bias=True).to_float(config.compute_type) - self.dtype = config.dtype - self.cast = P.Cast() - - def construct(self, input_tensor): - """Construct the trainer of Bert.""" - logits = self.dense(input_tensor) - logits = self.cast(logits, self.dtype) - log_prob = self.log_softmax(logits) - return log_prob - - -class BertPreTraining(nn.Cell): - """ - Bert pretraining network. - - Args: - config (BertConfig): The config of BertModel. - is_training (bool): Specifies whether to use the training mode. - use_one_hot_embeddings (bool): Specifies whether to use one-hot for embeddings. - - Returns: - Tensor, prediction_scores, seq_relationship_score. 
- """ - - def __init__(self, config, is_training, use_one_hot_embeddings): - super(BertPreTraining, self).__init__() - self.bert = BertModel(config, is_training, use_one_hot_embeddings) - self.cls1 = GetMaskedLMOutput(config) - self.cls2 = GetNextSentenceOutput(config) - - def construct(self, input_ids, input_mask, token_type_id, - masked_lm_positions): - """Construct the trainer of Bert.""" - sequence_output, pooled_output, embedding_table = \ - self.bert(input_ids, token_type_id, input_mask) - prediction_scores = self.cls1(sequence_output, - embedding_table, - masked_lm_positions) - seq_relationship_score = self.cls2(pooled_output) - return prediction_scores, seq_relationship_score - - -class BertPretrainingLoss(nn.Cell): - """ - Provide bert pre-training loss. - - Args: - config (BertConfig): The config of BertModel. - - Returns: - Tensor, total loss. - """ - - def __init__(self, config): - super(BertPretrainingLoss, self).__init__() - self.vocab_size = config.vocab_size - self.onehot = P.OneHot() - self.on_value = Tensor(1.0, mstype.float32) - self.off_value = Tensor(0.0, mstype.float32) - self.reduce_sum = P.ReduceSum() - self.reduce_mean = P.ReduceMean() - self.reshape = P.Reshape() - self.last_idx = (-1,) - self.neg = P.Neg() - self.cast = P.Cast() - - def construct(self, prediction_scores, seq_relationship_score, masked_lm_ids, - masked_lm_weights, next_sentence_labels): - """Define the computation performed.""" - label_ids = self.reshape(masked_lm_ids, self.last_idx) - label_weights = self.cast(self.reshape(masked_lm_weights, self.last_idx), mstype.float32) - one_hot_labels = self.onehot(label_ids, self.vocab_size, self.on_value, self.off_value) - - per_example_loss = self.neg(self.reduce_sum(prediction_scores * one_hot_labels, self.last_idx)) - numerator = self.reduce_sum(label_weights * per_example_loss, ()) - denominator = self.reduce_sum(label_weights, ()) + self.cast(F.tuple_to_array((1e-5,)), mstype.float32) - masked_lm_loss = numerator / denominator - - # next_sentence_loss - labels = self.reshape(next_sentence_labels, self.last_idx) - one_hot_labels = self.onehot(labels, 2, self.on_value, self.off_value) - per_example_loss = self.neg(self.reduce_sum( - one_hot_labels * seq_relationship_score, self.last_idx)) - next_sentence_loss = self.reduce_mean(per_example_loss, self.last_idx) - - # total_loss - total_loss = masked_lm_loss + next_sentence_loss - print(total_loss) - return total_loss - - -class BertNetworkWithLoss(nn.Cell): - """ - Provide bert pre-training loss through network. - - Args: - config (BertConfig): The config of BertModel. - is_training (bool): Specifies whether to use the training mode. - use_one_hot_embeddings (bool): Specifies whether to use one-hot for embeddings. Default: False. - - Returns: - Tensor, the loss of the network. 
- """ - - def __init__(self, config, is_training, use_one_hot_embeddings=False): - super(BertNetworkWithLoss, self).__init__() - self.bert = BertPreTraining(config, is_training, use_one_hot_embeddings) - self.loss = BertPretrainingLoss(config) - self.cast = P.Cast() - - def construct(self, - input_ids, - input_mask, - token_type_id, - next_sentence_labels, - masked_lm_positions, - masked_lm_ids, - masked_lm_weights): - """Get pre-training loss.""" - prediction_scores, seq_relationship_score = \ - self.bert(input_ids, input_mask, token_type_id, masked_lm_positions) - total_loss = self.loss(prediction_scores, seq_relationship_score, - masked_lm_ids, masked_lm_weights, next_sentence_labels) - return self.cast(total_loss, mstype.float32) - - -class BertTrainOneStepCell(nn.TrainOneStepCell): - """ - Encapsulation class of bert network training. - - Append an optimizer to the training network after that the construct - function can be called to create the backward graph. - - Args: - network (Cell): The training network. Note that loss function should have been added. - optimizer (Optimizer): Optimizer for updating the weights. - sens (Number): The adjust parameter. Default: 1.0. - enable_clip_grad (boolean): If True, clip gradients in BertTrainOneStepCell. Default: True. - """ - - def __init__(self, network, optimizer, sens=1.0, enable_clip_grad=True): - super(BertTrainOneStepCell, self).__init__(network, optimizer, sens) - self.cast = P.Cast() - self.hyper_map = C.HyperMap() - self.enable_clip_grad = enable_clip_grad - - def set_sens(self, value): - """Construct the trainer of Bert.""" - self.sens = value - - def construct(self, - input_ids, - input_mask, - token_type_id, - next_sentence_labels, - masked_lm_positions, - masked_lm_ids, - masked_lm_weights): - """Define the computation performed.""" - weights = self.weights - - loss = self.network(input_ids, - input_mask, - token_type_id, - next_sentence_labels, - masked_lm_positions, - masked_lm_ids, - masked_lm_weights) - grads = self.grad(self.network, weights)(input_ids, - input_mask, - token_type_id, - next_sentence_labels, - masked_lm_positions, - masked_lm_ids, - masked_lm_weights, - self.cast(F.tuple_to_array((self.sens,)), - mstype.float32)) - if self.enable_clip_grad: - grads = self.hyper_map(F.partial(clip_grad, GRADIENT_CLIP_TYPE, GRADIENT_CLIP_VALUE), grads) - grads = self.grad_reducer(grads) - succ = self.optimizer(grads) - return F.depend(loss, succ) - - -grad_scale = C.MultitypeFuncGraph("grad_scale") -reciprocal = P.Reciprocal() - - -@grad_scale.register("Tensor", "Tensor") -def tensor_grad_scale(scale, grad): - """Construct the trainer of Bert.""" - return grad * reciprocal(scale) - - -_grad_overflow = C.MultitypeFuncGraph("_grad_overflow") -grad_overflow = P.FloatStatus() - - -@_grad_overflow.register("Tensor") -def _tensor_grad_overflow(grad): - """Construct the trainer of Bert.""" - return grad_overflow(grad) - - -class BertTrainOneStepWithLossScaleCell(nn.TrainOneStepWithLossScaleCell): - """ - Encapsulation class of bert network training. - - Append an optimizer to the training network after that the construct - function can be called to create the backward graph. - - Args: - network (Cell): The training network. Note that loss function should have been added. - optimizer (Optimizer): Optimizer for updating the weights. - scale_update_cell (Cell): Cell to do the loss scale. Default: None. 
- """ - - def __init__(self, network, optimizer, scale_update_cell=None): - super(BertTrainOneStepWithLossScaleCell, self).__init__(network, optimizer, scale_update_cell) - self.cast = P.Cast() - self.degree = 1 - if self.reducer_flag: - self.degree = get_group_size() - self.grad_reducer = DistributedGradReducer(optimizer.parameters, False, self.degree) - - self.loss_scale = None - self.loss_scaling_manager = scale_update_cell - if scale_update_cell: - self.loss_scale = Parameter(Tensor(scale_update_cell.get_loss_scale(), dtype=mstype.float32)) - - def construct(self, - input_ids, - input_mask, - token_type_id, - next_sentence_labels, - masked_lm_positions, - masked_lm_ids, - masked_lm_weights, - sens=None): - """Define the computation performed.""" - weights = self.weights - loss = self.network(input_ids, - input_mask, - token_type_id, - next_sentence_labels, - masked_lm_positions, - masked_lm_ids, - masked_lm_weights) - if sens is None: - scaling_sens = self.loss_scale - else: - scaling_sens = sens - status, scaling_sens = self.start_overflow_check(loss, scaling_sens) - grads = self.grad(self.network, weights)(input_ids, - input_mask, - token_type_id, - next_sentence_labels, - masked_lm_positions, - masked_lm_ids, - masked_lm_weights, - self.cast(scaling_sens, - mstype.float32)) - # apply grad reducer on grads - grads = self.grad_reducer(grads) - grads = self.hyper_map(F.partial(grad_scale, scaling_sens * self.degree), grads) - grads = self.hyper_map(F.partial(clip_grad, GRADIENT_CLIP_TYPE, GRADIENT_CLIP_VALUE), grads) - - cond = self.get_overflow_status(status, grads) - overflow = cond - if sens is None: - overflow = self.loss_scaling_manager(self.loss_scale, cond) - if overflow: - succ = False - else: - succ = self.optimizer(grads) - ret = (loss, cond, scaling_sens) - return F.depend(ret, succ) - - -class BertTrainOneStepWithLossScaleCellForAdam(nn.TrainOneStepWithLossScaleCell): - """ - Encapsulation class of bert network training. - - Append an optimizer to the training network after that the construct - function can be called to create the backward graph. - Different from BertTrainOneStepWithLossScaleCell, the optimizer takes the overflow - condition as input. - - Args: - network (Cell): The training network. Note that loss function should have been added. - optimizer (Optimizer): Optimizer for updating the weights. - scale_update_cell (Cell): Cell to do the loss scale. Default: None. 
- """ - - def __init__(self, network, optimizer, scale_update_cell=None): - super(BertTrainOneStepWithLossScaleCellForAdam, self).__init__(network, optimizer, scale_update_cell) - self.cast = P.Cast() - self.degree = 1 - if self.reducer_flag: - self.degree = get_group_size() - self.grad_reducer = DistributedGradReducer(optimizer.parameters, False, self.degree) - self.loss_scale = None - self.loss_scaling_manager = scale_update_cell - if scale_update_cell: - self.loss_scale = Parameter(Tensor(scale_update_cell.get_loss_scale(), dtype=mstype.float32)) - - def construct(self, - input_ids, - input_mask, - token_type_id, - next_sentence_labels, - masked_lm_positions, - masked_lm_ids, - masked_lm_weights, - sens=None): - """Define the computation performed.""" - weights = self.weights - loss = self.network(input_ids, - input_mask, - token_type_id, - next_sentence_labels, - masked_lm_positions, - masked_lm_ids, - masked_lm_weights) - if sens is None: - scaling_sens = self.loss_scale - else: - scaling_sens = sens - - status, scaling_sens = self.start_overflow_check(loss, scaling_sens) - grads = self.grad(self.network, weights)(input_ids, - input_mask, - token_type_id, - next_sentence_labels, - masked_lm_positions, - masked_lm_ids, - masked_lm_weights, - self.cast(scaling_sens, - mstype.float32)) - # apply grad reducer on grads - grads = self.grad_reducer(grads) - grads = self.hyper_map(F.partial(grad_scale, scaling_sens * self.degree), grads) - grads = self.hyper_map(F.partial(clip_grad, GRADIENT_CLIP_TYPE, GRADIENT_CLIP_VALUE), grads) - cond = self.get_overflow_status(status, grads) - overflow = cond - if self.loss_scaling_manager is not None: - overflow = self.loss_scaling_manager(scaling_sens, cond) - succ = self.optimizer(grads, overflow) - ret = (loss, cond, scaling_sens) - return F.depend(ret, succ) - - -cast = P.Cast() -add_grads = C.MultitypeFuncGraph("add_grads") - - -@add_grads.register("Tensor", "Tensor") -def _add_grads(accu_grad, grad): - return accu_grad + cast(grad, mstype.float32) - - -update_accu_grads = C.MultitypeFuncGraph("update_accu_grads") - - -@update_accu_grads.register("Tensor", "Tensor") -def _update_accu_grads(accu_grad, grad): - succ = True - return F.depend(succ, F.assign(accu_grad, cast(grad, mstype.float32))) - - -accumulate_accu_grads = C.MultitypeFuncGraph("accumulate_accu_grads") - - -@accumulate_accu_grads.register("Tensor", "Tensor") -def _accumulate_accu_grads(accu_grad, grad): - succ = True - return F.depend(succ, F.assign_add(accu_grad, cast(grad, mstype.float32))) - - -zeroslike = P.ZerosLike() -reset_accu_grads = C.MultitypeFuncGraph("reset_accu_grads") - - -@reset_accu_grads.register("Tensor") -def _reset_accu_grads(accu_grad): - succ = True - return F.depend(succ, F.assign(accu_grad, zeroslike(accu_grad))) - - -class BertTrainAccumulationAllReducePostWithLossScaleCell(nn.Cell): - """ - Encapsulation class of bert network training. - - Append an optimizer to the training network after that the construct - function can be called to create the backward graph. - - To mimic higher batch size, gradients are accumulated N times before weight update. - - For distribution mode, allreduce will only be implemented in the weight updated step, - i.e. the sub-step after gradients accumulated N times. - - Args: - network (Cell): The training network. Note that loss function should have been added. - optimizer (Optimizer): Optimizer for updating the weights. - scale_update_cell (Cell): Cell to do the loss scale. Default: None. 
- accumulation_steps (int): Number of accumulation steps before gradient update. The global batch size = - batch_size * accumulation_steps. Default: 1. - """ - - def __init__(self, network, optimizer, scale_update_cell=None, accumulation_steps=1, enable_global_norm=False): - super(BertTrainAccumulationAllReducePostWithLossScaleCell, self).__init__(auto_prefix=False) - self.network = network - self.network.set_grad() - self.weights = optimizer.parameters - self.optimizer = optimizer - self.accumulation_steps = accumulation_steps - self.enable_global_norm = enable_global_norm - self.one = Tensor(np.array([1]).astype(np.int32)) - self.zero = Tensor(np.array([0]).astype(np.int32)) - self.local_step = Parameter(initializer(0, [1], mstype.int32)) - self.accu_grads = self.weights.clone(prefix="accu_grads", init='zeros') - self.accu_overflow = Parameter(initializer(0, [1], mstype.int32)) - self.accu_loss = Parameter(initializer(0, [1], mstype.float32)) - - self.grad = C.GradOperation(get_by_list=True, sens_param=True) - self.reducer_flag = False - self.parallel_mode = context.get_auto_parallel_context("parallel_mode") - if self.parallel_mode in [ParallelMode.DATA_PARALLEL, ParallelMode.HYBRID_PARALLEL]: - self.reducer_flag = True - self.grad_reducer = F.identity - self.degree = 1 - if self.reducer_flag: - self.degree = get_group_size() - self.grad_reducer = DistributedGradReducer(optimizer.parameters, False, self.degree) - self.is_distributed = (self.parallel_mode != ParallelMode.STAND_ALONE) - self.overflow_reducer = F.identity - if self.is_distributed: - self.overflow_reducer = P.AllReduce() - self.cast = P.Cast() - self.alloc_status = P.NPUAllocFloatStatus() - self.get_status = P.NPUGetFloatStatus() - self.clear_status = P.NPUClearFloatStatus() - self.reduce_sum = P.ReduceSum(keep_dims=False) - self.base = Tensor(1, mstype.float32) - self.less_equal = P.LessEqual() - self.logical_or = P.LogicalOr() - self.not_equal = P.NotEqual() - self.select = P.Select() - self.reshape = P.Reshape() - self.hyper_map = C.HyperMap() - self.loss_scale = None - self.loss_scaling_manager = scale_update_cell - if scale_update_cell: - self.loss_scale = Parameter(Tensor(scale_update_cell.get_loss_scale(), dtype=mstype.float32)) - - def construct(self, - input_ids, - input_mask, - token_type_id, - next_sentence_labels, - masked_lm_positions, - masked_lm_ids, - masked_lm_weights, - sens=None): - """Define the computation performed.""" - weights = self.weights - loss = self.network(input_ids, - input_mask, - token_type_id, - next_sentence_labels, - masked_lm_positions, - masked_lm_ids, - masked_lm_weights) - if sens is None: - scaling_sens = self.loss_scale - else: - scaling_sens = sens - # alloc status and clear should be right before gradoperation - init = self.alloc_status() - init = F.depend(init, loss) - clear_status = self.clear_status(init) - scaling_sens = F.depend(scaling_sens, clear_status) - # update accumulation parameters - is_accu_step = self.not_equal(self.local_step, self.accumulation_steps) - self.local_step = self.select(is_accu_step, self.local_step + self.one, self.one) - self.accu_loss = self.select(is_accu_step, self.accu_loss + loss, loss) - mean_loss = self.accu_loss / self.local_step - is_accu_step = self.not_equal(self.local_step, self.accumulation_steps) - - grads = self.grad(self.network, weights)(input_ids, - input_mask, - token_type_id, - next_sentence_labels, - masked_lm_positions, - masked_lm_ids, - masked_lm_weights, - self.cast(scaling_sens, - mstype.float32)) - - accu_succ = 
self.hyper_map(accumulate_accu_grads, self.accu_grads, grads) - mean_loss = F.depend(mean_loss, accu_succ) - - init = F.depend(init, mean_loss) - get_status = self.get_status(init) - init = F.depend(init, get_status) - flag_sum = self.reduce_sum(init, (0,)) - overflow = self.less_equal(self.base, flag_sum) - overflow = self.logical_or(self.not_equal(self.accu_overflow, self.zero), overflow) - accu_overflow = self.select(overflow, self.one, self.zero) - self.accu_overflow = self.select(is_accu_step, accu_overflow, self.zero) - - if is_accu_step: - succ = False - else: - # apply grad reducer on grads - grads = self.grad_reducer(self.accu_grads) - scaling = scaling_sens * self.degree * self.accumulation_steps - grads = self.hyper_map(F.partial(grad_scale, scaling), grads) - if self.enable_global_norm: - grads = C.clip_by_global_norm(grads, 1.0, None) - else: - grads = self.hyper_map(F.partial(clip_grad, GRADIENT_CLIP_TYPE, GRADIENT_CLIP_VALUE), grads) - accu_overflow = F.depend(accu_overflow, grads) - accu_overflow = self.overflow_reducer(accu_overflow) - overflow = self.less_equal(self.base, accu_overflow) - accu_succ = self.hyper_map(reset_accu_grads, self.accu_grads) - overflow = F.depend(overflow, accu_succ) - overflow = self.reshape(overflow, (())) - if sens is None: - overflow = self.loss_scaling_manager(self.loss_scale, overflow) - if overflow: - succ = False - else: - succ = self.optimizer(grads) - - ret = (mean_loss, overflow, scaling_sens) - return F.depend(ret, succ) - - -class BertTrainAccumulationAllReduceEachWithLossScaleCell(nn.Cell): - """ - Encapsulation class of bert network training. - - Append an optimizer to the training network after that the construct - function can be called to create the backward graph. - - To mimic higher batch size, gradients are accumulated N times before weight update. - - For distribution mode, allreduce will be implemented after each sub-step and the trailing time - will be overided by backend optimization pass. - - Args: - network (Cell): The training network. Note that loss function should have been added. - optimizer (Optimizer): Optimizer for updating the weights. - scale_update_cell (Cell): Cell to do the loss scale. Default: None. - accumulation_steps (int): Number of accumulation steps before gradient update. The global batch size = - batch_size * accumulation_steps. Default: 1. 
- """ - - def __init__(self, network, optimizer, scale_update_cell=None, accumulation_steps=1, enable_global_norm=False): - super(BertTrainAccumulationAllReduceEachWithLossScaleCell, self).__init__(auto_prefix=False) - self.network = network - self.network.set_grad() - self.weights = optimizer.parameters - self.optimizer = optimizer - self.accumulation_steps = accumulation_steps - self.enable_global_norm = enable_global_norm - self.one = Tensor(np.array([1]).astype(np.int32)) - self.zero = Tensor(np.array([0]).astype(np.int32)) - self.local_step = Parameter(initializer(0, [1], mstype.int32)) - self.accu_grads = self.weights.clone(prefix="accu_grads", init='zeros') - self.accu_overflow = Parameter(initializer(0, [1], mstype.int32)) - self.accu_loss = Parameter(initializer(0, [1], mstype.float32)) - - self.grad = C.GradOperation(get_by_list=True, sens_param=True) - self.reducer_flag = False - self.parallel_mode = context.get_auto_parallel_context("parallel_mode") - if self.parallel_mode in [ParallelMode.DATA_PARALLEL, ParallelMode.HYBRID_PARALLEL]: - self.reducer_flag = True - self.grad_reducer = F.identity - self.degree = 1 - if self.reducer_flag: - self.degree = get_group_size() - self.grad_reducer = DistributedGradReducer(optimizer.parameters, False, self.degree) - self.is_distributed = (self.parallel_mode != ParallelMode.STAND_ALONE) - self.overflow_reducer = F.identity - if self.is_distributed: - self.overflow_reducer = P.AllReduce() - self.cast = P.Cast() - self.alloc_status = P.NPUAllocFloatStatus() - self.get_status = P.NPUGetFloatStatus() - self.clear_before_grad = P.NPUClearFloatStatus() - self.reduce_sum = P.ReduceSum(keep_dims=False) - self.base = Tensor(1, mstype.float32) - self.less_equal = P.LessEqual() - self.logical_or = P.LogicalOr() - self.not_equal = P.NotEqual() - self.select = P.Select() - self.reshape = P.Reshape() - self.hyper_map = C.HyperMap() - self.loss_scale = None - self.loss_scaling_manager = scale_update_cell - if scale_update_cell: - self.loss_scale = Parameter(Tensor(scale_update_cell.get_loss_scale(), dtype=mstype.float32)) - - @C.add_flags(has_effect=True) - def construct(self, - input_ids, - input_mask, - token_type_id, - next_sentence_labels, - masked_lm_positions, - masked_lm_ids, - masked_lm_weights, - sens=None): - """Define the computation performed.""" - weights = self.weights - loss = self.network(input_ids, - input_mask, - token_type_id, - next_sentence_labels, - masked_lm_positions, - masked_lm_ids, - masked_lm_weights) - if sens is None: - scaling_sens = self.loss_scale - else: - scaling_sens = sens - - # update accumulation parameters - is_accu_step = self.not_equal(self.local_step, self.accumulation_steps) - self.local_step = self.select(is_accu_step, self.local_step + self.one, self.one) - self.accu_loss = self.select(is_accu_step, self.accu_loss + loss, loss) - mean_loss = self.accu_loss / self.local_step - is_accu_step = self.not_equal(self.local_step, self.accumulation_steps) - - # alloc status and clear should be right before gradoperation - init = self.alloc_status() - self.clear_before_grad(init) - grads = self.grad(self.network, weights)(input_ids, - input_mask, - token_type_id, - next_sentence_labels, - masked_lm_positions, - masked_lm_ids, - masked_lm_weights, - self.cast(scaling_sens, - mstype.float32)) - - accu_grads = self.hyper_map(add_grads, self.accu_grads, grads) - scaling = scaling_sens * self.degree * self.accumulation_steps - grads = self.hyper_map(F.partial(grad_scale, scaling), accu_grads) - grads = 
self.grad_reducer(grads) - - self.get_status(init) - flag_sum = self.reduce_sum(init, (0,)) - flag_reduce = self.overflow_reducer(flag_sum) - overflow = self.less_equal(self.base, flag_reduce) - overflow = self.logical_or(self.not_equal(self.accu_overflow, self.zero), overflow) - accu_overflow = self.select(overflow, self.one, self.zero) - self.accu_overflow = self.select(is_accu_step, accu_overflow, self.zero) - overflow = self.reshape(overflow, (())) - - if is_accu_step: - succ = False - accu_succ = self.hyper_map(update_accu_grads, self.accu_grads, accu_grads) - succ = F.depend(succ, accu_succ) - else: - if sens is None: - overflow = self.loss_scaling_manager(self.loss_scale, overflow) - if overflow: - succ = False - else: - if self.enable_global_norm: - grads = C.clip_by_global_norm(grads, 1.0, None) - else: - grads = self.hyper_map(F.partial(clip_grad, GRADIENT_CLIP_TYPE, GRADIENT_CLIP_VALUE), grads) - - succ = self.optimizer(grads) - - accu_succ = self.hyper_map(reset_accu_grads, self.accu_grads) - succ = F.depend(succ, accu_succ) - - ret = (mean_loss, overflow, scaling_sens) - return F.depend(ret, succ) diff --git a/vega/algorithms/nlp/src/bert_model.py b/vega/algorithms/nlp/src/bert_model.py deleted file mode 100644 index 2ead006..0000000 --- a/vega/algorithms/nlp/src/bert_model.py +++ /dev/null @@ -1,891 +0,0 @@ -# Copyright 2020 Huawei Technologies Co., Ltd -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# ============================================================================ -"""Bert model.""" - -import math -import copy -import numpy as np -import mindspore.common.dtype as mstype -import mindspore.nn as nn -import mindspore.ops.functional as F -from mindspore.common.initializer import TruncatedNormal, initializer -from mindspore.ops import operations as P -from mindspore.ops import composite as C -from mindspore.common.tensor import Tensor -from mindspore.common.parameter import Parameter - - -class BertConfig: - """ - Configurate for `BertModel`. - - Args: - seq_length (int): Length of input sequence. Default: 128. - vocab_size (int): The shape of each embedding vector. Default: 32000. - hidden_size (int): Size of the bert encoder layers. Default: 768. - num_hidden_layers (int): Number of hidden layers in the BertTransformer encoder - cell. Default: 12. - num_attention_heads (int): Number of attention heads in the BertTransformer - encoder cell. Default: 12. - intermediate_size (int): Size of intermediate layer in the BertTransformer - encoder cell. Default: 3072. - hidden_act (str): Activation function used in the BertTransformer encoder - cell. Default: "gelu". - hidden_dropout_prob (float): The dropout probability for BertOutput. Default: 0.1. - attention_probs_dropout_prob (float): The dropout probability for - BertAttention. Default: 0.1. - max_position_embeddings (int): Maximum length of sequences used in this - model. Default: 512. - type_vocab_size (int): Size of token type vocab. Default: 16. - initializer_range (float): Initialization value of TruncatedNormal. 
Default: 0.02. - use_relative_positions (bool): Specifies whether to use relative positions. Default: False. - dtype (:class:`mindspore.dtype`): Data type of the input. Default: mstype.float32. - compute_type (:class:`mindspore.dtype`): Compute type in BertTransformer. Default: mstype.float32. - """ - - def __init__(self, - seq_length=128, - vocab_size=32000, - hidden_size=768, - num_hidden_layers=12, - num_attention_heads=12, - intermediate_size=3072, - hidden_act="gelu", - hidden_dropout_prob=0.1, - attention_probs_dropout_prob=0.1, - max_position_embeddings=512, - type_vocab_size=16, - initializer_range=0.02, - use_relative_positions=False, - dtype=mstype.float32, - compute_type=mstype.float32): - self.seq_length = seq_length - self.vocab_size = vocab_size - self.hidden_size = hidden_size - self.num_hidden_layers = num_hidden_layers - self.num_attention_heads = num_attention_heads - self.hidden_act = hidden_act - self.intermediate_size = intermediate_size - self.hidden_dropout_prob = hidden_dropout_prob - self.attention_probs_dropout_prob = attention_probs_dropout_prob - self.max_position_embeddings = max_position_embeddings - self.type_vocab_size = type_vocab_size - self.initializer_range = initializer_range - self.use_relative_positions = use_relative_positions - self.dtype = dtype - self.compute_type = compute_type - - -class EmbeddingLookup(nn.Cell): - """ - Embedding lookup table with a fixed dictionary and size. - - Args: - vocab_size (int): Size of the dictionary of embeddings. - embedding_size (int): The size of each embedding vector. - embedding_shape (list): [batch_size, seq_length, embedding_size], the shape of - each embedding vector. - use_one_hot_embeddings (bool): Specifies whether to use one hot encoding form. Default: False. - initializer_range (float): Initialization value of TruncatedNormal. Default: 0.02. - """ - - def __init__(self, - vocab_size, - embedding_size, - embedding_shape, - use_one_hot_embeddings=False, - initializer_range=0.02): - super(EmbeddingLookup, self).__init__() - self.vocab_size = vocab_size - self.use_one_hot_embeddings = use_one_hot_embeddings - self.embedding_table = Parameter(initializer - (TruncatedNormal(initializer_range), - [vocab_size, embedding_size])) - self.expand = P.ExpandDims() - self.shape_flat = (-1,) - self.gather = P.Gather() - self.one_hot = P.OneHot() - self.on_value = Tensor(1.0, mstype.float32) - self.off_value = Tensor(0.0, mstype.float32) - self.array_mul = P.MatMul() - self.reshape = P.Reshape() - self.shape = tuple(embedding_shape) - - def construct(self, input_ids): - """Get output and embeddings lookup table.""" - extended_ids = self.expand(input_ids, -1) - flat_ids = self.reshape(extended_ids, self.shape_flat) - if self.use_one_hot_embeddings: - one_hot_ids = self.one_hot(flat_ids, self.vocab_size, self.on_value, self.off_value) - output_for_reshape = self.array_mul( - one_hot_ids, self.embedding_table) - else: - output_for_reshape = self.gather(self.embedding_table, flat_ids, 0) - output = self.reshape(output_for_reshape, self.shape) - return output, self.embedding_table - - -class EmbeddingPostprocessor(nn.Cell): - """ - Postprocessor apply positional and token type embeddings to word embeddings. - - Args: - embedding_size (int): The size of each embedding vector. - embedding_shape (list): [batch_size, seq_length, embedding_size], the shape of - each embedding vector. - use_token_type (bool): Specifies whether to use token type embeddings. Default: False. - token_type_vocab_size (int): Size of token type vocab. 
Default: 16. - use_one_hot_embeddings (bool): Specifies whether to use one hot encoding form. Default: False. - initializer_range (float): Initialization value of TruncatedNormal. Default: 0.02. - max_position_embeddings (int): Maximum length of sequences used in this - model. Default: 512. - dropout_prob (float): The dropout probability. Default: 0.1. - """ - - def __init__(self, - embedding_size, - embedding_shape, - use_relative_positions=False, - use_token_type=False, - token_type_vocab_size=16, - use_one_hot_embeddings=False, - initializer_range=0.02, - max_position_embeddings=512, - dropout_prob=0.1): - super(EmbeddingPostprocessor, self).__init__() - self.use_token_type = use_token_type - self.token_type_vocab_size = token_type_vocab_size - self.use_one_hot_embeddings = use_one_hot_embeddings - self.max_position_embeddings = max_position_embeddings - self.token_type_embedding = nn.Embedding( - vocab_size=token_type_vocab_size, - embedding_size=embedding_size, - use_one_hot=use_one_hot_embeddings) - self.shape_flat = (-1,) - self.one_hot = P.OneHot() - self.on_value = Tensor(1.0, mstype.float32) - self.off_value = Tensor(0.1, mstype.float32) - self.array_mul = P.MatMul() - self.reshape = P.Reshape() - self.shape = tuple(embedding_shape) - self.dropout = nn.Dropout(1 - dropout_prob) - self.gather = P.Gather() - self.use_relative_positions = use_relative_positions - self.slice = P.StridedSlice() - _, seq, _ = self.shape - self.full_position_embedding = nn.Embedding( - vocab_size=max_position_embeddings, - embedding_size=embedding_size, - use_one_hot=False) - self.layernorm = nn.LayerNorm((embedding_size,)) - self.position_ids = Tensor(np.arange(seq).reshape(-1, seq).astype(np.int32)) - self.add = P.Add() - - def construct(self, token_type_ids, word_embeddings): - """Construct the trainer of Bert.""" - output = word_embeddings - if self.use_token_type: - token_type_embeddings = self.token_type_embedding(token_type_ids) - output = self.add(output, token_type_embeddings) - if not self.use_relative_positions: - position_embeddings = self.full_position_embedding(self.position_ids) - output = self.add(output, position_embeddings) - output = self.layernorm(output) - output = self.dropout(output) - return output - - -class BertOutput(nn.Cell): - """ - Apply a linear computation to hidden status and a residual computation to input. - - Args: - in_channels (int): Input channels. - out_channels (int): Output channels. - initializer_range (float): Initialization value of TruncatedNormal. Default: 0.02. - dropout_prob (float): The dropout probability. Default: 0.1. - compute_type (:class:`mindspore.dtype`): Compute type in BertTransformer. Default: mstype.float32. 
- """ - - def __init__(self, - in_channels, - out_channels, - initializer_range=0.02, - dropout_prob=0.1, - compute_type=mstype.float32): - super(BertOutput, self).__init__() - self.dense = nn.Dense(in_channels, out_channels, - weight_init=TruncatedNormal(initializer_range)).to_float(compute_type) - self.dropout = nn.Dropout(1 - dropout_prob) - self.dropout_prob = dropout_prob - self.add = P.Add() - self.layernorm = nn.LayerNorm((out_channels,)).to_float(compute_type) - self.cast = P.Cast() - - def construct(self, hidden_status, input_tensor): - """Construct the trainer of Bert.""" - output = self.dense(hidden_status) - output = self.dropout(output) - output = self.add(input_tensor, output) - output = self.layernorm(output) - return output - - -class RelaPosMatrixGenerator(nn.Cell): - """ - Generate matrix of relative positions between inputs. - - Args: - length (int): Length of one dim for the matrix to be generated. - max_relative_position (int): Max value of relative position. - """ - - def __init__(self, length, max_relative_position): - super(RelaPosMatrixGenerator, self).__init__() - self._length = length - self._max_relative_position = max_relative_position - self._min_relative_position = -max_relative_position - self.range_length = -length + 1 - - self.tile = P.Tile() - self.range_mat = P.Reshape() - self.sub = P.Sub() - self.expanddims = P.ExpandDims() - self.cast = P.Cast() - - def construct(self): - """Generate matrix of relative positions between inputs.""" - range_vec_row_out = self.cast(F.tuple_to_array(F.make_range(self._length)), mstype.int32) - range_vec_col_out = self.range_mat(range_vec_row_out, (self._length, -1)) - tile_row_out = self.tile(range_vec_row_out, (self._length,)) - tile_col_out = self.tile(range_vec_col_out, (1, self._length)) - range_mat_out = self.range_mat(tile_row_out, (self._length, self._length)) - transpose_out = self.range_mat(tile_col_out, (self._length, self._length)) - distance_mat = self.sub(range_mat_out, transpose_out) - - distance_mat_clipped = C.clip_by_value(distance_mat, - self._min_relative_position, - self._max_relative_position) - - # Shift values to be >=0. Each integer still uniquely identifies a - # relative position difference. - final_mat = distance_mat_clipped + self._max_relative_position - return final_mat - - -class RelaPosEmbeddingsGenerator(nn.Cell): - """ - Generate tensor of size [length, length, depth]. - - Args: - length (int): Length of one dim for the matrix to be generated. - depth (int): Size of each attention head. - max_relative_position (int): Maxmum value of relative position. - initializer_range (float): Initialization value of TruncatedNormal. - use_one_hot_embeddings (bool): Specifies whether to use one hot encoding form. Default: False. 
- """ - - def __init__(self, - length, - depth, - max_relative_position, - initializer_range, - use_one_hot_embeddings=False): - super(RelaPosEmbeddingsGenerator, self).__init__() - self.depth = depth - self.vocab_size = max_relative_position * 2 + 1 - self.use_one_hot_embeddings = use_one_hot_embeddings - - self.embeddings_table = Parameter( - initializer(TruncatedNormal(initializer_range), - [self.vocab_size, self.depth])) - - self.relative_positions_matrix = RelaPosMatrixGenerator(length=length, - max_relative_position=max_relative_position) - self.reshape = P.Reshape() - self.one_hot = nn.OneHot(depth=self.vocab_size) - self.shape = P.Shape() - self.gather = P.Gather() # index_select - self.matmul = P.BatchMatMul() - - def construct(self): - """Generate embedding for each relative position of dimension depth.""" - relative_positions_matrix_out = self.relative_positions_matrix() - - if self.use_one_hot_embeddings: - flat_relative_positions_matrix = self.reshape(relative_positions_matrix_out, (-1,)) - one_hot_relative_positions_matrix = self.one_hot( - flat_relative_positions_matrix) - embeddings = self.matmul(one_hot_relative_positions_matrix, self.embeddings_table) - my_shape = self.shape(relative_positions_matrix_out) + (self.depth,) - embeddings = self.reshape(embeddings, my_shape) - else: - embeddings = self.gather(self.embeddings_table, - relative_positions_matrix_out, 0) - return embeddings - - -class SaturateCast(nn.Cell): - """ - Perform a safe saturating cast. - - Args: - src_type (:class:`mindspore.dtype`): The type of the elements of the input tensor. Default: mstype.float32. - dst_type (:class:`mindspore.dtype`): The type of the elements of the output tensor. Default: mstype.float32. - """ - - def __init__(self, src_type=mstype.float32, dst_type=mstype.float32): - super(SaturateCast, self).__init__() - np_type = mstype.dtype_to_nptype(dst_type) - - self.tensor_min_type = float(np.finfo(np_type).min) - self.tensor_max_type = float(np.finfo(np_type).max) - - self.min_op = P.Minimum() - self.max_op = P.Maximum() - self.cast = P.Cast() - self.dst_type = dst_type - - def construct(self, x): - """Construct the trainer of Bert.""" - out = self.max_op(x, self.tensor_min_type) - out = self.min_op(out, self.tensor_max_type) - return self.cast(out, self.dst_type) - - -class BertAttention(nn.Cell): - """ - Apply multi-headed attention from "from_tensor" to "to_tensor". - - Args: - from_tensor_width (int): Size of last dim of from_tensor. - to_tensor_width (int): Size of last dim of to_tensor. - from_seq_length (int): Length of from_tensor sequence. - to_seq_length (int): Length of to_tensor sequence. - num_attention_heads (int): Number of attention heads. Default: 1. - size_per_head (int): Size of each attention head. Default: 512. - query_act (str): Activation function for the query transform. Default: None. - key_act (str): Activation function for the key transform. Default: None. - value_act (str): Activation function for the value transform. Default: None. - has_attention_mask (bool): Specifies whether to use attention mask. Default: False. - attention_probs_dropout_prob (float): The dropout probability for - BertAttention. Default: 0.0. - use_one_hot_embeddings (bool): Specifies whether to use one hot encoding form. Default: False. - initializer_range (float): Initialization value of TruncatedNormal. Default: 0.02. - do_return_2d_tensor (bool): True for return 2d tensor. False for return 3d - tensor. Default: False. 
- use_relative_positions (bool): Specifies whether to use relative positions. Default: False. - compute_type (:class:`mindspore.dtype`): Compute type in BertAttention. Default: mstype.float32. - """ - - def __init__(self, - from_tensor_width, - to_tensor_width, - from_seq_length, - to_seq_length, - num_attention_heads=1, - size_per_head=512, - query_act=None, - key_act=None, - value_act=None, - has_attention_mask=False, - attention_probs_dropout_prob=0.0, - use_one_hot_embeddings=False, - initializer_range=0.02, - do_return_2d_tensor=False, - use_relative_positions=False, - compute_type=mstype.float32): - - super(BertAttention, self).__init__() - self.from_seq_length = from_seq_length - self.to_seq_length = to_seq_length - self.num_attention_heads = num_attention_heads - self.size_per_head = size_per_head - self.has_attention_mask = has_attention_mask - self.use_relative_positions = use_relative_positions - - self.scores_mul = 1.0 / math.sqrt(float(self.size_per_head)) - self.reshape = P.Reshape() - self.shape_from_2d = (-1, from_tensor_width) - self.shape_to_2d = (-1, to_tensor_width) - weight = TruncatedNormal(initializer_range) - units = num_attention_heads * size_per_head - self.query_layer = nn.Dense(from_tensor_width, - units, - activation=query_act, - weight_init=weight).to_float(compute_type) - self.key_layer = nn.Dense(to_tensor_width, - units, - activation=key_act, - weight_init=weight).to_float(compute_type) - self.value_layer = nn.Dense(to_tensor_width, - units, - activation=value_act, - weight_init=weight).to_float(compute_type) - - self.shape_from = (-1, from_seq_length, num_attention_heads, size_per_head) - self.shape_to = (-1, to_seq_length, num_attention_heads, size_per_head) - - self.matmul_trans_b = P.BatchMatMul(transpose_b=True) - self.multiply = P.Mul() - self.transpose = P.Transpose() - self.trans_shape = (0, 2, 1, 3) - self.trans_shape_relative = (2, 0, 1, 3) - self.trans_shape_position = (1, 2, 0, 3) - self.multiply_data = -10000.0 - self.matmul = P.BatchMatMul() - - self.softmax = nn.Softmax() - self.dropout = nn.Dropout(1 - attention_probs_dropout_prob) - - if self.has_attention_mask: - self.expand_dims = P.ExpandDims() - self.sub = P.Sub() - self.add = P.Add() - self.cast = P.Cast() - self.get_dtype = P.DType() - if do_return_2d_tensor: - self.shape_return = (-1, num_attention_heads * size_per_head) - else: - self.shape_return = (-1, from_seq_length, num_attention_heads * size_per_head) - - self.cast_compute_type = SaturateCast(dst_type=compute_type) - if self.use_relative_positions: - self._generate_relative_positions_embeddings = \ - RelaPosEmbeddingsGenerator(length=to_seq_length, - depth=size_per_head, - max_relative_position=16, - initializer_range=initializer_range, - use_one_hot_embeddings=use_one_hot_embeddings) - - def construct(self, from_tensor, to_tensor, attention_mask): - """Reshape 2d/3d input tensors to 2d.""" - from_tensor_2d = self.reshape(from_tensor, self.shape_from_2d) - to_tensor_2d = self.reshape(to_tensor, self.shape_to_2d) - query_out = self.query_layer(from_tensor_2d) - key_out = self.key_layer(to_tensor_2d) - value_out = self.value_layer(to_tensor_2d) - - query_layer = self.reshape(query_out, self.shape_from) - query_layer = self.transpose(query_layer, self.trans_shape) - key_layer = self.reshape(key_out, self.shape_to) - key_layer = self.transpose(key_layer, self.trans_shape) - - attention_scores = self.matmul_trans_b(query_layer, key_layer) - - # use_relative_position, supplementary logic - if self.use_relative_positions: - # 
relations_keys is [F|T, F|T, H] - relations_keys = self._generate_relative_positions_embeddings() - relations_keys = self.cast_compute_type(relations_keys) - # query_layer_t is [F, B, N, H] - query_layer_t = self.transpose(query_layer, self.trans_shape_relative) - # query_layer_r is [F, B * N, H] - query_layer_r = self.reshape(query_layer_t, - (self.from_seq_length, - -1, - self.size_per_head)) - # key_position_scores is [F, B * N, F|T] - key_position_scores = self.matmul_trans_b(query_layer_r, - relations_keys) - # key_position_scores_r is [F, B, N, F|T] - key_position_scores_r = self.reshape(key_position_scores, - (self.from_seq_length, - -1, - self.num_attention_heads, - self.from_seq_length)) - # key_position_scores_r_t is [B, N, F, F|T] - key_position_scores_r_t = self.transpose(key_position_scores_r, - self.trans_shape_position) - attention_scores = attention_scores + key_position_scores_r_t - - attention_scores = self.multiply(self.scores_mul, attention_scores) - - if self.has_attention_mask: - attention_mask = self.expand_dims(attention_mask, 1) - multiply_out = self.sub(self.cast(F.tuple_to_array((1.0,)), self.get_dtype(attention_scores)), - self.cast(attention_mask, self.get_dtype(attention_scores))) - - adder = self.multiply(multiply_out, self.multiply_data) - attention_scores = self.add(adder, attention_scores) - - attention_probs = self.softmax(attention_scores) - attention_probs = self.dropout(attention_probs) - - value_layer = self.reshape(value_out, self.shape_to) - value_layer = self.transpose(value_layer, self.trans_shape) - context_layer = self.matmul(attention_probs, value_layer) - - # use_relative_position, supplementary logic - if self.use_relative_positions: - # relations_values is [F|T, F|T, H] - relations_values = self._generate_relative_positions_embeddings() - relations_values = self.cast_compute_type(relations_values) - # attention_probs_t is [F, B, N, T] - attention_probs_t = self.transpose(attention_probs, self.trans_shape_relative) - # attention_probs_r is [F, B * N, T] - attention_probs_r = self.reshape( - attention_probs_t, - (self.from_seq_length, - -1, - self.to_seq_length)) - # value_position_scores is [F, B * N, H] - value_position_scores = self.matmul(attention_probs_r, - relations_values) - # value_position_scores_r is [F, B, N, H] - value_position_scores_r = self.reshape(value_position_scores, - (self.from_seq_length, - -1, - self.num_attention_heads, - self.size_per_head)) - # value_position_scores_r_t is [B, N, F, H] - value_position_scores_r_t = self.transpose(value_position_scores_r, - self.trans_shape_position) - context_layer = context_layer + value_position_scores_r_t - - context_layer = self.transpose(context_layer, self.trans_shape) - context_layer = self.reshape(context_layer, self.shape_return) - - return context_layer - - -class BertSelfAttention(nn.Cell): - """ - Apply self-attention. - - Args: - seq_length (int): Length of input sequence. - hidden_size (int): Size of the bert encoder layers. - num_attention_heads (int): Number of attention heads. Default: 12. - attention_probs_dropout_prob (float): The dropout probability for - BertAttention. Default: 0.1. - use_one_hot_embeddings (bool): Specifies whether to use one_hot encoding form. Default: False. - initializer_range (float): Initialization value of TruncatedNormal. Default: 0.02. - hidden_dropout_prob (float): The dropout probability for BertOutput. Default: 0.1. - use_relative_positions (bool): Specifies whether to use relative positions. Default: False. 
- compute_type (:class:`mindspore.dtype`): Compute type in BertSelfAttention. Default: mstype.float32. - """ - - def __init__(self, - seq_length, - hidden_size, - num_attention_heads=12, - attention_probs_dropout_prob=0.1, - use_one_hot_embeddings=False, - initializer_range=0.02, - hidden_dropout_prob=0.1, - use_relative_positions=False, - compute_type=mstype.float32): - super(BertSelfAttention, self).__init__() - if hidden_size % num_attention_heads != 0: - raise ValueError("The hidden size (%d) is not a multiple of the number " - "of attention heads (%d)" % (hidden_size, num_attention_heads)) - - self.size_per_head = int(hidden_size / num_attention_heads) - - self.attention = BertAttention( - from_tensor_width=hidden_size, - to_tensor_width=hidden_size, - from_seq_length=seq_length, - to_seq_length=seq_length, - num_attention_heads=num_attention_heads, - size_per_head=self.size_per_head, - attention_probs_dropout_prob=attention_probs_dropout_prob, - use_one_hot_embeddings=use_one_hot_embeddings, - initializer_range=initializer_range, - use_relative_positions=use_relative_positions, - has_attention_mask=True, - do_return_2d_tensor=True, - compute_type=compute_type) - - self.output = BertOutput(in_channels=hidden_size, - out_channels=hidden_size, - initializer_range=initializer_range, - dropout_prob=hidden_dropout_prob, - compute_type=compute_type) - self.reshape = P.Reshape() - self.shape = (-1, hidden_size) - - def construct(self, input_tensor, attention_mask): - """Construct the trainer of Bert.""" - input_tensor = self.reshape(input_tensor, self.shape) - attention_output = self.attention(input_tensor, input_tensor, attention_mask) - output = self.output(attention_output, input_tensor) - return output - - -class BertEncoderCell(nn.Cell): - """ - Encode cells used in BertTransformer. - - Args: - hidden_size (int): Size of the bert encoder layers. Default: 768. - seq_length (int): Length of input sequence. Default: 512. - num_attention_heads (int): Number of attention heads. Default: 12. - intermediate_size (int): Size of intermediate layer. Default: 3072. - attention_probs_dropout_prob (float): The dropout probability for - BertAttention. Default: 0.02. - use_one_hot_embeddings (bool): Specifies whether to use one hot encoding form. Default: False. - initializer_range (float): Initialization value of TruncatedNormal. Default: 0.02. - hidden_dropout_prob (float): The dropout probability for BertOutput. Default: 0.1. - use_relative_positions (bool): Specifies whether to use relative positions. Default: False. - hidden_act (str): Activation function. Default: "gelu". - compute_type (:class:`mindspore.dtype`): Compute type in attention. Default: mstype.float32. 
- """ - - def __init__(self, - hidden_size=768, - seq_length=512, - num_attention_heads=12, - intermediate_size=3072, - attention_probs_dropout_prob=0.02, - use_one_hot_embeddings=False, - initializer_range=0.02, - hidden_dropout_prob=0.1, - use_relative_positions=False, - hidden_act="gelu", - compute_type=mstype.float32): - super(BertEncoderCell, self).__init__() - self.attention = BertSelfAttention( - hidden_size=hidden_size, - seq_length=seq_length, - num_attention_heads=num_attention_heads, - attention_probs_dropout_prob=attention_probs_dropout_prob, - use_one_hot_embeddings=use_one_hot_embeddings, - initializer_range=initializer_range, - hidden_dropout_prob=hidden_dropout_prob, - use_relative_positions=use_relative_positions, - compute_type=compute_type) - self.intermediate = nn.Dense(in_channels=hidden_size, - out_channels=intermediate_size, - activation=hidden_act, - weight_init=TruncatedNormal(initializer_range)).to_float(compute_type) - self.output = BertOutput(in_channels=intermediate_size, - out_channels=hidden_size, - initializer_range=initializer_range, - dropout_prob=hidden_dropout_prob, - compute_type=compute_type) - - def construct(self, hidden_states, attention_mask): - """Construct the trainer of Bert.""" - # self-attention - attention_output = self.attention(hidden_states, attention_mask) - # feed construct - intermediate_output = self.intermediate(attention_output) - # add and normalize - output = self.output(intermediate_output, attention_output) - return output - - -class BertTransformer(nn.Cell): - """ - Multi-layer bert transformer. - - Args: - hidden_size (int): Size of the encoder layers. - seq_length (int): Length of input sequence. - num_hidden_layers (int): Number of hidden layers in encoder cells. - num_attention_heads (int): Number of attention heads in encoder cells. Default: 12. - intermediate_size (int): Size of intermediate layer in encoder cells. Default: 3072. - attention_probs_dropout_prob (float): The dropout probability for - BertAttention. Default: 0.1. - use_one_hot_embeddings (bool): Specifies whether to use one hot encoding form. Default: False. - initializer_range (float): Initialization value of TruncatedNormal. Default: 0.02. - hidden_dropout_prob (float): The dropout probability for BertOutput. Default: 0.1. - use_relative_positions (bool): Specifies whether to use relative positions. Default: False. - hidden_act (str): Activation function used in the encoder cells. Default: "gelu". - compute_type (:class:`mindspore.dtype`): Compute type in BertTransformer. Default: mstype.float32. - return_all_encoders (bool): Specifies whether to return all encoders. Default: False. 
- """ - - def __init__(self, - hidden_size, - seq_length, - num_hidden_layers, - num_attention_heads=12, - intermediate_size=3072, - attention_probs_dropout_prob=0.1, - use_one_hot_embeddings=False, - initializer_range=0.02, - hidden_dropout_prob=0.1, - use_relative_positions=False, - hidden_act="gelu", - compute_type=mstype.float32, - return_all_encoders=False): - super(BertTransformer, self).__init__() - self.return_all_encoders = return_all_encoders - - layers = [] - for _ in range(num_hidden_layers): - layer = BertEncoderCell(hidden_size=hidden_size, - seq_length=seq_length, - num_attention_heads=num_attention_heads, - intermediate_size=intermediate_size, - attention_probs_dropout_prob=attention_probs_dropout_prob, - use_one_hot_embeddings=use_one_hot_embeddings, - initializer_range=initializer_range, - hidden_dropout_prob=hidden_dropout_prob, - use_relative_positions=use_relative_positions, - hidden_act=hidden_act, - compute_type=compute_type) - layers.append(layer) - - self.layers = nn.CellList(layers) - - self.reshape = P.Reshape() - self.shape = (-1, hidden_size) - self.out_shape = (-1, seq_length, hidden_size) - - def construct(self, input_tensor, attention_mask): - """Construct the trainer of Bert.""" - prev_output = self.reshape(input_tensor, self.shape) - - all_encoder_layers = () - for layer_module in self.layers: - layer_output = layer_module(prev_output, attention_mask) - prev_output = layer_output - - if self.return_all_encoders: - layer_output = self.reshape(layer_output, self.out_shape) - all_encoder_layers = all_encoder_layers + (layer_output,) - - if not self.return_all_encoders: - prev_output = self.reshape(prev_output, self.out_shape) - all_encoder_layers = all_encoder_layers + (prev_output,) - return all_encoder_layers - - -class CreateAttentionMaskFromInputMask(nn.Cell): - """ - Create attention mask according to input mask. - - Args: - config (Class): Configuration for BertModel. - """ - - def __init__(self, config): - super(CreateAttentionMaskFromInputMask, self).__init__() - self.input_mask = None - - self.cast = P.Cast() - self.reshape = P.Reshape() - self.shape = (-1, 1, config.seq_length) - - def construct(self, input_mask): - """Construct the trainer of Bert.""" - attention_mask = self.cast(self.reshape(input_mask, self.shape), mstype.float32) - return attention_mask - - -class BertModel(nn.Cell): - """ - Encode Representations from Transformers. - - Args: - config (Class): Configuration for BertModel. - is_training (bool): True for training mode. False for eval mode. - use_one_hot_embeddings (bool): Specifies whether to use one hot encoding form. Default: False. 
- """ - - def __init__(self, - config, - is_training, - use_one_hot_embeddings=False): - super(BertModel, self).__init__() - config = copy.deepcopy(config) - if not is_training: - config.hidden_dropout_prob = 0.0 - config.attention_probs_dropout_prob = 0.0 - - self.seq_length = config.seq_length - self.hidden_size = config.hidden_size - self.num_hidden_layers = config.num_hidden_layers - self.embedding_size = config.hidden_size - self.token_type_ids = None - - self.last_idx = self.num_hidden_layers - 1 - output_embedding_shape = [-1, self.seq_length, self.embedding_size] - - self.bert_embedding_lookup = nn.Embedding( - vocab_size=config.vocab_size, - embedding_size=self.embedding_size, - use_one_hot=use_one_hot_embeddings, - embedding_table=TruncatedNormal(config.initializer_range)) - - self.bert_embedding_postprocessor = EmbeddingPostprocessor( - embedding_size=self.embedding_size, - embedding_shape=output_embedding_shape, - use_relative_positions=config.use_relative_positions, - use_token_type=True, - token_type_vocab_size=config.type_vocab_size, - use_one_hot_embeddings=use_one_hot_embeddings, - initializer_range=0.02, - max_position_embeddings=config.max_position_embeddings, - dropout_prob=config.hidden_dropout_prob) - - self.bert_encoder = BertTransformer( - hidden_size=self.hidden_size, - seq_length=self.seq_length, - num_attention_heads=config.num_attention_heads, - num_hidden_layers=self.num_hidden_layers, - intermediate_size=config.intermediate_size, - attention_probs_dropout_prob=config.attention_probs_dropout_prob, - use_one_hot_embeddings=use_one_hot_embeddings, - initializer_range=config.initializer_range, - hidden_dropout_prob=config.hidden_dropout_prob, - use_relative_positions=config.use_relative_positions, - hidden_act=config.hidden_act, - compute_type=config.compute_type, - return_all_encoders=True) - - self.cast = P.Cast() - self.dtype = config.dtype - self.cast_compute_type = SaturateCast(dst_type=config.compute_type) - self.slice = P.StridedSlice() - - self.squeeze_1 = P.Squeeze(axis=1) - self.dense = nn.Dense(self.hidden_size, self.hidden_size, - activation="tanh", - weight_init=TruncatedNormal(config.initializer_range)).to_float(config.compute_type) - self._create_attention_mask_from_input_mask = CreateAttentionMaskFromInputMask(config) - - def construct(self, input_ids, token_type_ids, input_mask): - """Construct the trainer of Bert.""" - # embedding - embedding_tables = self.bert_embedding_lookup.embedding_table - word_embeddings = self.bert_embedding_lookup(input_ids) - embedding_output = self.bert_embedding_postprocessor(token_type_ids, - word_embeddings) - - # attention mask [batch_size, seq_length, seq_length] - attention_mask = self._create_attention_mask_from_input_mask(input_mask) - - # bert encoder - encoder_output = self.bert_encoder(self.cast_compute_type(embedding_output), - attention_mask) - - sequence_output = self.cast(encoder_output[self.last_idx], self.dtype) - - # pooler - batch_size = P.Shape()(input_ids)[0] - sequence_slice = self.slice(sequence_output, - (0, 0, 0), - (batch_size, 1, self.hidden_size), - (1, 1, 1)) - first_token = self.squeeze_1(sequence_slice) - pooled_output = self.dense(first_token) - pooled_output = self.cast(pooled_output, self.dtype) - - return sequence_output, pooled_output, embedding_tables diff --git a/vega/algorithms/nlp/src/cluener_evaluation.py b/vega/algorithms/nlp/src/cluener_evaluation.py deleted file mode 100644 index 8ca4812..0000000 --- a/vega/algorithms/nlp/src/cluener_evaluation.py +++ /dev/null @@ -1,74 +0,0 
@@ -# Copyright 2020 Huawei Technologies Co., Ltd -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# ============================================================================ - -"""Bert clue evaluation.""" - -import json -import numpy as np -import mindspore.common.dtype as mstype -from mindspore.common.tensor import Tensor -from . import tokenization -from .sample_process import label_generation, process_one_example_p -from .CRF import postprocess -from .model_utils.config import bert_net_cfg -from .score import get_result - - -def process(model=None, text="", tokenizer_=None, use_crf="", tag_to_index=None, vocab=""): - """Process text.""" - data = [text] - features = [] - res = [] - ids = [] - for i in data: - feature = process_one_example_p(tokenizer_, vocab, i, max_seq_len=bert_net_cfg.seq_length) - features.append(feature) - input_ids, input_mask, token_type_id = feature - input_ids = Tensor(np.array(input_ids), mstype.int32) - input_mask = Tensor(np.array(input_mask), mstype.int32) - token_type_id = Tensor(np.array(token_type_id), mstype.int32) - if use_crf.lower() == "true": - backpointers, best_tag_id = model.predict(input_ids, input_mask, token_type_id, Tensor(1)) - best_path = postprocess(backpointers, best_tag_id) - logits = [] - for ele in best_path: - logits.extend(ele) - ids = logits - else: - logits = model.predict(input_ids, input_mask, token_type_id, Tensor(1)) - ids = logits.asnumpy() - ids = np.argmax(ids, axis=-1) - ids = list(ids) - res = label_generation(text=text, probs=ids, tag_to_index=tag_to_index) - return res - - -def submit(model=None, path="", vocab_file="", use_crf="", label_file="", tag_to_index=None): - """Submit task.""" - tokenizer_ = tokenization.FullTokenizer(vocab_file=vocab_file) - data = [] - for line in open(path): - if not line.strip(): - continue - oneline = json.loads(line.strip()) - res = process(model=model, text=oneline["text"], tokenizer_=tokenizer_, - use_crf=use_crf, tag_to_index=tag_to_index, vocab=vocab_file) - data.append(json.dumps({"label": res}, ensure_ascii=False)) - open("ner_predict.json", "w").write("\n".join(data)) - labels = [] - with open(label_file) as f: - for label in f: - labels.append(label.strip()) - get_result(labels, "ner_predict.json", path) diff --git a/vega/algorithms/nlp/src/dataset.py b/vega/algorithms/nlp/src/dataset.py deleted file mode 100644 index 57c06b7..0000000 --- a/vega/algorithms/nlp/src/dataset.py +++ /dev/null @@ -1,127 +0,0 @@ -# Copyright 2020 Huawei Technologies Co., Ltd -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
-# See the License for the specific language governing permissions and -# limitations under the License. -# ============================================================================ -"""Data operations, will be used in run_pretrain.py.""" - -import os -import mindspore.common.dtype as mstype -import mindspore.dataset as ds -import mindspore.dataset.transforms.c_transforms as C -from mindspore import log as logger - - -def create_bert_dataset(device_num=1, rank=0, do_shuffle="true", data_dir=None, schema_dir=None, batch_size=32): - """Create train dataset.""" - # apply repeat operations - files = os.listdir(data_dir) - data_files = [] - for file_name in files: - if "tfrecord" in file_name: - data_files.append(os.path.join(data_dir, file_name)) - data_set = ds.TFRecordDataset(data_files, schema_dir if schema_dir != "" else None, - columns_list=["input_ids", "input_mask", "segment_ids", "next_sentence_labels", - "masked_lm_positions", "masked_lm_ids", "masked_lm_weights"], - shuffle=ds.Shuffle.FILES if do_shuffle == "true" else False, - num_shards=device_num, shard_id=rank, shard_equal_rows=True) - ori_dataset_size = data_set.get_dataset_size() - print('origin dataset size: ', ori_dataset_size) - type_cast_op = C.TypeCast(mstype.int32) - data_set = data_set.map(operations=type_cast_op, input_columns="masked_lm_ids") - data_set = data_set.map(operations=type_cast_op, input_columns="masked_lm_positions") - data_set = data_set.map(operations=type_cast_op, input_columns="next_sentence_labels") - data_set = data_set.map(operations=type_cast_op, input_columns="segment_ids") - data_set = data_set.map(operations=type_cast_op, input_columns="input_mask") - data_set = data_set.map(operations=type_cast_op, input_columns="input_ids") - # apply batch operations - data_set = data_set.batch(batch_size, drop_remainder=True) - logger.info("data size: {}".format(data_set.get_dataset_size())) - logger.info("repeat count: {}".format(data_set.get_repeat_count())) - return data_set - - -def create_ner_dataset(batch_size=1, repeat_count=1, assessment_method="accuracy", data_file_path=None, - dataset_format="mindrecord", schema_file_path=None, do_shuffle=True, drop_remainder=True): - """Create finetune or evaluation dataset.""" - type_cast_op = C.TypeCast(mstype.int32) - if dataset_format == "mindrecord": - dataset = ds.MindDataset([data_file_path], - columns_list=["input_ids", "input_mask", "segment_ids", "label_ids"], - shuffle=do_shuffle) - else: - dataset = ds.TFRecordDataset([data_file_path], schema_file_path if schema_file_path != "" else None, - columns_list=["input_ids", "input_mask", "segment_ids", "label_ids"], - shuffle=do_shuffle) - if assessment_method == "Spearman_correlation": - type_cast_op_float = C.TypeCast(mstype.float32) - dataset = dataset.map(operations=type_cast_op_float, input_columns="label_ids") - else: - dataset = dataset.map(operations=type_cast_op, input_columns="label_ids") - dataset = dataset.map(operations=type_cast_op, input_columns="segment_ids") - dataset = dataset.map(operations=type_cast_op, input_columns="input_mask") - dataset = dataset.map(operations=type_cast_op, input_columns="input_ids") - dataset = dataset.repeat(repeat_count) - # apply batch operations - dataset = dataset.batch(batch_size, drop_remainder=drop_remainder) - return dataset - - -def create_classification_dataset(batch_size=1, repeat_count=1, assessment_method="accuracy", - data_file_path=None, schema_file_path=None, do_shuffle=True): - """Create finetune or evaluation dataset.""" - type_cast_op = 
C.TypeCast(mstype.int32) - data_set = ds.TFRecordDataset([data_file_path], schema_file_path if schema_file_path != "" else None, - columns_list=["input_ids", "input_mask", "segment_ids", "label_ids"], - shuffle=do_shuffle) - if assessment_method == "Spearman_correlation": - type_cast_op_float = C.TypeCast(mstype.float32) - data_set = data_set.map(operations=type_cast_op_float, input_columns="label_ids") - else: - data_set = data_set.map(operations=type_cast_op, input_columns="label_ids") - data_set = data_set.map(operations=type_cast_op, input_columns="segment_ids") - data_set = data_set.map(operations=type_cast_op, input_columns="input_mask") - data_set = data_set.map(operations=type_cast_op, input_columns="input_ids") - data_set = data_set.repeat(repeat_count) - # apply batch operations - data_set = data_set.batch(batch_size, drop_remainder=True) - return data_set - - -def generator_squad(data_features): - """Construct the trainer of Bert.""" - for feature in data_features: - yield (feature.input_ids, feature.input_mask, feature.segment_ids, feature.unique_id) - - -def create_squad_dataset(batch_size=1, repeat_count=1, data_file_path=None, schema_file_path=None, - is_training=True, do_shuffle=True): - """Create finetune or evaluation dataset.""" - type_cast_op = C.TypeCast(mstype.int32) - if is_training: - data_set = ds.TFRecordDataset([data_file_path], schema_file_path if schema_file_path != "" else None, - columns_list=["input_ids", "input_mask", "segment_ids", "start_positions", - "end_positions", "unique_ids", "is_impossible"], - shuffle=do_shuffle) - data_set = data_set.map(operations=type_cast_op, input_columns="start_positions") - data_set = data_set.map(operations=type_cast_op, input_columns="end_positions") - else: - data_set = ds.GeneratorDataset(generator_squad(data_file_path), shuffle=do_shuffle, - column_names=["input_ids", "input_mask", "segment_ids", "unique_ids"]) - data_set = data_set.map(operations=type_cast_op, input_columns="segment_ids") - data_set = data_set.map(operations=type_cast_op, input_columns="input_mask") - data_set = data_set.map(operations=type_cast_op, input_columns="input_ids") - data_set = data_set.map(operations=type_cast_op, input_columns="unique_ids") - data_set = data_set.repeat(repeat_count) - # apply batch operations - data_set = data_set.batch(batch_size, drop_remainder=True) - return data_set diff --git a/vega/algorithms/nlp/src/finetune_eval_model.py b/vega/algorithms/nlp/src/finetune_eval_model.py deleted file mode 100644 index d184051..0000000 --- a/vega/algorithms/nlp/src/finetune_eval_model.py +++ /dev/null @@ -1,121 +0,0 @@ -# Copyright 2020 Huawei Technologies Co., Ltd -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
-# ============================================================================ - -"""Bert finetune and evaluation model script.""" - -import mindspore.nn as nn -from mindspore.common.initializer import TruncatedNormal -from mindspore.ops import operations as P -from .bert_model import BertModel - - -class BertCLSModel(nn.Cell): - """Construct the trainer of Bert.""" - - def __init__(self, config, is_training, num_labels=2, dropout_prob=0.0, use_one_hot_embeddings=False, - assessment_method=""): - super(BertCLSModel, self).__init__() - if not is_training: - config.hidden_dropout_prob = 0.0 - config.hidden_probs_dropout_prob = 0.0 - self.bert = BertModel(config, is_training, use_one_hot_embeddings) - self.cast = P.Cast() - self.weight_init = TruncatedNormal(config.initializer_range) - self.log_softmax = P.LogSoftmax(axis=-1) - self.dtype = config.dtype - self.num_labels = num_labels - self.dense_1 = nn.Dense(config.hidden_size, self.num_labels, weight_init=self.weight_init, - has_bias=True).to_float(config.compute_type) - self.dropout = nn.Dropout(1 - dropout_prob) - self.assessment_method = assessment_method - - def construct(self, input_ids, input_mask, token_type_id): - """Construct the trainer of Bert.""" - _, pooled_output, _ = \ - self.bert(input_ids, token_type_id, input_mask) - cls = self.cast(pooled_output, self.dtype) - cls = self.dropout(cls) - logits = self.dense_1(cls) - logits = self.cast(logits, self.dtype) - if self.assessment_method != "spearman_correlation": - logits = self.log_softmax(logits) - return logits - - -class BertSquadModel(nn.Cell): - """Construct the trainer of Bert.""" - - def __init__(self, config, is_training, num_labels=2, dropout_prob=0.0, use_one_hot_embeddings=False): - super(BertSquadModel, self).__init__() - if not is_training: - config.hidden_dropout_prob = 0.0 - config.hidden_probs_dropout_prob = 0.0 - self.bert = BertModel(config, is_training, use_one_hot_embeddings) - self.weight_init = TruncatedNormal(config.initializer_range) - self.dense1 = nn.Dense(config.hidden_size, num_labels, weight_init=self.weight_init, - has_bias=True).to_float(config.compute_type) - self.num_labels = num_labels - self.dtype = config.dtype - self.log_softmax = P.LogSoftmax(axis=1) - self.is_training = is_training - - def construct(self, input_ids, input_mask, token_type_id): - """Construct the trainer of Bert.""" - sequence_output, _, _ = self.bert(input_ids, token_type_id, input_mask) - batch_size, seq_length, hidden_size = P.Shape()(sequence_output) - sequence = P.Reshape()(sequence_output, (-1, hidden_size)) - logits = self.dense1(sequence) - logits = P.Cast()(logits, self.dtype) - logits = P.Reshape()(logits, (batch_size, seq_length, self.num_labels)) - logits = self.log_softmax(logits) - return logits - - -class BertNERModel(nn.Cell): - """Construct the trainer of Bert.""" - - def __init__(self, config, is_training, num_labels=11, use_crf=False, dropout_prob=0.0, - use_one_hot_embeddings=False): - super(BertNERModel, self).__init__() - if not is_training: - config.hidden_dropout_prob = 0.0 - config.hidden_probs_dropout_prob = 0.0 - self.bert = BertModel(config, is_training, use_one_hot_embeddings) - self.cast = P.Cast() - self.weight_init = TruncatedNormal(config.initializer_range) - self.log_softmax = P.LogSoftmax(axis=-1) - self.dtype = config.dtype - self.num_labels = num_labels - self.dense_1 = nn.Dense(config.hidden_size, self.num_labels, weight_init=self.weight_init, - has_bias=True).to_float(config.compute_type) - self.dropout = nn.Dropout(1 - dropout_prob) 
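# [Editor's note] A brief annotation on the removed BertNERModel head, inferred only
# from the surrounding deleted code (not part of the original file): the sequence
# output of shape [batch, seq_length, hidden_size] is flattened with self.shape =
# (-1, hidden_size) to [batch * seq_length, hidden_size], projected by dense_1 to
# num_labels logits per token, and then either reshaped back to self.origin_shape =
# (-1, seq_length, num_labels) when use_crf is True (so the CRF decoder sees
# per-sequence emissions) or passed through log_softmax for per-token log-probabilities.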
- self.reshape = P.Reshape() - self.shape = (-1, config.hidden_size) - self.use_crf = use_crf - self.origin_shape = (-1, config.seq_length, self.num_labels) - - def construct(self, input_ids, input_mask, token_type_id): - """Construct the trainer of Bert.""" - sequence_output, _, _ = \ - self.bert(input_ids, token_type_id, input_mask) - seq = self.dropout(sequence_output) - seq = self.reshape(seq, self.shape) - logits = self.dense_1(seq) - logits = self.cast(logits, self.dtype) - if self.use_crf: - return_value = self.reshape(logits, self.origin_shape) - else: - return_value = self.log_softmax(logits) - return return_value diff --git a/vega/algorithms/nlp/src/model_utils/config.py b/vega/algorithms/nlp/src/model_utils/config.py deleted file mode 100644 index ab111aa..0000000 --- a/vega/algorithms/nlp/src/model_utils/config.py +++ /dev/null @@ -1,214 +0,0 @@ -# Copyright 2021 Huawei Technologies Co., Ltd -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# ============================================================================ - -"""Parse arguments.""" - -import os -import ast -import argparse -from pprint import pformat -import yaml -import mindspore.common.dtype as mstype -from ..bert_model import BertConfig - - -class Config: - """Construct the trainer of Bert.""" - - def __init__(self, cfg_dict): - for k, v in cfg_dict.items(): - if isinstance(v, (list, tuple)): - setattr(self, k, [Config(x) if isinstance(x, dict) else x for x in v]) - else: - setattr(self, k, Config(v) if isinstance(v, dict) else v) - - def __str__(self): - """Construct the trainer of Bert.""" - return pformat(self.__dict__) - - def __repr__(self): - """Construct the trainer of Bert.""" - return self.__str__() - - -def parse_cli_to_yaml(parser, cfg, helper=None, choices=None, cfg_path="pretrain_base_config.yaml"): - """ - Parse command line arguments to the configuration according to the default yaml. - - Args: - parser: Parent parser. - cfg: Base configuration. - helper: Helper description. - cfg_path: Path to the default yaml config. - """ - parser = argparse.ArgumentParser(description="[REPLACE THIS at config.py]", - parents=[parser]) - helper = {} if helper is None else helper - choices = {} if choices is None else choices - for item in cfg: - if not isinstance(cfg[item], list) and not isinstance(cfg[item], dict): - help_description = helper[item] if item in helper else "Please reference to {}".format(cfg_path) - choice = choices[item] if item in choices else None - if isinstance(cfg[item], bool): - parser.add_argument("--" + item, type=ast.literal_eval, default=cfg[item], choices=choice, - help=help_description) - else: - parser.add_argument("--" + item, type=type(cfg[item]), default=cfg[item], choices=choice, - help=help_description) - args = parser.parse_args() - return args - - -def parse_yaml(yaml_path): - """ - Parse the yaml config file. - - Args: - yaml_path: Path to the yaml config. 
- """ - with open(yaml_path, 'r') as fin: - try: - cfgs = yaml.load_all(fin.read(), Loader=yaml.FullLoader) - cfgs = [x for x in cfgs] - if len(cfgs) == 1: - cfg_helper = {} - cfg = cfgs[0] - cfg_choices = {} - elif len(cfgs) == 2: - cfg, cfg_helper = cfgs - cfg_choices = {} - elif len(cfgs) == 3: - cfg, cfg_helper, cfg_choices = cfgs - else: - raise ValueError("At most 3 docs (config, description for help, choices) are supported in config yaml") - # print(cfg_helper) - except Exception: - raise ValueError("Failed to parse yaml") - return cfg, cfg_helper, cfg_choices - - -def merge(args, cfg): - """ - Merge the base config from yaml file and command line arguments. - - Args: - args: Command line arguments. - cfg: Base configuration. - """ - args_var = vars(args) - for item in args_var: - cfg[item] = args_var[item] - return cfg - - -def parse_dtype(dtype): - """Construct the trainer of Bert.""" - if dtype not in ["mstype.float32", "mstype.float16"]: - raise ValueError("Not supported dtype") - - if dtype == "mstype.float32": - return mstype.float32 - if dtype == "mstype.float16": - return mstype.float16 - return None - - -def extra_operations(cfg): - """ - Do extra work on config. - - Args: - config: Object after instantiation of class 'Config'. - """ - def create_filter_fun(keywords): - return lambda x: not (True in [key in x.name.lower() for key in keywords]) - - if cfg.description == 'run_pretrain': - cfg.AdamWeightDecay.decay_filter = create_filter_fun(cfg.AdamWeightDecay.decay_filter) - cfg.Lamb.decay_filter = create_filter_fun(cfg.Lamb.decay_filter) - cfg.base_net_cfg.dtype = parse_dtype(cfg.base_net_cfg.dtype) - cfg.base_net_cfg.compute_type = parse_dtype(cfg.base_net_cfg.compute_type) - cfg.nezha_net_cfg.dtype = parse_dtype(cfg.nezha_net_cfg.dtype) - cfg.nezha_net_cfg.compute_type = parse_dtype(cfg.nezha_net_cfg.compute_type) - cfg.large_net_cfg.dtype = parse_dtype(cfg.large_net_cfg.dtype) - cfg.large_net_cfg.compute_type = parse_dtype(cfg.large_net_cfg.compute_type) - cfg.large_acc_net_cfg.dtype = parse_dtype(cfg.large_acc_net_cfg.dtype) - cfg.large_acc_net_cfg.compute_type = parse_dtype(cfg.large_acc_net_cfg.compute_type) - if cfg.bert_network == 'base': - cfg.batch_size = cfg.base_batch_size - _bert_net_cfg = cfg.base_net_cfg - elif cfg.bert_network == 'nezha': - cfg.batch_size = cfg.nezha_batch_size - _bert_net_cfg = cfg.nezha_net_cfg - elif cfg.bert_network == 'large': - cfg.batch_size = cfg.large_batch_size - _bert_net_cfg = cfg.large_net_cfg - elif cfg.bert_network == 'large_acc': - cfg.batch_size = cfg.large_acc_batch_size - _bert_net_cfg = cfg.large_acc_net_cfg - else: - pass - cfg.bert_net_cfg = BertConfig(**_bert_net_cfg.__dict__) - elif cfg.description == 'run_ner': - cfg.optimizer_cfg.AdamWeightDecay.decay_filter = \ - create_filter_fun(cfg.optimizer_cfg.AdamWeightDecay.decay_filter) - cfg.optimizer_cfg.Lamb.decay_filter = create_filter_fun(cfg.optimizer_cfg.Lamb.decay_filter) - cfg.bert_net_cfg.dtype = mstype.float32 - cfg.bert_net_cfg.compute_type = mstype.float16 - cfg.bert_net_cfg = BertConfig(**cfg.bert_net_cfg.__dict__) - - elif cfg.description == 'run_squad': - cfg.optimizer_cfg.AdamWeightDecay.decay_filter = \ - create_filter_fun(cfg.optimizer_cfg.AdamWeightDecay.decay_filter) - cfg.optimizer_cfg.Lamb.decay_filter = create_filter_fun(cfg.optimizer_cfg.Lamb.decay_filter) - cfg.bert_net_cfg.dtype = mstype.float32 - cfg.bert_net_cfg.compute_type = mstype.float16 - cfg.bert_net_cfg = BertConfig(**cfg.bert_net_cfg.__dict__) - - elif cfg.description == 
'run_classifier': - cfg.optimizer_cfg.AdamWeightDecay.decay_filter = \ - create_filter_fun(cfg.optimizer_cfg.AdamWeightDecay.decay_filter) - cfg.optimizer_cfg.Lamb.decay_filter = create_filter_fun(cfg.optimizer_cfg.Lamb.decay_filter) - cfg.bert_net_cfg.dtype = mstype.float32 - cfg.bert_net_cfg.compute_type = mstype.float16 - cfg.bert_net_cfg = BertConfig(**cfg.bert_net_cfg.__dict__) - else: - pass - - -def get_config(): - """Get Config according to the yaml file and cli arguments.""" - def get_abs_path(path_relative): - current_dir = os.path.dirname(os.path.abspath(__file__)) - return os.path.join(current_dir, path_relative) - parser = argparse.ArgumentParser(description="default name", add_help=False) - parser.add_argument("--config_path", type=get_abs_path, default="./pretrain_config.yaml", - help="Config file path") - path_args, _ = parser.parse_known_args() - default, helper, choices = parse_yaml(path_args.config_path) - # pprint(default) - config_obj = Config(default) - extra_operations(config_obj) - return config_obj - - -config = get_config() -bert_net_cfg = config.bert_net_cfg -if config.description in ('run_classifier', 'run_ner', 'run_squad'): - optimizer_cfg = config.optimizer_cfg - - -if __name__ == '__main__': - print(config) diff --git a/vega/algorithms/nlp/src/model_utils/pretrain_config.yaml b/vega/algorithms/nlp/src/model_utils/pretrain_config.yaml deleted file mode 100644 index e42285c..0000000 --- a/vega/algorithms/nlp/src/model_utils/pretrain_config.yaml +++ /dev/null @@ -1,194 +0,0 @@ -# Builtin Configurations(DO NOT CHANGE THESE CONFIGURATIONS unless you know exactly what you are doing) -enable_modelarts: False -# Url for modelarts -data_url: "" -train_url: "" -checkpoint_url: "" -# Path for local -data_path: "/root/lzc/wiki/" -output_path: "/root/lzc/bert/" -load_path: "/root/lzc/checkpoint_path/" -device_target: "Ascend" -enable_profiling: False - -# ============================================================================== -description: 'run_pretrain' -distribute: 'false' -epoch_size: 40 -device_id: 0 -device_num: 1 -enable_save_ckpt: 'true' -enable_lossscale: 'true' -do_shuffle: 'true' -enable_data_sink: 'true' -data_sink_steps: 1 -accumulation_steps: 1 -allreduce_post_accumulation: 'true' -save_checkpoint_path: '' -load_checkpoint_path: '' -save_checkpoint_steps: 1000 -train_steps: -1 -save_checkpoint_num: 1 -data_dir: '/root/lzc/wiki/' -schema_dir: '' - -# ============================================================================== -# pretrain related -batch_size: 32 -# Available: [base, nezha, large, large_acc] -bert_network: 'base' -loss_scale_value: 65536 -scale_factor: 2 -scale_window: 1000 -optimizer: 'Lamb' -enable_global_norm: False -# pretrain_eval related -data_file: "" -schema_file: "" -finetune_ckpt: "" -# optimizer related -AdamWeightDecay: - learning_rate: 0.00003 # 3e-5 - end_learning_rate: 0.0 - power: 5.0 - weight_decay: 0.00001 # 1e-5 - decay_filter: ['layernorm', 'bias'] - eps: 0.000001 # 1e-6 - warmup_steps: 10000 - -Lamb: - learning_rate: 0.0003 # 3e-4 - end_learning_rate: 0.0 - power: 2.0 - warmup_steps: 10000 - weight_decay: 0.01 - decay_filter: ['layernorm', 'bias'] - eps: 0.00000001 # 1e-8, - -Momentum: - learning_rate: 0.00002 # 2e-5 - momentum: 0.9 - -Thor: - lr_max: 0.006464 - lr_min: 0.000001 # 1e-6 - lr_power: 2.0 - lr_total_steps: 30000 - damping_max: 0.007035 - damping_min: 0.000001 # 1e-6 - damping_power: 4.0 - damping_total_steps: 30000 - momentum: 0.9 - weight_decay: 0.00001 # 1e-5 - loss_scale: 1024.0 - frequency: 100 -# 
============================================================================== -# base -base_batch_size: 256 -base_net_cfg: - seq_length: 128 - vocab_size: 21128 - hidden_size: 768 - num_hidden_layers: 12 - num_attention_heads: 12 - intermediate_size: 3072 - hidden_act: "gelu" - hidden_dropout_prob: 0.1 - attention_probs_dropout_prob: 0.1 - max_position_embeddings: 512 - type_vocab_size: 2 - initializer_range: 0.02 - use_relative_positions: False - dtype: mstype.float32 - compute_type: mstype.float16 -# nezha -nezha_batch_size: 96 -nezha_net_cfg: - seq_length: 128 - vocab_size: 21128 - hidden_size: 1024 - num_hidden_layers: 24 - num_attention_heads: 16 - intermediate_size: 4096 - hidden_act: "gelu" - hidden_dropout_prob: 0.1 - attention_probs_dropout_prob: 0.1 - max_position_embeddings: 512 - type_vocab_size: 2 - initializer_range: 0.02 - use_relative_positions: True - dtype: mstype.float32 - compute_type: mstype.float16 -# large -large_batch_size: 24 -large_net_cfg: - seq_length: 512 - vocab_size: 30522 - hidden_size: 1024 - num_hidden_layers: 24 - num_attention_heads: 16 - intermediate_size: 4096 - hidden_act: "gelu" - hidden_dropout_prob: 0.1 - attention_probs_dropout_prob: 0.1 - max_position_embeddings: 512 - type_vocab_size: 2 - initializer_range: 0.02 - use_relative_positions: False - dtype: mstype.float32 - compute_type: mstype.float16 -# Accelerated large network which is only supported in Ascend yet. -large_acc_batch_size: 24 -large_acc_net_cfg: - seq_length: 512 - vocab_size: 30522 - hidden_size: 1024 - num_hidden_layers: 24 - num_attention_heads: 16 - intermediate_size: 4096 - hidden_act: "fast_gelu" - hidden_dropout_prob: 0.1 - attention_probs_dropout_prob: 0.1 - max_position_embeddings: 512 - type_vocab_size: 2 - initializer_range: 0.02 - use_relative_positions: False - dtype: mstype.float32 - compute_type: mstype.float16 - - ---- -# Help description for each configuration -enable_modelarts: "Whether training on modelarts, default: False" -data_url: "Url for modelarts" -train_url: "Url for modelarts" -data_path: "The location of the input data." -output_path: "The location of the output file." -device_target: "Running platform, choose from Ascend or CPU, and default is Ascend." -enable_profiling: 'Whether enable profiling while training, default: False' - -distribute: "Run distribute, default is 'false'." -epoch_size: "Epoch size, default is 1." -enable_save_ckpt: "Enable save checkpoint, default is true." -enable_lossscale: "Use lossscale or not, default is not." -do_shuffle: "Enable shuffle for dataset, default is true." -enable_data_sink: "Enable data sink, default is true." -data_sink_steps: "Sink steps for each epoch, default is 1." -accumulation_steps: "Accumulating gradients N times before weight update, default is 1." -allreduce_post_accumulation: "Whether to allreduce after accumulation of N steps or after each step, default is true." -save_checkpoint_path: "Save checkpoint path" -load_checkpoint_path: "Load checkpoint file path" -save_checkpoint_steps: "Save checkpoint steps, default is 1000" -train_steps: "Training Steps, default is -1, meaning run all steps according to epoch number." -save_checkpoint_num: "Save checkpoint numbers, default is 1." 
-data_dir: "Data path, it is better to use absolute path" -schema_dir: "Schema path, it is better to use absolute path" ---- -# chocies -device_target: ['Ascend', 'GPU'] -distribute: ["true", "false"] -enable_save_ckpt: ["true", "false"] -enable_lossscale: ["true", "false"] -do_shuffle: ["true", "false"] -enable_data_sink: ["true", "false"] -allreduce_post_accumulation: ["true", "false"] diff --git a/vega/algorithms/nlp/src/sample_process.py b/vega/algorithms/nlp/src/sample_process.py deleted file mode 100644 index 99b99e2..0000000 --- a/vega/algorithms/nlp/src/sample_process.py +++ /dev/null @@ -1,102 +0,0 @@ -# Copyright 2020 Huawei Technologies Co., Ltd -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# ============================================================================ - -"""Process txt.""" - -import re -from .tokenization import convert_tokens_to_ids - - -def process_one_example_p(tokenizer, vocab, text, max_seq_len=128): - """Process one testline.""" - textlist = list(text) - tokens = [] - for _, word in enumerate(textlist): - token = tokenizer.tokenize(word) - tokens.extend(token) - if len(tokens) >= max_seq_len - 1: - tokens = tokens[0:(max_seq_len - 2)] - ntokens = [] - segment_ids = [] - label_ids = [] - ntokens.append("[CLS]") - segment_ids.append(0) - for _, token in enumerate(tokens): - ntokens.append(token) - segment_ids.append(0) - ntokens.append("[SEP]") - segment_ids.append(0) - input_ids = convert_tokens_to_ids(vocab, ntokens) - input_mask = [1] * len(input_ids) - while len(input_ids) < max_seq_len: - input_ids.append(0) - input_mask.append(0) - segment_ids.append(0) - label_ids.append(0) - ntokens.append("**NULL**") - assert len(input_ids) == max_seq_len - assert len(input_mask) == max_seq_len - assert len(segment_ids) == max_seq_len - - feature = (input_ids, input_mask, segment_ids) - return feature - - -def label_generation(text="", probs=None, tag_to_index=None): - """Generate label.""" - data = [text] - probs = [probs] - result = [] - label2id = tag_to_index - id2label = [k for k, v in label2id.items()] - - for index, prob in enumerate(probs): - for v in prob[1:len(data[index]) + 1]: - result.append(id2label[int(v)]) - - labels = {} - start = None - index = 0 - for _, t in zip("".join(data), result): - if re.search("^[BS]", t): - if start is not None: - label = result[index - 1][2:] - if labels.get(label): - te_ = text[start:index] - labels[label][te_] = [[start, index - 1]] - else: - te_ = text[start:index] - labels[label] = {te_: [[start, index - 1]]} - start = index - if re.search("^O", t): - if start is not None: - label = result[index - 1][2:] - if labels.get(label): - te_ = text[start:index] - labels[label][te_] = [[start, index - 1]] - else: - te_ = text[start:index] - labels[label] = {te_: [[start, index - 1]]} - start = None - index += 1 - if start is not None: - label = result[start][2:] - if labels.get(label): - te_ = text[start:index] - labels[label][te_] = [[start, index - 1]] - else: - te_ = text[start:index] - labels[label] = {te_: [[start, index 
- 1]]} - return labels diff --git a/vega/algorithms/nlp/src/score.py b/vega/algorithms/nlp/src/score.py deleted file mode 100644 index 9c40412..0000000 --- a/vega/algorithms/nlp/src/score.py +++ /dev/null @@ -1,81 +0,0 @@ -# Copyright 2020 Huawei Technologies Co., Ltd -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# ============================================================================ - -"""Calculate average F1 score among labels.""" - -import json - - -def get_f1_score_for_each_label(pre_lines, gold_lines, label): - """ - Get F1 score for each label. - - Args: - pre_lines: listed label info from pre_file. - gold_lines: listed label info from gold_file. - label: - - Returns: - F1 score for this label. - """ - TP = 0 - FP = 0 - FN = 0 - index = 0 - while index < len(pre_lines): - pre_line = pre_lines[index].get(label, {}) - gold_line = gold_lines[index].get(label, {}) - for sample in pre_line: - if sample in gold_line: - TP += 1 - else: - FP += 1 - for sample in gold_line: - if sample not in pre_line: - FN += 1 - index += 1 - f1 = 2 * TP / (2 * TP + FP + FN) - return f1 - - -def get_f1_score(labels, pre_file, gold_file): - """ - Get F1 scores for each label. - - Args: - labels: list of labels. - pre_file: prediction file. - gold_file: ground truth file. - - Returns: - average F1 score on all labels. - """ - pre_lines = [json.loads(line.strip())['label'] for line in open(pre_file) if line.strip()] - gold_lines = [json.loads(line.strip())['label'] for line in open(gold_file) if line.strip()] - if len(pre_lines) != len(gold_lines): - raise ValueError("pre file and gold file have different line count.") - f1_sum = 0 - for label in labels: - f1 = get_f1_score_for_each_label(pre_lines, gold_lines, label) - print('label: %s, F1: %.6f' % (label, f1)) - f1_sum += f1 - - return f1_sum / len(labels) - - -def get_result(labels, pre_file, gold_file): - """Construct the trainer of Bert.""" - avg = get_f1_score(labels, pre_file=pre_file, gold_file=gold_file) - print("avg F1: %.6f" % avg) diff --git a/vega/algorithms/nlp/src/squad_get_predictions.py b/vega/algorithms/nlp/src/squad_get_predictions.py deleted file mode 100644 index 1310f49..0000000 --- a/vega/algorithms/nlp/src/squad_get_predictions.py +++ /dev/null @@ -1,257 +0,0 @@ -# Copyright 2020 Huawei Technologies Co., Ltd -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# ============================================================================ - -"""Get predictions for squad.""" - -import math -import collections -import six -from . 
import tokenization - - -def get_prelim_predictions(features, unique_id_to_result, n_best_size, max_answer_length): - """Get prelim predictions.""" - _PrelimPrediction = collections.namedtuple( - "PrelimPrediction", - ["feature_index", "start_index", "end_index", "start_logit", "end_logit"]) - prelim_predictions = [] - # keep track of the minimum score of null start+end of position 0 - for (feature_index, feature) in enumerate(features): - if feature.unique_id not in unique_id_to_result: - continue - result = unique_id_to_result[feature.unique_id] - start_indexes = _get_best_indexes(result.start_logits, n_best_size) - end_indexes = _get_best_indexes(result.end_logits, n_best_size) - # if we could have irrelevant answers, get the min score of irrelevant - for start_index in start_indexes: - for end_index in end_indexes: - # We could hypothetically create invalid predictions, e.g., predict - # that the start of the span is in the question. We throw out all - # invalid predictions. - if start_index >= len(feature.tokens): - continue - if end_index >= len(feature.tokens): - continue - if start_index not in feature.token_to_orig_map: - continue - if end_index not in feature.token_to_orig_map: - continue - if not feature.token_is_max_context.get(start_index, False): - continue - if end_index < start_index: - continue - length = end_index - start_index + 1 - if length > max_answer_length: - continue - prelim_predictions.append( - _PrelimPrediction( - feature_index=feature_index, - start_index=start_index, - end_index=end_index, - start_logit=result.start_logits[start_index], - end_logit=result.end_logits[end_index])) - - prelim_predictions = sorted( - prelim_predictions, - key=lambda x: (x.start_logit + x.end_logit), - reverse=True) - return prelim_predictions - - -def get_nbest(prelim_predictions, features, example, n_best_size, do_lower_case): - """Get nbest predictions.""" - _NbestPrediction = collections.namedtuple( - "NbestPrediction", ["text", "start_logit", "end_logit"]) - - seen_predictions = {} - nbest = [] - for pred in prelim_predictions: - if len(nbest) >= n_best_size: - break - feature = features[pred.feature_index] - if pred.start_index > 0: # this is a non-null prediction - tok_tokens = feature.tokens[pred.start_index:(pred.end_index + 1)] - orig_doc_start = feature.token_to_orig_map[pred.start_index] - orig_doc_end = feature.token_to_orig_map[pred.end_index] - orig_tokens = example.doc_tokens[orig_doc_start:(orig_doc_end + 1)] - tok_text = " ".join(tok_tokens) - - # De-tokenize WordPieces that have been split off. - tok_text = tok_text.replace(" ##", "") - tok_text = tok_text.replace("##", "") - - # Clean whitespace - tok_text = tok_text.strip() - tok_text = " ".join(tok_text.split()) - orig_text = " ".join(orig_tokens) - final_text = get_final_text(tok_text, orig_text, do_lower_case) - if final_text in seen_predictions: - continue - - seen_predictions[final_text] = True - else: - final_text = "" - seen_predictions[final_text] = True - - nbest.append( - _NbestPrediction( - text=final_text, - start_logit=pred.start_logit, - end_logit=pred.end_logit)) - - # In very rare edge cases we could have no valid predictions. So we - # just create a nonce prediction in this case to avoid failure. 
- if not nbest: - nbest.append(_NbestPrediction(text="empty", start_logit=0.0, end_logit=0.0)) - - assert len(nbest) >= 1 - return nbest - - -def get_predictions(all_examples, all_features, all_results, n_best_size, max_answer_length, do_lower_case): - """Get final predictions.""" - example_index_to_features = collections.defaultdict(list) - for feature in all_features: - example_index_to_features[feature.example_index].append(feature) - - unique_id_to_result = {} - for result in all_results: - unique_id_to_result[result.unique_id] = result - all_predictions = collections.OrderedDict() - - for (example_index, example) in enumerate(all_examples): - features = example_index_to_features[example_index] - prelim_predictions = get_prelim_predictions(features, unique_id_to_result, n_best_size, max_answer_length) - nbest = get_nbest(prelim_predictions, features, example, n_best_size, do_lower_case) - - total_scores = [] - best_non_null_entry = None - for entry in nbest: - total_scores.append(entry.start_logit + entry.end_logit) - if not best_non_null_entry: - if entry.text: - best_non_null_entry = entry - - probs = _compute_softmax(total_scores) - - nbest_json = [] - for (i, entry) in enumerate(nbest): - output = collections.OrderedDict() - output["text"] = entry.text - output["probability"] = probs[i] - output["start_logit"] = entry.start_logit - output["end_logit"] = entry.end_logit - nbest_json.append(output) - - assert len(nbest_json) >= 1 - - all_predictions[example.qas_id] = nbest_json[0]["text"] - return all_predictions - - -def write_predictions(all_examples, all_features, all_results, n_best_size, - max_answer_length, do_lower_case): - """Write final predictions to the json file and log-odds of null if needed.""" - all_predictions = get_predictions(all_examples, all_features, all_results, - n_best_size, max_answer_length, do_lower_case) - return all_predictions - - -def get_final_text(pred_text, orig_text, do_lower_case): - """Project the tokenized prediction back to the original text.""" - def _strip_spaces(text): - ns_chars = [] - ns_to_s_map = collections.OrderedDict() - for (i, c) in enumerate(text): - if c == " ": - continue - ns_to_s_map[len(ns_chars)] = i - ns_chars.append(c) - ns_text = "".join(ns_chars) - return (ns_text, ns_to_s_map) - - tokenizer = tokenization.BasicTokenizer(do_lower_case=do_lower_case) - tok_text = " ".join(tokenizer.tokenize(orig_text)) - - start_position = tok_text.find(pred_text) - if start_position == -1: - return orig_text - end_position = start_position + len(pred_text) - 1 - - (orig_ns_text, orig_ns_to_s_map) = _strip_spaces(orig_text) - (tok_ns_text, tok_ns_to_s_map) = _strip_spaces(tok_text) - - if len(orig_ns_text) != len(tok_ns_text): - return orig_text - - tok_s_to_ns_map = {} - for (i, tok_index) in six.iteritems(tok_ns_to_s_map): - tok_s_to_ns_map[tok_index] = i - - orig_start_position = None - if start_position in tok_s_to_ns_map: - ns_start_position = tok_s_to_ns_map[start_position] - if ns_start_position in orig_ns_to_s_map: - orig_start_position = orig_ns_to_s_map[ns_start_position] - - if orig_start_position is None: - return orig_text - - orig_end_position = None - if end_position in tok_s_to_ns_map: - ns_end_position = tok_s_to_ns_map[end_position] - if ns_end_position in orig_ns_to_s_map: - orig_end_position = orig_ns_to_s_map[ns_end_position] - - if orig_end_position is None: - return orig_text - - output_text = orig_text[orig_start_position:(orig_end_position + 1)] - return output_text - - -def _get_best_indexes(logits, n_best_size): - 
"""Get the n-best logits from a list.""" - index_and_score = sorted(enumerate(logits), key=lambda x: x[1], reverse=True) - - best_indexes = [] - for (i, score) in enumerate(index_and_score): - if i >= n_best_size: - break - best_indexes.append(score[0]) - return best_indexes - - -def _compute_softmax(scores): - """Compute softmax probability over raw logits.""" - if not scores: - return [] - - max_score = None - for score in scores: - if max_score is None or score > max_score: - max_score = score - - exp_scores = [] - total_sum = 0.0 - for score in scores: - x = math.exp(score - max_score) - exp_scores.append(x) - total_sum += x - - probs = [] - for score in exp_scores: - probs.append(score / total_sum) - return probs diff --git a/vega/algorithms/nlp/src/squad_postprocess.py b/vega/algorithms/nlp/src/squad_postprocess.py deleted file mode 100644 index e102b27..0000000 --- a/vega/algorithms/nlp/src/squad_postprocess.py +++ /dev/null @@ -1,110 +0,0 @@ -# Copyright 2020 Huawei Technologies Co., Ltd -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# ============================================================================ - -"""Evaluation script for SQuAD v1.1.""" - -from collections import Counter -import string -import re -import json -import sys - - -def normalize_answer(s): - """Low text and remove punctuation, articles and extra whitespace.""" - - def remove_articles(text): - """Construct the trainer of Bert.""" - return re.sub(r'\b(a|an|the)\b', ' ', text) - - def white_space_fix(text): - """Construct the trainer of Bert.""" - return ' '.join(text.split()) - - def remove_punc(text): - """Construct the trainer of Bert.""" - exclude = set(string.punctuation) - return ''.join(ch for ch in text if ch not in exclude) - - def lower(text): - """Construct the trainer of Bert.""" - return text.lower() - - return white_space_fix(remove_articles(remove_punc(lower(s)))) - - -def f1_score(prediction, ground_truth): - """Calculate f1 score.""" - prediction_tokens = normalize_answer(prediction).split() - ground_truth_tokens = normalize_answer(ground_truth).split() - common = Counter(prediction_tokens) & Counter(ground_truth_tokens) - num_same = sum(common.values()) - if num_same == 0: - return 0 - precision = 1.0 * num_same / len(prediction_tokens) - recall = 1.0 * num_same / len(ground_truth_tokens) - f1 = (2 * precision * recall) / (precision + recall) - return f1 - - -def exact_match_score(prediction, ground_truth): - """Construct the trainer of Bert.""" - return normalize_answer(prediction) == normalize_answer(ground_truth) - - -def metric_max_over_ground_truths(metric_fn, prediction, ground_truths): - """Construct the trainer of Bert.""" - scores_for_ground_truths = [] - for ground_truth in ground_truths: - score = metric_fn(prediction, ground_truth) - scores_for_ground_truths.append(score) - return max(scores_for_ground_truths) - - -def evaluate(dataset, predictions): - """Do evaluation.""" - f1 = exact_match = total = 0 - for article in dataset: - for paragraph in article['paragraphs']: - for qa 
in paragraph['qas']: - total += 1 - if qa['id'] not in predictions: - message = 'Unanswered question ' + qa['id'] + \ - ' will receive score 0.' - print(message, file=sys.stderr) - continue - ground_truths = list(map(lambda x: x['text'], qa['answers'])) - if not ground_truths: - continue - prediction = predictions[qa['id']] - exact_match += metric_max_over_ground_truths( - exact_match_score, prediction, ground_truths) - f1 += metric_max_over_ground_truths( - f1_score, prediction, ground_truths) - - exact_match = 100.0 * exact_match / total - f1 = 100.0 * f1 / total - return {'exact_match': exact_match, 'f1': f1} - - -def SQuad_postprocess(dataset_file, all_predictions, output_metrics="output.json"): - """Construct the trainer of Bert.""" - with open(dataset_file) as ds: - dataset_json = json.load(ds) - dataset = dataset_json['data'] - re_json = evaluate(dataset, all_predictions) - print(json.dumps(re_json)) - with open(output_metrics, 'w') as wr: - wr.write(json.dumps(re_json)) diff --git a/vega/algorithms/nlp/src/tokenization.py b/vega/algorithms/nlp/src/tokenization.py deleted file mode 100644 index ffc4a52..0000000 --- a/vega/algorithms/nlp/src/tokenization.py +++ /dev/null @@ -1,331 +0,0 @@ -# Copyright 2020 Huawei Technologies Co., Ltd -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# ============================================================================ - -"""Tokenization.""" - -import unicodedata -import collections - - -def convert_to_unicode(text): - """ - Convert text into unicode type. - - Args: - text: input str. - - Returns: - input str in unicode. - """ - ret = text - if isinstance(text, str): - ret = text - elif isinstance(text, bytes): - ret = text.decode("utf-8", "ignore") - else: - raise ValueError("Unsupported string type: %s" % (type(text))) - return ret - - -def vocab_to_dict_key_token(vocab_file): - """Load a vocab file into a dict, key is token.""" - vocab = collections.OrderedDict() - index = 0 - with open(vocab_file, "r") as reader: - while True: - token = convert_to_unicode(reader.readline()) - if not token: - break - token = token.strip() - vocab[token] = index - index += 1 - return vocab - - -def vocab_to_dict_key_id(vocab_file): - """Load a vocab file into a dict, key is id.""" - vocab = collections.OrderedDict() - index = 0 - with open(vocab_file, "r") as reader: - while True: - token = convert_to_unicode(reader.readline()) - if not token: - break - token = token.strip() - vocab[index] = token - index += 1 - return vocab - - -def whitespace_tokenize(text): - """Run basic whitespace cleaning and splitting on a piece of text.""" - text = text.strip() - if not text: - return [] - tokens = text.split() - return tokens - - -def convert_tokens_to_ids(vocab_file, tokens): - """ - Convert tokens to ids. - - Args: - vocab_file: path to vocab.txt. - tokens: list of tokens. - - Returns: - list of ids. 
- """ - vocab_dict = vocab_to_dict_key_token(vocab_file) - output = [] - for token in tokens: - output.append(vocab_dict[token]) - return output - - -def convert_ids_to_tokens(vocab_file, ids): - """ - Convert ids to tokens. - - Args: - vocab_file: path to vocab.txt. - ids: list of ids. - - Returns: - list of tokens. - """ - vocab_dict = vocab_to_dict_key_id(vocab_file) - output = [] - for _id in ids: - output.append(vocab_dict[_id]) - return output - - -class FullTokenizer(): - """Construct the trainer of Bert.""" - - def __init__(self, vocab_file, do_lower_case=True): - self.vocab_dict = vocab_to_dict_key_token(vocab_file) - self.do_lower_case = do_lower_case - self.basic_tokenize = BasicTokenizer(do_lower_case) - self.wordpiece_tokenize = WordpieceTokenizer(self.vocab_dict) - - def tokenize(self, text): - """ - Do full tokenization. - - Args: - text: str of text. - - Returns: - list of tokens. - """ - tokens_ret = [] - text = convert_to_unicode(text) - for tokens in self.basic_tokenize.tokenize(text): - wordpiece_tokens = self.wordpiece_tokenize.tokenize(tokens) - tokens_ret.extend(wordpiece_tokens) - return tokens_ret - - -class BasicTokenizer(): - """Construct the trainer of Bert.""" - - def __init__(self, do_lower_case=True): - self.do_lower_case = do_lower_case - - def tokenize(self, text): - """ - Do basic tokenization. - - Args: - text: text in unicode. - - Returns: - a list of tokens split from text - """ - text = self._clean_text(text) - text = self._tokenize_chinese_chars(text) - - orig_tokens = whitespace_tokenize(text) - split_tokens = [] - for token in orig_tokens: - if self.do_lower_case: - token = token.lower() - token = self._run_strip_accents(token) - aaa = self._run_split_on_punc(token) - split_tokens.extend(aaa) - - output_tokens = whitespace_tokenize(" ".join(split_tokens)) - return output_tokens - - def _run_strip_accents(self, text): - """Construct the trainer of Bert.""" - text = unicodedata.normalize("NFD", text) - output = [] - for char in text: - cat = unicodedata.category(char) - if cat == "Mn": - continue - output.append(char) - return "".join(output) - - def _run_split_on_punc(self, text): - """Split punctuation on a piece of text.""" - i = 0 - start_new_word = True - output = [] - for char in text: - if _is_punctuation(char): - output.append([char]) - start_new_word = True - else: - if start_new_word: - output.append([]) - start_new_word = False - output[-1].append(char) - i += 1 - return ["".join(x) for x in output] - - def _clean_text(self, text): - """Perform invalid character removal and whitespace cleanup on text.""" - output = [] - for char in text: - cp = ord(char) - if cp == 0 or cp == 0xfffd or _is_control(char): - continue - if _is_whitespace(char): - output.append(" ") - else: - output.append(char) - return "".join(output) - - def _tokenize_chinese_chars(self, text): - """Add whitespace around any CJK character.""" - output = [] - for char in text: - cp = ord(char) - if self._is_chinese_char(cp): - output.append(" ") - output.append(char) - output.append(" ") - else: - output.append(char) - return "".join(output) - - def _is_chinese_char(self, cp): - """Check whether CP is the codepoint of a CJK character.""" - # This defines a "chinese character" as anything in the CJK Unicode block: - # https://en.wikipedia.org/wiki/CJK_Unified_Ideographs_(Unicode_block) - # - # Note that the CJK Unicode block is NOT all Japanese and Korean characters, - # despite its name. 
The modern Korean Hangul alphabet is a different block, - # as is Japanese Hiragana and Katakana. Those alphabets are used to write - # space-separated words, so they are not treated specially and handled - # like the all of the other languages. - if ((0x4E00 <= cp <= 0x9FFF) - or (0x3400 <= cp <= 0x4DBF) - or (0x20000 <= cp <= 0x2A6DF) - or (0x2A700 <= cp <= 0x2B73F) - or (0x2B740 <= cp <= 0x2B81F) - or (0x2B820 <= cp <= 0x2CEAF) - or (0xF900 <= cp <= 0xFAFF) - or (0x2F800 <= cp <= 0x2FA1F)): - return True - - return False - - -class WordpieceTokenizer(): - """Construct the trainer of Bert.""" - - def __init__(self, vocab): - self.vocab_dict = vocab - - def tokenize(self, tokens): - """ - Do word-piece tokenization. - - Args: - tokens: a word. - - Returns: - a list of tokens that can be found in vocab dict. - """ - output_tokens = [] - tokens = convert_to_unicode(tokens) - for token in whitespace_tokenize(tokens): - chars = list(token) - len_chars = len(chars) - start = 0 - end = len_chars - while start < len_chars: - while start < end: - substr = "".join(token[start:end]) - if start != 0: - substr = "##" + substr - if substr in self.vocab_dict: - output_tokens.append(substr) - start = end - end = len_chars - else: - end = end - 1 - if start == end and start != len_chars: - output_tokens.append("[UNK]") - break - return output_tokens - - -def _is_whitespace(char): - """Check whether `chars` is a whitespace character.""" - # \t, \n, and \r are technically control characters but we treat them - # as whitespace since they are generally considered as such. - whitespace_char = [" ", "\t", "\n", "\r"] - if char in whitespace_char: - return True - cat = unicodedata.category(char) - if cat == "Zs": - return True - return False - - -def _is_control(char): - """Check whether `chars` is a control character.""" - # These are technically control characters but we count them as whitespace - # characters. - control_char = ["\t", "\n", "\r"] - if char in control_char: - return False - cat = unicodedata.category(char) - if cat in ("Cc", "Cf"): - return True - return False - - -def _is_punctuation(char): - """Check whether `chars` is a punctuation character.""" - cp = ord(char) - # We treat all non-letter/number ASCII as punctuation. - # Characters such as "^", "$", and "`" are not in the Unicode - # Punctuation class but we treat them as punctuation anyways, for - # consistency. - if ((33 <= cp <= 47) or (58 <= cp <= 64) or - (91 <= cp <= 96) or (123 <= cp <= 126)): - return True - cat = unicodedata.category(char) - if cat.startswith("P"): - return True - return False diff --git a/vega/algorithms/nlp/src/utils.py b/vega/algorithms/nlp/src/utils.py deleted file mode 100644 index 0b31c91..0000000 --- a/vega/algorithms/nlp/src/utils.py +++ /dev/null @@ -1,224 +0,0 @@ -# Copyright 2020 Huawei Technologies Co., Ltd -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
-# ============================================================================ - -"""Functional Cells used in Bert finetune and evaluation.""" - -import os -import math -import collections -import numpy as np -import mindspore.nn as nn -from mindspore import log as logger -from mindspore.ops import operations as P -from mindspore.common.tensor import Tensor -from mindspore.common import dtype as mstype -from mindspore.train.callback import Callback -from mindspore.nn.learning_rate_schedule import LearningRateSchedule, PolynomialDecayLR, WarmUpLR - - -class CrossEntropyCalculation(nn.Cell): - """Cross Entropy loss.""" - - def __init__(self, is_training=True): - super(CrossEntropyCalculation, self).__init__() - self.onehot = P.OneHot() - self.on_value = Tensor(1.0, mstype.float32) - self.off_value = Tensor(0.0, mstype.float32) - self.reduce_sum = P.ReduceSum() - self.reduce_mean = P.ReduceMean() - self.reshape = P.Reshape() - self.last_idx = (-1,) - self.neg = P.Neg() - self.cast = P.Cast() - self.is_training = is_training - - def construct(self, logits, label_ids, num_labels): - """Construct the trainer of Bert.""" - if self.is_training: - label_ids = self.reshape(label_ids, self.last_idx) - one_hot_labels = self.onehot(label_ids, num_labels, self.on_value, self.off_value) - per_example_loss = self.neg(self.reduce_sum(one_hot_labels * logits, self.last_idx)) - loss = self.reduce_mean(per_example_loss, self.last_idx) - return_value = self.cast(loss, mstype.float32) - else: - return_value = logits * 1.0 - return return_value - - -def make_directory(path: str): - """Make directory.""" - if path is None or not isinstance(path, str) or path.strip() == "": - logger.error("The path(%r) is invalid type.", path) - raise TypeError("Input path is invalid type") - - # convert the relative paths - path = os.path.realpath(path) - logger.debug("The abs path is %r", path) - - # check the path is exist and write permissions? 
- if os.path.exists(path): - real_path = path - else: - # All exceptions need to be caught because create directory maybe have some limit(permissions) - logger.debug("The directory(%s) doesn't exist, will create it", path) - try: - os.makedirs(path, exist_ok=True) - real_path = path - except PermissionError as e: - logger.error("No write permission on the directory(%r), error = %r", path, e) - raise TypeError("No write permission on the directory.") - return real_path - - -class LossCallBack(Callback): - """Monitor the loss in training.""" - - def __init__(self, dataset_size=-1): - super(LossCallBack, self).__init__() - self._dataset_size = dataset_size - - def step_end(self, run_context): - """Print loss after each step.""" - cb_params = run_context.original_args() - if self._dataset_size > 0: - percent, epoch_num = math.modf(cb_params.cur_step_num / self._dataset_size) - if percent == 0: - percent = 1 - epoch_num -= 1 - print("epoch: {}, current epoch percent: {}, step: {}, outputs are {}" - .format(int(epoch_num), "%.3f" % percent, cb_params.cur_step_num, str(cb_params.net_outputs)), - flush=True) - else: - print("epoch: {}, step: {}, outputs are {}".format(cb_params.cur_epoch_num, cb_params.cur_step_num, - str(cb_params.net_outputs)), flush=True) - - -def LoadNewestCkpt(load_finetune_checkpoint_dir, steps_per_epoch, epoch_num, prefix): - """Find the ckpt finetune generated and load it into eval network.""" - files = os.listdir(load_finetune_checkpoint_dir) - pre_len = len(prefix) - max_num = 0 - for filename in files: - name_ext = os.path.splitext(filename) - if name_ext[-1] != ".ckpt": - continue - if filename.find(prefix) == 0 and not filename[pre_len].isalpha(): - index = filename[pre_len:].find("-") - if index == 0 and max_num == 0: - load_finetune_checkpoint_path = os.path.join(load_finetune_checkpoint_dir, filename) - elif index not in (0, -1): - name_split = name_ext[-2].split('_') - if (steps_per_epoch != int(name_split[len(name_split) - 1])) \ - or (epoch_num != int(filename[pre_len + index + 1:pre_len + index + 2])): - continue - num = filename[pre_len + 1:pre_len + index] - if int(num) > max_num: - max_num = int(num) - load_finetune_checkpoint_path = os.path.join(load_finetune_checkpoint_dir, filename) - return load_finetune_checkpoint_path - - -class BertLearningRate(LearningRateSchedule): - """Warmup-decay learning rate for Bert network.""" - - def __init__(self, learning_rate, end_learning_rate, warmup_steps, decay_steps, power): - super(BertLearningRate, self).__init__() - self.warmup_flag = False - if warmup_steps > 0: - self.warmup_flag = True - self.warmup_lr = WarmUpLR(learning_rate, warmup_steps) - self.decay_lr = PolynomialDecayLR(learning_rate, end_learning_rate, decay_steps, power) - self.warmup_steps = Tensor(np.array([warmup_steps]).astype(np.float32)) - - self.greater = P.Greater() - self.one = Tensor(np.array([1.0]).astype(np.float32)) - self.cast = P.Cast() - - def construct(self, global_step): - """Construct the trainer of Bert.""" - decay_lr = self.decay_lr(global_step) - if self.warmup_flag: - is_warmup = self.cast(self.greater(self.warmup_steps, global_step), mstype.float32) - warmup_lr = self.warmup_lr(global_step) - lr = (self.one - is_warmup) * decay_lr + is_warmup * warmup_lr - else: - lr = decay_lr - return lr - - -def convert_labels_to_index(label_list): - """Convert label_list to indices for NER task.""" - label2id = collections.OrderedDict() - label2id["O"] = 0 - prefix = ["S_", "B_", "M_", "E_"] - index = 0 - for label in label_list: - for pre 
in prefix: - index += 1 - sub_label = pre + label - label2id[sub_label] = index - return label2id - - -def _get_poly_lr(global_step, lr_init, lr_end, lr_max, warmup_steps, total_steps, poly_power): - """ - Generate learning rate array. - - Args: - global_step(int): current step - lr_init(float): init learning rate - lr_end(float): end learning rate - lr_max(float): max learning rate - warmup_steps(int): number of warmup epochs - total_steps(int): total epoch of training - poly_power(int): poly learning rate power - - Returns: - np.array, learning rate array - """ - lr_each_step = [] - if warmup_steps != 0: - inc_each_step = (float(lr_max) - float(lr_init)) / float(warmup_steps) - else: - inc_each_step = 0 - for i in range(total_steps): - if i < warmup_steps: - lr = float(lr_init) + inc_each_step * float(i) - else: - base = (1.0 - (float(i) - float(warmup_steps)) / (float(total_steps) - float(warmup_steps))) - lr = float(lr_max - lr_end) * (base ** poly_power) - lr = lr + lr_end - if lr < 0.0: - lr = 0.0 - lr_each_step.append(lr) - - learning_rate = np.array(lr_each_step).astype(np.float32) - current_step = global_step - learning_rate = learning_rate[current_step:] - return learning_rate - - -def get_bert_thor_lr(lr_max=0.0034, lr_min=3.244e-05, lr_power=1.0, lr_total_steps=30000): - """Construct the trainer of Bert.""" - learning_rate = _get_poly_lr(global_step=0, lr_init=0.0, lr_end=lr_min, lr_max=lr_max, warmup_steps=0, - total_steps=lr_total_steps, poly_power=lr_power) - return Tensor(learning_rate) - - -def get_bert_thor_damping(damping_max=5e-2, damping_min=1e-6, damping_power=1.0, damping_total_steps=30000): - """Construct the trainer of Bert.""" - damping = _get_poly_lr(global_step=0, lr_init=0.0, lr_end=damping_min, lr_max=damping_max, warmup_steps=0, - total_steps=damping_total_steps, poly_power=damping_power) - return Tensor(damping) diff --git a/vega/common/__init__.py b/vega/common/__init__.py index bfca7ab..1b7c746 100644 --- a/vega/common/__init__.py +++ b/vega/common/__init__.py @@ -13,3 +13,4 @@ from .message_client import MessageClient from .arg_parser import argment_parser from .searchable import Searchable, SearchableRegister, space, change_space +from .wrappers import callbacks diff --git a/vega/common/arg_parser.py b/vega/common/arg_parser.py index c1e1df3..c42186d 100644 --- a/vega/common/arg_parser.py +++ b/vega/common/arg_parser.py @@ -1,19 +1,36 @@ # -*- coding: utf-8 -*- # Copyright (C) 2020. Huawei Technologies Co., Ltd. All rights reserved. -# This program is free software; you can redistribute it and/or modify -# it under the terms of the MIT License. -# This program is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# MIT License for more details. + +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at + +# http://www.apache.org/licenses/LICENSE-2.0 + +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
"""Arg parser.""" import argparse -__all__ = ["argment_parser"] +__all__ = ["argment_parser", "str2bool"] + + +def str2bool(value): + """Convert string to boolean.""" + value = str(value) + if value.lower() in ("yes", "true", "t", "y", "1"): + return True + elif value.lower() in ("no", "false", "f", "n", "0"): + return False + else: + raise argparse.ArgumentTypeError("Boolean value expected.") def argment_parser(desc=None): @@ -29,18 +46,14 @@ def _get_help_string(self, action): return help items = [] - - if action.type and action.default not in [True, False]: - items.append(f"type: {action.type.__name__}") - if action.choices: items.append(f"choices: {'|'.join(action.choices)}") - if '%(default)' not in action.help and action.default is not None and action.default not in [True, False]: + if "%(default)" not in action.help and action.default is not None and action.default not in [True, False]: if action.default is not argparse.SUPPRESS: defaulting_nargs = [argparse.OPTIONAL, argparse.ZERO_OR_MORE] if action.option_strings or action.nargs in defaulting_nargs: - items.append('default: %(default)s') + items.append("default: %(default)s") if items: help += " (" + ", ".join(items) + ")" diff --git a/vega/common/backend_register.py b/vega/common/backend_register.py index d9a9a02..0af3f64 100644 --- a/vega/common/backend_register.py +++ b/vega/common/backend_register.py @@ -1,12 +1,18 @@ # -*- coding:utf-8 -*- # Copyright (C) 2020. Huawei Technologies Co., Ltd. All rights reserved. -# This program is free software; you can redistribute it and/or modify -# it under the terms of the MIT License. -# This program is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# MIT License for more details. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. """Backend Register.""" @@ -80,14 +86,7 @@ def set_backend(backend='pytorch', device_category='GPU'): register_networks(backend) register_modelzoo(backend) - # register ascend automl modules - ascend_automl_path = os.environ.get("ASCEND_AUTOML_PATH") - if ascend_automl_path: - sys.path.append(ascend_automl_path) - try: - import ascend_automl - except ImportError: - pass + import_extension_module() # backup config from vega.common.config_serializable import backup_configs backup_configs() @@ -132,3 +131,11 @@ def get_devices(): if "CUDA_VISIBLE_DEVICES" in os.environ: device_id = int(os.environ["CUDA_VISIBLE_DEVICES"].split(",")[0]) return "{}:{}".format(device_category.lower(), device_id) + + +def import_extension_module(): + """Import extension module.""" + try: + import ascend_automl + except ImportError: + pass diff --git a/vega/common/check.py b/vega/common/check.py index a99bac2..c64903d 100644 --- a/vega/common/check.py +++ b/vega/common/check.py @@ -1,22 +1,19 @@ +# -*- coding:utf-8 -*- + # Copyright (C) 2020. Huawei Technologies Co., Ltd. All rights reserved. 
# -# Permission is hereby granted, free of charge, to any person obtaining a copy -# of this software and associated documentation files (the "Software"), to deal -# in the Software without restriction, including without limitation the rights -# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -# copies of the Software, and to permit persons to whom the Software is -# furnished to do so, subject to the following conditions: +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at # -# The above copyright notice and this permission notice shall be included in -# all copies or substantial portions of the Software. +# http://www.apache.org/licenses/LICENSE-2.0 # -# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN -# THE SOFTWARE. +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + """Utils for checking yml.""" diff --git a/vega/common/class_factory.py b/vega/common/class_factory.py index abff153..9d58e67 100644 --- a/vega/common/class_factory.py +++ b/vega/common/class_factory.py @@ -1,15 +1,22 @@ # -*- coding:utf-8 -*- # Copyright (C) 2020. Huawei Technologies Co., Ltd. All rights reserved. -# This program is free software; you can redistribute it and/or modify -# it under the terms of the MIT License. -# This program is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# MIT License for more details. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
"""Management class registration and bind configuration properties, provides the type of class supported.""" import logging +import importlib from copy import deepcopy from enum import Enum from inspect import isfunction, isclass, signature as sig @@ -42,6 +49,7 @@ class ClassType(object): SEARCHSPACE = 'searchspace' PACKAGE = "package" GENERATOR = "generator" + CALLBACK_FN = 'trainer.callback.fn' class SearchSpaceType(Enum): @@ -151,6 +159,11 @@ def is_exists(cls, type_name, cls_name=None): @classmethod def _import_pkg(cls, type_name, cls_name): type_cls = "{}:{}".format(type_name, cls_name) + if cls_name.startswith('vega_hub'): + pkg_name, fn_name = '.'.join(cls_name.split('.')[:-1]), cls_name.split('.')[-1] + pkg = importlib.import_module(pkg_name) + cls.register_cls(getattr(pkg, fn_name), type_name, alias=cls_name) + return pkg = cls.__registry__.get(ClassType.PACKAGE).get( type_cls) or cls.__registry__.get(ClassType.PACKAGE).get(cls_name) if pkg: diff --git a/vega/common/config.py b/vega/common/config.py index 78ea533..a52543f 100644 --- a/vega/common/config.py +++ b/vega/common/config.py @@ -1,23 +1,29 @@ # -*- coding:utf-8 -*- # Copyright (C) 2020. Huawei Technologies Co., Ltd. All rights reserved. -# This program is free software; you can redistribute it and/or modify -# it under the terms of the MIT License. -# This program is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# MIT License for more details. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. """Load public configuration from yaml or py file and convert dictionary types to objects.""" -import yaml -import json -import copy -import os.path as osp import sys +import json import logging import traceback +import copy from importlib import import_module +import os.path as osp +import yaml class Config(dict): @@ -38,6 +44,9 @@ def __init__(self, *args, **kwargs): if arg.endswith('.yaml') or arg.endswith('.yml'): with open(arg) as f: raw_dict = yaml.safe_load(f) + if "abs_path" in kwargs: + file_path = osp.dirname(osp.abspath(arg)) + self._replace_abs_path(file_path, raw_dict) _dict2config(self, raw_dict) elif arg.endswith('.py'): module_name = osp.basename(arg)[:-3] @@ -116,9 +125,9 @@ def dump_yaml(self, output_file, **kwargs): with open(output_file, "w") as f: data = json.loads(json.dumps(self)) yaml.dump(data, f, indent=4, Dumper=SafeDumper, sort_keys=False, **kwargs) - except Exception: - logging.error(f"Failed to dump config to file: {output_file}.") - logging.error(traceback.format_exc()) + except Exception as e: + logging.error(f"Failed to dump config to file: {output_file}. error message: {e}") + logging.debug(traceback.format_exc()) def __setattr__(self, key, value): """Get a object attr `key` with `value`. 
@@ -148,6 +157,14 @@ def __deepcopy__(self, memo): """ return Config(copy.deepcopy(dict(self))) + def _replace_abs_path(self, file_path, raw_dict): + if isinstance(raw_dict, dict): + for k, v in raw_dict.items(): + if isinstance(v, dict): + self._replace_abs_path(file_path, v) + elif isinstance(v, str) and v.startswith("./"): + raw_dict[k] = osp.join(file_path, v[2:]) + def _dict2config(config, dic): """Convert dictionary to config. diff --git a/vega/common/config_serializable.py b/vega/common/config_serializable.py index fe5bb78..8edfe0b 100644 --- a/vega/common/config_serializable.py +++ b/vega/common/config_serializable.py @@ -1,22 +1,28 @@ # -*- coding:utf-8 -*- # Copyright (C) 2020. Huawei Technologies Co., Ltd. All rights reserved. -# This program is free software; you can redistribute it and/or modify -# it under the terms of the MIT License. -# This program is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# MIT License for more details. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. """Convert class to string.""" import json import logging -import numpy as np from copy import deepcopy from inspect import ismethod, isfunction +import numpy as np +from vega.common.check import valid_rule from .config import Config from .class_factory import ClassFactory -from vega.common.check import valid_rule __all__ = ["ConfigSerializable", "backup_configs"] diff --git a/vega/common/consts.py b/vega/common/consts.py index 1a84915..a56407d 100644 --- a/vega/common/consts.py +++ b/vega/common/consts.py @@ -1,12 +1,18 @@ # -*- coding:utf-8 -*- # Copyright (C) 2020. Huawei Technologies Co., Ltd. All rights reserved. -# This program is free software; you can redistribute it and/or modify -# it under the terms of the MIT License. -# This program is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# MIT License for more details. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
"""System const variable.""" from enum import IntEnum, unique, Enum diff --git a/vega/common/dag.py b/vega/common/dag.py new file mode 100644 index 0000000..d553317 --- /dev/null +++ b/vega/common/dag.py @@ -0,0 +1,93 @@ +"""DAG class.""" + +from collections import deque +from collections import OrderedDict + + +class DAG: + """DAG.""" + + def __init__(self): + """Init DAG.""" + self.nodes = OrderedDict() + + def add_node(self, node): + """Add node.""" + if node not in self.nodes: + self.nodes[node] = set() + + def remove_node(self, node): + """Remove node.""" + if node in self.nodes: + self.nodes.pop(node) + + for pre_node, nodes in iter(self.nodes.items()): + if node in nodes: + nodes.remove(node) + + def add_edge(self, pre_node, node): + """Add edge.""" + if pre_node not in self.nodes or node not in self.nodes: + return + self.nodes[pre_node].add(node) + + def remove_edge(self, pre_node, node): + """Remove edge.""" + if pre_node in self.nodes and node in self.nodes[pre_node]: + self.nodes[pre_node].remove(node) + + def from_dict(self, dict_value): + """Construct DAG from dict.""" + self.nodes = OrderedDict() + for node in iter(dict_value.keys()): + self.add_node(node) + for pre_node, nodes in iter(dict_value.items()): + if not isinstance(nodes, list): + raise TypeError('dict values must be lists') + for node in nodes: + self.add_edge(pre_node, node) + + def next_nodes(self, node): + """Get all successor of the node.""" + return list(self.nodes[node]) + + def pre_nodes(self, node): + """Get all predecessor of the node.""" + return [item for item in self.nodes if node in self.nodes[item]] + + def topological_sort(self): + """Topological sort.""" + in_degree = {node: 0 for node in self.nodes} + out_degree = {node: 0 for node in self.nodes} + for node in self.nodes: + out_degree[node] = len(node) + for next_node in self.nodes[node]: + in_degree[next_node] += 1 + ret = [] + stack = deque() + for node in in_degree: + if in_degree[node] == 0: + stack.append(node) + while len(stack) > 0: + node = stack.pop() + for item in self.nodes[node]: + in_degree[item] -= 1 + if in_degree[item] == 0: + stack.append(item) + ret.append(node) + if len(ret) != len(self.nodes): + raise ValueError("Not a directed acyclic graph") + return ret + + def ind_nodes(self): + """Independent nodes.""" + in_degree = {node: 0 for node in self.nodes} + for node in self.nodes: + for next_node in self.nodes[node]: + in_degree[next_node] += 1 + ret = set(node for node in self.nodes if in_degree[node] == 0) + return ret + + def size(self): + """Return the size of graph.""" + return len(self.nodes) diff --git a/vega/common/file_ops.py b/vega/common/file_ops.py index 62d8aef..06cb751 100644 --- a/vega/common/file_ops.py +++ b/vega/common/file_ops.py @@ -1,12 +1,18 @@ # -*- coding:utf-8 -*- # Copyright (C) 2020. Huawei Technologies Co., Ltd. All rights reserved. -# This program is free software; you can redistribute it and/or modify -# it under the terms of the MIT License. -# This program is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# MIT License for more details. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. """FileOps class.""" import os @@ -75,7 +81,7 @@ def join_path(cls, *args): return prefix + tail @classmethod - def dump_pickle(cls, obj, filename): + def dump_pickle(cls, obj, filename, protocol=None): """Dump a object to a file using pickle. :param object obj: target object. @@ -85,10 +91,10 @@ def dump_pickle(cls, obj, filename): if not os.path.isfile(filename): cls.make_base_dir(filename) with open(filename, "wb") as f: - pickle.dump(obj, f) + pickle.dump(obj, f, protocol=protocol) @classmethod - def load_pickle(cls, filename): + def load_pickle(cls, filename, fix_imports=True, encoding="ASCII", errors="strict"): """Load a pickle file and return the object. :param str filename: target pickle file path. @@ -98,8 +104,11 @@ def load_pickle(cls, filename): """ if not os.path.isfile(filename): return None + from vega.common.general import General + from vega.security.load_pickle import restricted_loads with open(filename, "rb") as f: - return pickle.load(f) + return restricted_loads( + f, fix_imports=fix_imports, encoding=encoding, errors=errors, security=General.security) @classmethod def copy_folder(cls, src, dst): @@ -143,9 +152,6 @@ def copy_file(cls, src, dst): if dst is None or dst == "": return try: - if ":" in src: - cls.http_download(src, dst) - return if os.path.isfile(src): shutil.copy(src, dst) else: @@ -167,57 +173,29 @@ def download_dataset(cls, src_path, local_path=None): """ if src_path is None: raise FileNotFoundError("Dataset path is None, please set dataset path in config file.") - if src_path.lower().startswith("http://") or src_path.lower().startswith("https://"): - if local_path is None: - local_path = os.path.abspath("./temp") - cls.make_dir(local_path) - base_name = os.path.basename(src_path) - local_path = os.path.join(local_path, base_name) - logger.debug("Downloading, from={}, to={}.".format(src_path, local_path)) - cls.http_download(src_path, local_path, unzip=True) - return os.path.dirname(local_path) if os.path.exists(src_path): return src_path else: raise FileNotFoundError('Path is not existed, path={}'.format(src_path)) @classmethod - def http_download(cls, src, dst, unzip=False): - """Download data from http or https web site. + def download_pretrain_model(cls, src_file, local_path=None): + """Download dataset from http or https web site, return path. 
- :param src: the data path
- :type src: str
- :param dst: the data path
- :type dst: str
+ :param src_file: the path of the pretrained model file
+ :type src_file: str
 :raises FileNotFoundError: if the file path is not exist, an error will raise
+ :return: the final model path
+ :rtype: str
 """
- from six.moves import urllib
- import fcntl
-
- signal_file = cls.join_path(os.path.dirname(dst), ".{}.signal".format(os.path.basename(dst)))
- if not os.path.isfile(signal_file):
- with open(signal_file, 'w') as fp:
- fp.write('{}'.format(0))
-
- with open(signal_file, 'r+') as fp:
- fcntl.flock(fp, fcntl.LOCK_EX)
- signal = int(fp.readline().strip())
- if signal == 0:
- try:
- urllib.request.urlretrieve(src, dst)
- logger.info("Downloaded completely.")
- except (urllib.error.URLError, IOError) as e:
- logger.error("Faild download, msg={}".format(str(e)))
- raise e
- if unzip is True and dst.endswith(".tar.gz"):
- logger.info("Untar dataset file, file={}".format(dst))
- cls._untar(dst)
- logger.info("Untar dataset file completely.")
- with open(signal_file, 'w') as fn:
- fn.write('{}'.format(1))
- else:
- logging.debug("File is already downloaded, file={}".format(dst))
- fcntl.flock(fp, fcntl.LOCK_UN)
+ if src_file is None:
+ raise FileNotFoundError("Path of pretrained model is None, please set a correct path.")
+ if os.path.isfile(src_file):
+ return src_file
+ else:
+ raise FileNotFoundError('Model does not exist, path={}'.format(src_file))
 @classmethod
 def _untar(cls, src, dst=None):
diff --git a/vega/common/general.py b/vega/common/general.py
index 09e15ce..5ff8219 100644
--- a/vega/common/general.py
+++ b/vega/common/general.py
@@ -1,12 +1,18 @@
 # -*- coding:utf-8 -*-
 # Copyright (C) 2020. Huawei Technologies Co., Ltd. All rights reserved.
-# This program is free software; you can redistribute it and/or modify
-# it under the terms of the MIT License.
-# This program is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# MIT License for more details.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
 """Default general."""
 import os
@@ -125,4 +131,4 @@ class General(ConfigSerializable):
 device_evaluate_before_train = True
 ms_execute_mode = 0 # 0-GRAPH_MODE 1-PYNATIVE_MODE
 dataset_sink_mode = True
- security_setting = None
+ security = False
diff --git a/vega/common/json_coder.py b/vega/common/json_coder.py
index a37d573..c92332c 100644
--- a/vega/common/json_coder.py
+++ b/vega/common/json_coder.py
@@ -1,12 +1,18 @@
 # -*- coding: utf-8 -*-
 # Copyright (C) 2020. Huawei Technologies Co., Ltd. All rights reserved.
-# This program is free software; you can redistribute it and/or modify
-# it under the terms of the MIT License.
-# This program is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
See the -# MIT License for more details. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. """Utils tools.""" diff --git a/vega/common/message_client.py b/vega/common/message_client.py index b528c00..92fa7ae 100644 --- a/vega/common/message_client.py +++ b/vega/common/message_client.py @@ -1,12 +1,18 @@ # -*- coding: utf-8 -*- # Copyright (C) 2020. Huawei Technologies Co., Ltd. All rights reserved. -# This program is free software; you can redistribute it and/or modify -# it under the terms of the MIT License. -# This program is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# MIT License for more details. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. """Message Server.""" diff --git a/vega/common/message_server.py b/vega/common/message_server.py index 4a6a2f9..d72def4 100644 --- a/vega/common/message_server.py +++ b/vega/common/message_server.py @@ -1,12 +1,18 @@ # -*- coding: utf-8 -*- # Copyright (C) 2020. Huawei Technologies Co., Ltd. All rights reserved. -# This program is free software; you can redistribute it and/or modify -# it under the terms of the MIT License. -# This program is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# MIT License for more details. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. """Message Server.""" @@ -92,7 +98,7 @@ def _monitor_socket(socket, handlers): def query_task_info(): """Get task message.""" - from vega.common.task_ops import TaskOps + from vega.common import TaskOps return { "result": "success", "task_id": TaskOps().task_id, diff --git a/vega/common/parameter_sharing.py b/vega/common/parameter_sharing.py index 28c491c..3da7885 100644 --- a/vega/common/parameter_sharing.py +++ b/vega/common/parameter_sharing.py @@ -1,20 +1,26 @@ # -*- coding:utf-8 -*- # Copyright (C) 2020. 
Huawei Technologies Co., Ltd. All rights reserved. -# This program is free software; you can redistribute it and/or modify -# it under the terms of the MIT License. -# This program is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# MIT License for more details. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. """This is Search on Network.""" import json import logging -import vega import hashlib +from threading import Lock +import vega from vega.common import TaskOps, FileOps from vega.common.utils import singleton -from threading import Lock _lock = Lock() @@ -22,7 +28,7 @@ def calculated_uuid(value): """Create uuid by static names.""" value = str(json.dumps(value)) if isinstance(value, dict) else str(value) - return hashlib.md5(value.encode()).hexdigest() # hash(value) + return hashlib.sha256(value.encode()).hexdigest() # hash(value) def add_share_file_path(uuid, file_name): diff --git a/vega/common/pareto_front.py b/vega/common/pareto_front.py index d5aa037..f83998c 100644 --- a/vega/common/pareto_front.py +++ b/vega/common/pareto_front.py @@ -1,12 +1,18 @@ # -*- coding: utf-8 -*- # Copyright (C) 2020. Huawei Technologies Co., Ltd. All rights reserved. -# This program is free software; you can redistribute it and/or modify -# it under the terms of the MIT License. -# This program is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# MIT License for more details. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. """Pareto front.""" @@ -46,7 +52,7 @@ def normal_selection(outs, max_nums, choice_column=0, seed=None): data = outs[:, choice_column].tolist() prob = [round(np.log(i + 1e-2), 2) for i in range(1, len(data) + 1)] prob_temp = prob - for idx, out in enumerate(data): + for _, out in enumerate(data): sorted_ind = np.argsort(out) for idx, ind in enumerate(sorted_ind): prob[ind] += prob_temp[idx] diff --git a/vega/common/searchable.py b/vega/common/searchable.py index 0302ba2..009f436 100644 --- a/vega/common/searchable.py +++ b/vega/common/searchable.py @@ -1,12 +1,18 @@ # -*- coding:utf-8 -*- # Copyright (C) 2020. Huawei Technologies Co., Ltd. All rights reserved. -# This program is free software; you can redistribute it and/or modify -# it under the terms of the MIT License. 
-# This program is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# MIT License for more details. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. """This is Search on Network.""" from vega.common.utils import singleton diff --git a/vega/common/task_ops.py b/vega/common/task_ops.py index 629e975..5287ffd 100644 --- a/vega/common/task_ops.py +++ b/vega/common/task_ops.py @@ -1,12 +1,18 @@ # -*- coding:utf-8 -*- # Copyright (C) 2020. Huawei Technologies Co., Ltd. All rights reserved. -# This program is free software; you can redistribute it and/or modify -# it under the terms of the MIT License. -# This program is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# MIT License for more details. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. """TaskOps class. diff --git a/vega/common/user_config.py b/vega/common/user_config.py index c218dcd..d40335b 100644 --- a/vega/common/user_config.py +++ b/vega/common/user_config.py @@ -1,18 +1,24 @@ # -*- coding:utf-8 -*- # Copyright (C) 2020. Huawei Technologies Co., Ltd. All rights reserved. -# This program is free software; you can redistribute it and/or modify -# it under the terms of the MIT License. -# This program is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# MIT License for more details. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
"""Contains Default and User configuration.""" from copy import deepcopy +from vega.common.check import valid_rule from .config import Config from .utils import singleton, update_dict -from vega.common.check import valid_rule @singleton diff --git a/vega/common/utils.py b/vega/common/utils.py index 247c6b0..1487d1c 100644 --- a/vega/common/utils.py +++ b/vega/common/utils.py @@ -1,12 +1,18 @@ # -*- coding: utf-8 -*- # Copyright (C) 2020. Huawei Technologies Co., Ltd. All rights reserved. -# This program is free software; you can redistribute it and/or modify -# it under the terms of the MIT License. -# This program is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# MIT License for more details. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. """Utils tools.""" @@ -15,12 +21,12 @@ import sys import logging import imp -import numpy as np import random import socket +from contextlib import contextmanager from functools import wraps from copy import deepcopy -from contextlib import contextmanager +import numpy as np logger = logging.getLogger(__name__) @@ -44,7 +50,7 @@ def get_instance(*args, **kw): return get_instance -def update_dict(src, dst, exclude=['loss', 'metric', 'lr_scheduler', 'optim', 'model_desc', 'transforms']): +def update_dict(src, dst, exclude=None): """Use src dictionary update dst dictionary. :param dict src: Source dictionary. @@ -52,6 +58,8 @@ def update_dict(src, dst, exclude=['loss', 'metric', 'lr_scheduler', 'optim', 'm :return: Updated dictionary. :rtype: Dictionary """ + if exclude is None: + exclude = ['loss', 'metric', 'lr_scheduler', 'optim', 'model_desc', 'transforms'] exclude_keys = exclude or [] for key in src.keys(): if key in dst.keys() and key not in exclude_keys: @@ -192,6 +200,23 @@ def verify_requires(requires): return True +def verify_platform_pkgs(pkgs: list) -> bool: + """Verify pytorch, tensorflow or mindspore.""" + failed = [] + for module, pkg in pkgs: + try: + __import__(module) + except Exception: + failed.append(pkg) + if failed: + logger.error("Missing modules: {}".format(failed)) + logger.error("Please run the following command:") + for pkg in failed: + logger.error(" pip3 install --user \"{}\"".format(pkg)) + return False + return True + + def remove_np_value(value): """Remove np.int64 and np.float32.""" if value is None: @@ -233,6 +258,7 @@ def get_available_port(min_port=8000, max_port=9999): _sock.close() return port except Exception: + logging.debug('Failed to get available port, continue.') continue return None diff --git a/vega/common/utils_torch.py b/vega/common/utils_torch.py index bea0c7e..d1bd9ea 100644 --- a/vega/common/utils_torch.py +++ b/vega/common/utils_torch.py @@ -1,12 +1,18 @@ # -*- coding: utf-8 -*- # Copyright (C) 2020. Huawei Technologies Co., Ltd. All rights reserved. -# This program is free software; you can redistribute it and/or modify -# it under the terms of the MIT License. 
-# This program is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# MIT License for more details.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
 """Torch utils tools."""
diff --git a/vega/common/wrappers.py b/vega/common/wrappers.py
index db7fe44..8bc97de 100644
--- a/vega/common/wrappers.py
+++ b/vega/common/wrappers.py
@@ -1,12 +1,18 @@
 # -*- coding:utf-8 -*-
 # Copyright (C) 2020. Huawei Technologies Co., Ltd. All rights reserved.
-# This program is free software; you can redistribute it and/or modify
-# it under the terms of the MIT License.
-# This program is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# MIT License for more details.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
 """Provide wrapper functions."""
@@ -14,7 +20,7 @@
 from inspect import signature as sig
 from functools import wraps
 import vega
-from vega.common import ClassFactory, init_log, close_log, General
+from vega.common import ClassFactory, init_log, close_log, General, ClassType
 def metric(name=None):
@@ -39,12 +45,15 @@ def wrapper(*args, **kwargs):
 params_sig = sig(func).parameters
 params = {param: value for param, value in kwargs.items() if param in params_sig}
 return func(*args, **params)
+
 return wrapper
+
 return decorator
 def train_process_wrapper(func):
 """Train process wrapper."""
+
 @wraps(func)
 def wrapper(self, *args, **kwargs):
 """Wrap method."""
@@ -68,6 +77,7 @@ def wrapper(self, *args, **kwargs):
 restore_rank_envs()
 close_log(fh)
 return r
+
 return wrapper
@@ -89,3 +99,28 @@ def restore_rank_envs():
 global _envs
 for env in _envs:
 os.environ[env] = _envs[env]
+
+
+def callbacks(name=None):
+ """Register a function as a trainer callback function, using the same params from configuration.
+
+ :param name: alias under which the callback function is registered
+ :return: decorator
+ """
+
+ def decorator(func):
+ """Provide input param to decorator.
+
+ :param func: source function
+ :return: wrapper
+ """
+ ClassFactory.register_cls(func, ClassType.CALLBACK_FN, alias=name)
+
+ @wraps(func)
+ def wrapper(*args, **kwargs):
+ """Make function as a wrapper."""
+ return func(*args, **kwargs)
+
+ return wrapper
+
+ return decorator
diff --git a/vega/common/zmq_op.py b/vega/common/zmq_op.py
index 67e38c5..fbba65d 100644
--- a/vega/common/zmq_op.py
+++ b/vega/common/zmq_op.py
@@ -1,30 +1,48 @@
 # -*- coding: utf-8 -*-
 # Copyright (C) 2020. Huawei Technologies Co., Ltd. All rights reserved.
-# This program is free software; you can redistribute it and/or modify
-# it under the terms of the MIT License.
-# This program is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# MIT License for more details.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
 """ZMQ operation."""
 import zmq
+from vega.common import General
+from vega.common.task_ops import TaskOps
 def listen(ip, min_port, max_port, max_tries):
 """Listen on the server."""
- context = zmq.Context()
- socket = context.socket(zmq.REP)
- port = socket.bind_to_random_port(
- f"tcp://{ip}", min_port=min_port, max_port=max_port, max_tries=100)
- return socket, port
+ if not General.security:
+ context = zmq.Context()
+ socket = context.socket(zmq.REP)
+ port = socket.bind_to_random_port(
+ f"tcp://{ip}", min_port=min_port, max_port=max_port, max_tries=100)
+ return socket, port
+ else:
+ from vega.security.zmq_op import listen_security
+ temp_path = TaskOps().temp_path
+ return listen_security(ip, min_port, max_port, max_tries, temp_path)
 def connect(ip, port):
 """Connect to server."""
- context = zmq.Context()
- socket = context.socket(zmq.REQ)
- socket.connect(f"tcp://{ip}:{port}")
- return socket
+ if not General.security:
+ context = zmq.Context()
+ socket = context.socket(zmq.REQ)
+ socket.connect(f"tcp://{ip}:{port}")
+ return socket
+ else:
+ from vega.security.zmq_op import connect_security
+ temp_path = TaskOps().temp_path
+ return connect_security(ip, port, temp_path)
diff --git a/vega/core/__init__.py b/vega/core/__init__.py
index e95ee59..0881275 100644
--- a/vega/core/__init__.py
+++ b/vega/core/__init__.py
@@ -1,2 +1,2 @@
-from .run import run, env_args, init_cluster_args
 from vega.common import FileOps, TaskOps, UserConfig, module_existed
+from .run import run, env_args, init_cluster_args
diff --git a/vega/core/pipeline/__init__.py b/vega/core/pipeline/__init__.py
index 4b2727f..42ce9dd 100644
--- a/vega/core/pipeline/__init__.py
+++ b/vega/core/pipeline/__init__.py
@@ -1,14 +1,14 @@
-# -*- coding:utf-8 -*-
-from .pipe_step import PipeStep
-from .pipeline import Pipeline
-from vega.common.class_factory import ClassFactory
-
-
-ClassFactory.lazy_register("vega.core.pipeline", {
- "search_pipe_step": ["SearchPipeStep"],
- "train_pipe_step": ["TrainPipeStep"],
- "benchmark_pipe_step": ["BenchmarkPipeStep"],
- "multi_task_pipe_step": ["MultiTaskPipeStep"], -
"horovod_train_step": ["HorovodTrainStep"], - "hccl_train_step": ["HcclTrainStep"], -}) +# -*- coding:utf-8 -*- +from vega.common.class_factory import ClassFactory +from .pipe_step import PipeStep +from .pipeline import Pipeline + + +ClassFactory.lazy_register("vega.core.pipeline", { + "search_pipe_step": ["SearchPipeStep"], + "train_pipe_step": ["TrainPipeStep"], + "benchmark_pipe_step": ["BenchmarkPipeStep"], + "multi_task_pipe_step": ["MultiTaskPipeStep"], + "horovod_train_step": ["HorovodTrainStep"], + "hccl_train_step": ["HcclTrainStep"], +}) diff --git a/vega/core/pipeline/benchmark_pipe_step.py b/vega/core/pipeline/benchmark_pipe_step.py index 718da45..f8d61b0 100644 --- a/vega/core/pipeline/benchmark_pipe_step.py +++ b/vega/core/pipeline/benchmark_pipe_step.py @@ -1,28 +1,33 @@ # -*- coding:utf-8 -*- # Copyright (C) 2020. Huawei Technologies Co., Ltd. All rights reserved. -# This program is free software; you can redistribute it and/or modify -# it under the terms of the MIT License. -# This program is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# MIT License for more details. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. """Benchmark Pipe Step defined in Pipeline.""" import logging import os import traceback -from vega.common import FileOps, Config +from vega.common import FileOps, Config, TaskOps from vega.common import ClassFactory, ClassType from vega.common.general import General -from vega.common.task_ops import TaskOps from vega.core.pipeline.conf import PipeStepConfig, PipelineConfig from vega.evaluator.conf import EvaluatorConfig from vega.report import ReportClient, ReportRecord, ReportServer +from vega.common import Status from .pipe_step import PipeStep from ..scheduler import create_master -from vega.common import Status logger = logging.getLogger(__name__) @@ -114,7 +119,7 @@ def _evaluate_single_model(self, record): model_desc=record.desc, weights_file=record.weights_file) self.master.run(evaluator) - except Exception: - logger.error("Failed to evaluate model, worker info={}".format(worker_info)) - logger.error(traceback.format_exc()) + except Exception as e: + logger.error(f"Failed to evaluate model, worker info: {worker_info}, message: {e}") + logger.debug(traceback.format_exc()) return diff --git a/vega/core/pipeline/conf.py b/vega/core/pipeline/conf.py index 70a8414..a0996ad 100644 --- a/vega/core/pipeline/conf.py +++ b/vega/core/pipeline/conf.py @@ -1,12 +1,18 @@ # -*- coding:utf-8 -*- # Copyright (C) 2020. Huawei Technologies Co., Ltd. All rights reserved. -# This program is free software; you can redistribute it and/or modify -# it under the terms of the MIT License. -# This program is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# MIT License for more details. 
+# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. """Defined Conf for Pipeline.""" from vega.common import ClassType @@ -21,7 +27,6 @@ class SearchSpaceConfig(ConfigSerializable): """Default Search Space config for Pipeline.""" - # _type_name = ClassType.NETWORK type = None @classmethod @@ -40,11 +45,6 @@ def check_config(cls, config): "modules": {"type": list} } valid_rule(cls, config, check_rules_searchspace) - # to do - # for module in config["modules"]: - # if module not in config: - # raise Exception( - # "{} is required in {}".format(module, cls.__name__)) class SearchAlgorithmConfig(ConfigSerializable): diff --git a/vega/core/pipeline/generator.py b/vega/core/pipeline/generator.py index 37f51ba..6437e2f 100644 --- a/vega/core/pipeline/generator.py +++ b/vega/core/pipeline/generator.py @@ -1,29 +1,35 @@ # -*- coding:utf-8 -*- # Copyright (C) 2020. Huawei Technologies Co., Ltd. All rights reserved. -# This program is free software; you can redistribute it and/or modify -# it under the terms of the MIT License. -# This program is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# MIT License for more details. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
"""Generator for SearchPipeStep.""" import logging import os -import pickle +from pickle import HIGHEST_PROTOCOL from copy import deepcopy import vega from vega.core.search_algs import SearchAlgorithm from vega.core.search_space.search_space import SearchSpace from vega.core.pipeline.conf import PipeStepConfig from vega.common.general import General -from vega.common.task_ops import TaskOps from vega.report import ReportServer, ReportClient from vega.common.config import Config from vega.common import update_dict from vega.common.utils import remove_np_value from vega.common.parameter_sharing import ParameterSharing +from vega.common import FileOps, TaskOps class Generator(object): @@ -39,7 +45,7 @@ def __init__(self): @property def is_completed(self): """Define a property to determine search algorithm is completed.""" - return self.search_alg.is_completed or vega.quota().quota_reached + return self.search_alg.is_completed or vega.get_quota().quota_reached def sample(self): """Sample a work id and model from search algorithm.""" @@ -55,11 +61,14 @@ def sample(self): num_samples = len(res) if num_samples == 0: return None + for sample in res: - (id, desc, hps, kwargs) = self._get_hps_desc_from_sample(sample) - if not vega.quota().verify_sample(desc) or not vega.quota().verify_affinity(desc): + decode_sample = self.search_alg.decode(sample) if hasattr( + self.search_alg, "decode") else self._get_hps_desc_from_sample(sample) + (worker_id, desc, hps, kwargs) = decode_sample + (dict(), ) * (4 - len(decode_sample)) + if not vega.get_quota().verify_sample(desc) or not vega.get_quota().verify_affinity(desc): continue - out.append((id, desc, hps)) + out.append((worker_id, desc, hps)) kwargs_list.append(kwargs) if len(out) >= num_samples: break @@ -69,20 +78,22 @@ def sample(self): def _get_hps_desc_from_sample(self, sample): if isinstance(sample, dict): - id = sample["worker_id"] + worker_id = sample["worker_id"] desc = sample["encoded_desc"] sample.pop("worker_id") sample.pop("encoded_desc") kwargs = sample - sample = _split_sample((id, desc)) + sample = _split_sample((worker_id, desc)) else: kwargs = {} sample = _split_sample(sample) if hasattr(self, "objective_keys") and self.objective_keys: kwargs["objective_keys"] = self.objective_keys - (id, desc, hps) = sample + (worker_id, desc, hps) = sample if hasattr(self.search_alg.search_space, "to_desc"): desc = self.search_alg.search_space.to_desc(desc) + elif desc.get("type") == 'DagNetwork': + desc = desc else: desc = self._decode_hps(desc) hps = self._decode_hps(hps) @@ -99,7 +110,7 @@ def _get_hps_desc_from_sample(self, sample): if network_desc is not None: desc.update(network_desc) - return id, desc, hps, kwargs + return worker_id, desc, hps, kwargs def _split_hps_desc(self, hps, desc): if "type" not in desc or desc.get("type") != "Sequential": @@ -128,10 +139,6 @@ def update(self, step_name, worker_id): logging.debug("Get Record=%s", str(record)) self.search_alg.update(record.serialize()) ParameterSharing().remove() - # try: - # self.dump() - # except Exception: - # logging.warning("The Generator contains object which can't be pickled.") logging.info(f"Update Success. 
step_name={step_name}, worker_id={worker_id}") logging.info("Best values: %s", ReportServer().print_best(step_name=General.step_name)) @@ -157,7 +164,6 @@ def _decode_hps(hps): hp_dict = {key: hp_dict} else: hp_dict = {key: value} - # update cfg with hps hps_dict = update_dict(hps_dict, hp_dict, []) return Config(hps_dict) @@ -165,8 +171,7 @@ def dump(self): """Dump generator to file.""" step_path = TaskOps().step_path _file = os.path.join(step_path, ".generator") - with open(_file, "wb") as f: - pickle.dump(self, f, protocol=pickle.HIGHEST_PROTOCOL) + FileOps.dump_pickle(self, _file, protocol=HIGHEST_PROTOCOL) @classmethod def restore(cls): @@ -174,14 +179,13 @@ def restore(cls): step_path = TaskOps().step_path _file = os.path.join(step_path, ".generator") if os.path.exists(_file): - with open(_file, "rb") as f: - return pickle.load(f) + return FileOps.load_pickle(_file) else: return None def _split_sample(sample): - """Split sample to (id, model_desc, hps).""" + """Split sample to (worker_id, model_desc, hps).""" if len(sample) not in [2, 3]: raise Exception("Incorrect sample length, sample: {}".format(sample)) if len(sample) == 3: diff --git a/vega/core/pipeline/hccl_train_step.py b/vega/core/pipeline/hccl_train_step.py index 985bb96..487e611 100644 --- a/vega/core/pipeline/hccl_train_step.py +++ b/vega/core/pipeline/hccl_train_step.py @@ -1,12 +1,18 @@ # -*- coding:utf-8 -*- # Copyright (C) 2020. Huawei Technologies Co., Ltd. All rights reserved. -# This program is free software; you can redistribute it and/or modify -# it under the terms of the MIT License. -# This program is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# MIT License for more details. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
"""HCCL fully train.""" @@ -14,13 +20,14 @@ import logging import json import vega -from .train_pipe_step import TrainPipeStep from vega.common.general import General from vega.common.class_factory import ClassFactory, ClassType from vega.common import Status, TaskOps from vega.report import ReportServer from vega.core.scheduler import create_master from vega.trainer.conf import TrainerConfig +from vega.security.args import path_verify +from .train_pipe_step import TrainPipeStep logger = logging.getLogger(__name__) @@ -76,19 +83,27 @@ def train_model(self, trainer): def _set_nccl_ip_port(self): if not vega.is_torch_backend(): return - rank_file = os.environ["RANK_TABLE_FILE"] + rank_file = os.getenv('RANK_TABLE_FILE', None) + if not rank_file: + raise ValueError('RANK_TABLE_FILE not in environ.') + rank_file = os.path.realpath(rank_file) + rank_file = path_verify(rank_file) with open(rank_file, 'r') as f: data = json.loads(f.read()) General.cluster.hccl_server_ip = data['server_list'][0]['server_id'] if "server_port" in data['server_list'][0]: General.cluster.hccl_port = int(data['server_list'][0]["server_port"]) - os.environ["vega_pytorch_hccl_port"] = {General.cluster.hccl_port} + os.environ["vega_pytorch_hccl_port"] = str(General.cluster.hccl_port) logger.info(f"HCCL server: tcp://{General.cluster.hccl_server_ip}:{General.cluster.hccl_port}") def _new_rank_table_file(self): if not vega.is_torch_backend(): return - rank_file = os.environ["RANK_TABLE_FILE"] + rank_file = os.getenv('RANK_TABLE_FILE', None) + if not rank_file: + raise ValueError('RANK_TABLE_FILE not in environ.') + rank_file = os.path.realpath(rank_file) + rank_file = path_verify(rank_file) with open(rank_file, 'r') as f: data = json.loads(f.read()) device_ids = os.environ["NPU_VISIBLE_DEVICES"].split(",") diff --git a/vega/core/pipeline/horovod/horovod_train.py b/vega/core/pipeline/horovod/horovod_train.py index 4d768d1..74dd341 100644 --- a/vega/core/pipeline/horovod/horovod_train.py +++ b/vega/core/pipeline/horovod/horovod_train.py @@ -1,40 +1,39 @@ # -*- coding: utf-8 -*- # Copyright (C) 2020. Huawei Technologies Co., Ltd. All rights reserved. -# This program is free software; you can redistribute it and/or modify -# it under the terms of the MIT License. -# This program is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# MIT License for more details. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
"""Running Horovod Train.""" import os -import pickle import argparse import logging import horovod.torch as hvd from vega.common import ClassFactory from vega.common.general import General from vega.core.pipeline.conf import PipeStepConfig +from vega.common import FileOps + parser = argparse.ArgumentParser(description='Horovod Fully Train') parser.add_argument('--cf_file', type=str, help='ClassFactory pickle file') args = parser.parse_args() -if 'VEGA_INIT_ENV' in os.environ: - exec(os.environ.copy()['VEGA_INIT_ENV']) logging.info('start horovod setting') hvd.init() -try: - import moxing as mox - mox.file.set_auth(obs_client_log=False) -except Exception: - pass hvd.join() -with open(args.cf_file, 'rb') as f: - cf_content = pickle.load(f) +cf_content = FileOps.load_pickle(args.cf_file) model_desc = cf_content.get('model_desc') worker_id = cf_content.get('worker_id') ClassFactory.__registry__ = cf_content.get('registry') diff --git a/vega/core/pipeline/horovod/kube_plm_rsh_agent b/vega/core/pipeline/horovod/kube_plm_rsh_agent deleted file mode 100644 index 68cada3..0000000 --- a/vega/core/pipeline/horovod/kube_plm_rsh_agent +++ /dev/null @@ -1,6 +0,0 @@ -#!/bin/sh - -set -x -POD_NAME=$1 -shift -kubectl exec ${POD_NAME} -- /bin/sh -c "$*" diff --git a/vega/core/pipeline/horovod/run_horovod_train.sh b/vega/core/pipeline/horovod/run_horovod_train.sh index 3207d47..be1f3e9 100644 --- a/vega/core/pipeline/horovod/run_horovod_train.sh +++ b/vega/core/pipeline/horovod/run_horovod_train.sh @@ -1,21 +1,3 @@ -#!/usr/bin/env bash -# This script runs the Horovod training job on the modelarts platform or cluster. -# basepath=$(cd `dirname $0`; pwd) -# SCRIPT_PATH=${basepath}/horovod_train.py -# run_experiment() { -# local np=$1 -# shift -# mpirun -np $np \ -# --hostfile ${HOST_FILE_PATH} \ -# -bind-to socket \ -# -x NCCL_DEBUG=INFO -x MPI_HOME -x LD_LIBRARY_PATH -x PATH \ -# -x HOROVOD_MPI_THREADS_DISABLE=1 \ -# -mca plm_rsh_no_tree_spawn true \ -# $@ -# } -# nps=$1 -# run_experiment $nps python3 $SCRIPT_PATH --cf_file $2 - basepath=$(cd `dirname $0`; pwd) SCRIPT_PATH=${basepath}/horovod_train.py nps=$1 diff --git a/vega/core/pipeline/horovod_train_step.py b/vega/core/pipeline/horovod_train_step.py index 912e9c3..a7aa6e3 100644 --- a/vega/core/pipeline/horovod_train_step.py +++ b/vega/core/pipeline/horovod_train_step.py @@ -1,27 +1,32 @@ # -*- coding:utf-8 -*- # Copyright (C) 2020. Huawei Technologies Co., Ltd. All rights reserved. -# This program is free software; you can redistribute it and/or modify -# it under the terms of the MIT License. -# This program is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# MIT License for more details. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
"""Horovod fully train.""" import os import logging import subprocess -import pickle -import vega -from .train_pipe_step import TrainPipeStep from vega.common.general import General from vega.common.class_factory import ClassFactory, ClassType from vega.common import Status from vega.report import ReportServer from vega.core.pipeline.conf import PipeStepConfig from vega.trainer.conf import TrainerConfig +from vega.common import FileOps +from .train_pipe_step import TrainPipeStep logger = logging.getLogger(__name__) @@ -62,20 +67,13 @@ def train_model(self, trainer): 'pipe_step_config': PipeStepConfig().to_dict(), 'model_desc': trainer.model_desc, 'worker_id': trainer.worker_id} - with open(cf_file, 'wb') as f: - pickle.dump(cf_content, f) - if os.environ.get('DLS_TASK_NUMBER') is None: - # local cluster - worker_ips = '127.0.0.1' - if General.cluster.master_ip is not None and General.cluster.master_ip != '127.0.0.1': - worker_ips = General.cluster.master_ip - for ip in General.cluster.slaves: - worker_ips = worker_ips + ',' + ip - cmd = ['bash', f'{pwd_dir}/horovod/run_horovod_train.sh', - str(General.cluster.num_workers), cf_file, worker_ips, General.python_command] - else: - # Roma - cmd = ['bash', '/home/work/run_horovod_train.sh', - str(General.cluster.num_workers), cf_file] + FileOps.dump_pickle(cf_content, cf_file) + worker_ips = '127.0.0.1' + if General.cluster.master_ip is not None and General.cluster.master_ip != '127.0.0.1': + worker_ips = General.cluster.master_ip + for ip in General.cluster.slaves: + worker_ips = worker_ips + ',' + ip + cmd = ['bash', f'{pwd_dir}/horovod/run_horovod_train.sh', + str(General.cluster.num_workers), cf_file, worker_ips, General.python_command] proc = subprocess.Popen(cmd, env=os.environ) proc.wait() diff --git a/vega/core/pipeline/multi_task_pipe_step.py b/vega/core/pipeline/multi_task_pipe_step.py index 3228d15..1c38ba4 100644 --- a/vega/core/pipeline/multi_task_pipe_step.py +++ b/vega/core/pipeline/multi_task_pipe_step.py @@ -1,12 +1,18 @@ # -*- coding:utf-8 -*- # Copyright (C) 2020. Huawei Technologies Co., Ltd. All rights reserved. -# This program is free software; you can redistribute it and/or modify -# it under the terms of the MIT License. -# This program is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# MIT License for more details. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. """Multi-task pipe step.""" diff --git a/vega/core/pipeline/pipe_step.py b/vega/core/pipeline/pipe_step.py index da8885d..bd7fcb5 100644 --- a/vega/core/pipeline/pipe_step.py +++ b/vega/core/pipeline/pipe_step.py @@ -1,12 +1,18 @@ # -*- coding:utf-8 -*- # Copyright (C) 2020. Huawei Technologies Co., Ltd. All rights reserved. -# This program is free software; you can redistribute it and/or modify -# it under the terms of the MIT License. 
-# This program is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# MIT License for more details. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. """PipeStep that used in Pipeline.""" diff --git a/vega/core/pipeline/pipeline.py b/vega/core/pipeline/pipeline.py index ec5b2f6..1fff5c0 100644 --- a/vega/core/pipeline/pipeline.py +++ b/vega/core/pipeline/pipeline.py @@ -1,29 +1,36 @@ # -*- coding:utf-8 -*- # Copyright (C) 2020. Huawei Technologies Co., Ltd. All rights reserved. -# This program is free software; you can redistribute it and/or modify -# it under the terms of the MIT License. -# This program is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# MIT License for more details. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. """Pipeline that string up all PipeSteps.""" + import os import traceback import logging import signal -import pandas as pd import json -from .pipe_step import PipeStep +import pandas as pd from vega.common.user_config import UserConfig from vega.common import FileOps, TaskOps, Status from vega.core.scheduler import shutdown_cluster from vega.common.general import General -from .conf import PipeStepConfig, PipelineConfig from vega.report import ReportServer from vega.common.message_server import MessageServer from vega.common.parameter_sharing import ParameterSharing +from .pipe_step import PipeStep +from .conf import PipeStepConfig, PipelineConfig logger = logging.getLogger(__name__) @@ -78,7 +85,7 @@ def _shutdown_cluster(signum, frame): self.steps.append(pipestep) pipestep.do() except Exception as e: - logger.error("Failed to run pipeline.") + logger.error(f"Failed to run pipeline, message: {e}") logger.error(traceback.format_exc()) error_occured = True if "pipestep" in locals(): diff --git a/vega/core/pipeline/search_pipe_step.py b/vega/core/pipeline/search_pipe_step.py index dbbb9ed..8e7d246 100644 --- a/vega/core/pipeline/search_pipe_step.py +++ b/vega/core/pipeline/search_pipe_step.py @@ -1,12 +1,18 @@ # -*- coding:utf-8 -*- # Copyright (C) 2020. Huawei Technologies Co., Ltd. All rights reserved. -# This program is free software; you can redistribute it and/or modify -# it under the terms of the MIT License. 
-# This program is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# MIT License for more details. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. """Nas Pipe Step defined in Pipeline.""" import logging @@ -14,15 +20,15 @@ import os import glob import shutil -from .pipe_step import PipeStep -from .generator import Generator -from ..scheduler import create_master from vega.common import ClassFactory, ClassType -from ..pipeline.conf import PipeStepConfig from vega.report import ReportServer from vega.common.general import General from vega.common import TaskOps, Status from vega.trainer.conf import TrainerConfig +from .pipe_step import PipeStep +from .generator import Generator +from ..scheduler import create_master +from ..pipeline.conf import PipeStepConfig @ClassFactory.register(ClassType.PIPE_STEP) diff --git a/vega/core/pipeline/train_pipe_step.py b/vega/core/pipeline/train_pipe_step.py index 7e0da12..ed1c4e4 100644 --- a/vega/core/pipeline/train_pipe_step.py +++ b/vega/core/pipeline/train_pipe_step.py @@ -1,19 +1,24 @@ # -*- coding:utf-8 -*- # Copyright (C) 2020. Huawei Technologies Co., Ltd. All rights reserved. -# This program is free software; you can redistribute it and/or modify -# it under the terms of the MIT License. -# This program is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# MIT License for more details. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
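Note: the horovod fully-train hunk near the top of this section drops the Roma (DLS_TASK_NUMBER) branch and always assembles the worker list locally: the master IP leads unless it is the loopback address, each slave IP is appended comma-separated, and the trainer config is written with FileOps.dump_pickle before run_horovod_train.sh is launched. A minimal, self-contained sketch of the worker-list assembly (the function name is illustrative, not part of the patch):

    def build_worker_ips(master_ip, slaves):
        """Comma-separated worker list: master IP first, then each slave IP."""
        worker_ips = "127.0.0.1"
        if master_ip is not None and master_ip != "127.0.0.1":
            worker_ips = master_ip
        for ip in slaves:
            worker_ips = worker_ips + "," + ip
        return worker_ips

    # build_worker_ips("192.168.0.10", ["192.168.0.11"]) -> "192.168.0.10,192.168.0.11"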
"""Fully Train PipeStep that used in Pipeline.""" import os import logging import vega -from .pipe_step import PipeStep from vega.common.general import General from vega.common.class_factory import ClassFactory, ClassType from vega.common import FileOps, TaskOps, Status @@ -21,6 +26,7 @@ from vega.core.scheduler import create_master from vega.core.pipeline.conf import PipeStepConfig, PipelineConfig from vega.trainer.conf import TrainerConfig +from .pipe_step import PipeStep logger = logging.getLogger(__name__) @@ -84,13 +90,14 @@ def _build_trainer(self, model_desc=None, hps=None, model_id=None, weights_file= cls_trainer = ClassFactory.get_cls(ClassType.TRAINER, PipeStepConfig.trainer.type) step_name = self.task.step_name if model_desc is not None: - sample = dict(worker_id=model_id, desc=model_desc, step_name=step_name) + sample = dict(worker_id=model_id, desc=model_desc, step_name=step_name, weights_file=weights_file) record = ReportRecord().load_dict(sample) logging.debug("update record=%s", str(record)) trainer = cls_trainer(model_desc=model_desc, hps=hps, id=model_id, pretrained_model_file=weights_file) else: trainer = cls_trainer(None, 0, hps=hps) - record = ReportRecord(trainer.step_name, trainer.worker_id, desc=trainer.model_desc, hps=hps) + record = ReportRecord(trainer.step_name, trainer.worker_id, desc=trainer.model_desc, hps=hps, + weights_file=weights_file) ReportClient().update(**record.to_dict()) # resume training if vega.is_torch_backend() and General._resume: diff --git a/vega/core/run.py b/vega/core/run.py index 317aae1..de4ebdd 100644 --- a/vega/core/run.py +++ b/vega/core/run.py @@ -1,12 +1,18 @@ # -*- coding:utf-8 -*- # Copyright (C) 2020. Huawei Technologies Co., Ltd. All rights reserved. -# This program is free software; you can redistribute it and/or modify -# it under the terms of the MIT License. -# This program is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# MIT License for more details. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. """vega run.py.""" import sys @@ -15,11 +21,11 @@ import vega from vega.common.utils import init_log, lazy from vega.common import Config, UserConfig -from vega.common.task_ops import TaskOps -from .pipeline.pipeline import Pipeline +from vega.common import TaskOps from vega import set_backend from vega.common.general import General from vega.core.pipeline.conf import PipelineConfig +from .pipeline.pipeline import Pipeline logger = logging.getLogger(__name__) @@ -67,7 +73,7 @@ def _run_pipeline(): def _adjust_config(): - vega.quota().adjuest_pipeline_by_runtime(UserConfig().data) + vega.get_quota().adjuest_pipeline_by_runtime(UserConfig().data) @lazy diff --git a/vega/core/scheduler/dask_env.py b/vega/core/scheduler/dask_env.py index 68071d8..0e6e1f4 100644 --- a/vega/core/scheduler/dask_env.py +++ b/vega/core/scheduler/dask_env.py @@ -1,12 +1,18 @@ # -*- coding: utf-8 -*- # Copyright (C) 2020. 
Huawei Technologies Co., Ltd. All rights reserved. -# This program is free software; you can redistribute it and/or modify -# it under the terms of the MIT License. -# This program is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# MIT License for more details. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. """The main part of the cluster framework. @@ -18,12 +24,12 @@ import os import logging import time +import uuid from datetime import datetime from vega.trainer import utils -from vega.common.file_ops import FileOps +from vega.common import FileOps from vega.common.general import General -from vega.core.scheduler.run_dask import get_client, run_scheduler,\ - run_local_worker, run_remote_worker, get_address +from vega.core.scheduler.run_dask import get_client, run_scheduler, run_local_worker, run_remote_worker, get_address class DaskEnv(object): @@ -98,7 +104,7 @@ def _get_slave_device_num(self): try: system_device_num = len(os.environ['NPU_VISIBLE_DEVICES'].split(',')) except Exception: - pass + logging.debug("Failed to get NPU_VISIBLE_DEVICES in environ.") else: raise Exception('device category must be GPU or NPU.') return system_device_num @@ -120,7 +126,7 @@ def start(self): return True def stop(self): - """TODO, stop the current cluster.""" + """Stop the current cluster.""" return def _start_dask(self): @@ -128,57 +134,49 @@ def _start_dask(self): then wait and start dask-worker on all nodes. """ - the_ip, the_port = utils.get_master_address(self.args) - logging.info("master ip and port: {}:{}".format(the_ip, the_port)) - if 'PYTHONPATH' in os.environ: - os.environ['PYTHONPATH'] = "{}:{}:{}".format( - os.environ['PYTHONPATH'], self.__master_path__, os.path.abspath(os.curdir)) - elif self.__master_path__ is not None: - os.environ['PYTHONPATH'] = "{}:{}".format( - self.__master_path__, os.path.abspath(os.curdir)) - - # set distributed configs - # os.environ['DASK_DISTRIBUTED__CLIENT__HEARTBEAT'] = '10s' - + master_ip, master_port = utils.get_master_address(self.args) + logging.info("master ip and port: {}:{}".format(master_ip, master_port)) + logging.info("Initializing cluster. 
Please wait.") + if "PYTHONPATH" not in os.environ: + os.environ["PYTHONPATH"] = "" + if self.__master_path__ not in os.environ["PYTHONPATH"].split(":"): + os.environ["PYTHONPATH"] += f":{self.__master_path__}" + if os.path.abspath(os.curdir) not in os.environ["PYTHONPATH"].split(":"): + os.environ["PYTHONPATH"] += f":{os.path.abspath(os.curdir)}" if self.args.rank == 0: - # host = utils.get_local_address() - utils.save_master_ip(the_ip, the_port, self.args) - address = "--node-ip-address={}".format(the_ip) - port = "--port={}".format(the_port) try: - get_client(get_address(the_ip, the_port)) - logging.info("Reusing previous cluster:{}:{}".format(the_ip, the_port)) + get_client(get_address(master_ip, master_port)) + logging.info("Reusing previous cluster:{}:{}".format(master_ip, master_port)) return except Exception: - logging.info("Dask-scheduler not start. Start dask-scheduler in master {}".format(the_ip)) - scheduler_p = run_scheduler(port=port) + logging.info("Dask-scheduler not start. Start dask-scheduler in master {}".format(master_ip)) + scheduler_file = f"{self.temp_path}/.scheduler/scheduler.tmp" + FileOps.make_base_dir(scheduler_file) + scheduler_p = run_scheduler(ip=master_ip, port=master_port, tmp_file=scheduler_file) self._cluster_pid.append(scheduler_p.pid) - time.sleep(10) - master_host, master_port = utils.get_master_address(self.args) - address = "tcp://{0}:{1}".format(master_host, master_port) - self.master_address = get_address(master_host, master_port) - logging.info("master host({}), address({}).".format(master_host, address)) + self.master_address = get_address(master_ip, master_port) + logging.info("master host({}), address({}).".format(master_ip, self.master_address)) self._check_dask_scheduler() - # nproc_set = "--nprocs={}".format(self.slave_proc_num) - _local_dir = "{}/.vega_worker_{}".format( - self.temp_path, - datetime.now().strftime('%m%d.%H%M%S.%f')[:-3]) - FileOps.make_dir(_local_dir) - local_dir = "--local-directory={}".format(_local_dir) + local_dir = f"{self.temp_path}/.vega_worker" + FileOps.make_dir(local_dir) # standalone boot mode, not dask-work is start by script if General.cluster.standalone_boot: return # run dask-worker in master for _ in range(self.slave_proc_num): - worker_p = run_local_worker(address=address, local_dir=local_dir) + local_master_dir = local_dir + '/{}'.format(uuid.uuid1().hex[:8]) + FileOps.make_dir(local_master_dir) + worker_p = run_local_worker(slave_ip=master_ip, address=self.master_address, local_dir=local_master_dir) self._cluster_pid.append(worker_p.pid) # run dask-worker in each slaves. 
for slave_ip in self.slaves: for _ in range(self.slave_proc_num): - worker_p = run_remote_worker(slave_ip=slave_ip, address=address, local_dir=local_dir) + local_slaves_dir = local_dir + '/{}'.format(uuid.uuid1().hex[:8]) + FileOps.make_dir(local_slaves_dir) + worker_p = run_remote_worker(slave_ip=slave_ip, address=self.master_address, local_dir=local_slaves_dir) self._cluster_pid.append(worker_p.pid) def _check_dask_scheduler(self): @@ -203,7 +201,7 @@ def _wait_workers(self): worker_count_min = int(self.world_size * self.worker_portion) for _ in range(100): - time.sleep(5) + time.sleep(1) n_workers = len(self.client.scheduler_info()["workers"]) logging.info("Accessed Workers: {}".format(n_workers)) if n_workers >= worker_count_min: diff --git a/vega/core/scheduler/distribution.py b/vega/core/scheduler/distribution.py index f5e2675..44443d6 100644 --- a/vega/core/scheduler/distribution.py +++ b/vega/core/scheduler/distribution.py @@ -1,12 +1,18 @@ # -*- coding: utf-8 -*- # Copyright (C) 2020. Huawei Technologies Co., Ltd. All rights reserved. -# This program is free software; you can redistribute it and/or modify -# it under the terms of the MIT License. -# This program is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# MIT License for more details. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. """ @@ -265,8 +271,6 @@ def close(self): def join(self): """Wait all process in pool to finish.""" - # self.close() - # self.process_pool.join() for pid, res in self.process_list: if res is not None and not res.ready(): res.wait() diff --git a/vega/core/scheduler/local_master.py b/vega/core/scheduler/local_master.py index da21a4c..4efcc23 100644 --- a/vega/core/scheduler/local_master.py +++ b/vega/core/scheduler/local_master.py @@ -1,12 +1,18 @@ # -*- coding: utf-8 -*- # Copyright (C) 2020. Huawei Technologies Co., Ltd. All rights reserved. -# This program is free software; you can redistribute it and/or modify -# it under the terms of the MIT License. -# This program is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# MIT License for more details. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
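Note: the rewritten DaskEnv._start_dask above only appends the master path and the current working directory to PYTHONPATH when they are not already listed, and it gives every dask-worker its own scratch directory derived from a short uuid so that workers started on the same node do not share local state. A minimal sketch of both patterns (helper names are illustrative):

    import os
    import uuid

    def append_unique_to_pythonpath(path):
        """Append path to PYTHONPATH only if it is not already present."""
        current = os.environ.get("PYTHONPATH", "")
        if path not in current.split(":"):
            os.environ["PYTHONPATH"] = f"{current}:{path}" if current else path

    def new_worker_dir(base_dir):
        """Create a unique local directory for a single dask-worker."""
        worker_dir = os.path.join(base_dir, uuid.uuid1().hex[:8])
        os.makedirs(worker_dir, exist_ok=True)
        return worker_dir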
"""The LocalMaster's method is same as Master, and the class is used on single node.""" @@ -56,9 +62,9 @@ def run(self, worker, evaluator=None): for worker in workers: try: worker.train_process() - except Exception: - logging.error(traceback.format_exc()) - logging.error(f"Failed to run worker, id={worker.worker_id}") + except Exception as e: + logging.debug(traceback.format_exc()) + logging.error(f"Failed to run worker, id: {worker.worker_id}, message: {e}") self._update(step_name, worker_id) diff --git a/vega/core/scheduler/master.py b/vega/core/scheduler/master.py index 2cf5d7c..8f0c54c 100644 --- a/vega/core/scheduler/master.py +++ b/vega/core/scheduler/master.py @@ -1,12 +1,18 @@ # -*- coding: utf-8 -*- # Copyright (C) 2020. Huawei Technologies Co., Ltd. All rights reserved. -# This program is free software; you can redistribute it and/or modify -# it under the terms of the MIT License. -# This program is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# MIT License for more details. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. """The main part of the cluster framework. @@ -24,15 +30,15 @@ from threading import Lock from queue import Queue from vega.trainer import utils -from .distribution import ClusterDaskDistributor from vega.common import TaskOps, FileOps from vega.common.general import General -from .worker_env import WorkerEnv -from .dask_env import DaskEnv from vega.trainer.deserialize import pickle_worker from vega.trainer.run_remote_worker import run_remote_worker -from .master_base import MasterBase from vega.report import ReportClient +from .distribution import ClusterDaskDistributor +from .worker_env import WorkerEnv +from .dask_env import DaskEnv +from .master_base import MasterBase class Master(MasterBase): diff --git a/vega/core/scheduler/master_base.py b/vega/core/scheduler/master_base.py index 1b4a01b..31d2cfd 100644 --- a/vega/core/scheduler/master_base.py +++ b/vega/core/scheduler/master_base.py @@ -1,12 +1,18 @@ # -*- coding: utf-8 -*- # Copyright (C) 2020. Huawei Technologies Co., Ltd. All rights reserved. -# This program is free software; you can redistribute it and/or modify -# it under the terms of the MIT License. -# This program is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# MIT License for more details. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+# See the License for the specific language governing permissions and +# limitations under the License. """The MasterBase class.""" diff --git a/vega/core/scheduler/master_ops.py b/vega/core/scheduler/master_ops.py index 814fce5..ab3b2fd 100644 --- a/vega/core/scheduler/master_ops.py +++ b/vega/core/scheduler/master_ops.py @@ -1,12 +1,18 @@ # -*- coding: utf-8 -*- # Copyright (C) 2020. Huawei Technologies Co., Ltd. All rights reserved. -# This program is free software; you can redistribute it and/or modify -# it under the terms of the MIT License. -# This program is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# MIT License for more details. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. """The MasterFactory method. @@ -55,5 +61,5 @@ def shutdown_cluster(): time.sleep(12) logger.info("Cluster is shut down.") except Exception as e: - logger.error("Pipeline's cluster shutdown error: {}".format(str(e))) - logger.error(traceback.format_exc()) + logger.error(f"Pipeline's cluster shutdown error, message: {e}") + logger.debug(traceback.format_exc()) diff --git a/vega/core/scheduler/run_dask.py b/vega/core/scheduler/run_dask.py index 57f629c..148f53b 100644 --- a/vega/core/scheduler/run_dask.py +++ b/vega/core/scheduler/run_dask.py @@ -1,12 +1,18 @@ # -*- coding: utf-8 -*- # Copyright (C) 2020. Huawei Technologies Co., Ltd. All rights reserved. -# This program is free software; you can redistribute it and/or modify -# it under the terms of the MIT License. -# This program is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# MIT License for more details. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
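Note: a recurring change in these scheduler modules (LocalMaster.run above, shutdown_cluster in master_ops.py) is the error-handling style: the concise exception message is logged at ERROR while the full traceback is demoted to DEBUG, so the console stays readable and the stack is still available when debug logging is enabled. The pattern, as used in the hunks:

    import logging
    import traceback

    logger = logging.getLogger(__name__)

    def run_safely(worker):
        try:
            worker.train_process()
        except Exception as e:
            logger.debug(traceback.format_exc())
            logger.error(f"Failed to run worker, id: {worker.worker_id}, message: {e}")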
"""Run dask scheduler and worker.""" @@ -14,55 +20,86 @@ import subprocess import shutil from distributed import Client +from vega.common import General def get_client(address): """Get dask client.""" - return Client(address) + if not General.security: + return Client(address) + else: + from vega.security.run_dask import get_client_security + return get_client_security(address) def get_address(master_host, master_port): """Get master address.""" - return "tcp://{}:{}".format(master_host, master_port) + if not General.security: + return "tcp://{}:{}".format(master_host, master_port) + else: + from vega.security.run_dask import get_address_security + return get_address_security(master_host, master_port) -def run_scheduler(port): +def run_scheduler(ip, port, tmp_file): """Run dask-scheduler.""" - id = subprocess.Popen( - ["dask-scheduler", "--no-dashboard", "--no-show", port], - env=os.environ - ) - return id + if not General.security: + return subprocess.Popen( + [ + "dask-scheduler", + "--no-dashboard", + "--no-show", + f"--host={ip}", + f"--port={port}", + f"--scheduler-file={tmp_file}", + ], + env=os.environ + ) + else: + from vega.security.run_dask import run_scheduler_security + return run_scheduler_security(ip, port, tmp_file) -def run_local_worker(address, local_dir): - """Run dask-worker on local.""" - id = subprocess.Popen( - [ - "dask-worker", - address, - '--nthreads=1', - '--nprocs=1', - '--memory-limit=0', - local_dir], - env=os.environ - ) - return id +def run_local_worker(slave_ip, address, local_dir): + """Run dask-worker on local node.""" + if not General.security: + return subprocess.Popen( + [ + "dask-worker", + address, + '--nthreads=1', + '--nprocs=1', + '--memory-limit=0', + "--no-dashboard", + f"--local-directory={local_dir}", + f"--host={slave_ip}", + ], + env=os.environ + ) + else: + from vega.security.run_dask import run_local_worker_security + return run_local_worker_security(slave_ip, address, local_dir) def run_remote_worker(slave_ip, address, local_dir): """Run dask-worker on remove node.""" - id = subprocess.Popen( - [ - "ssh", - slave_ip, - shutil.which("dask-worker"), - address, - '--nthreads=1', - '--nprocs=1', - '--memory-limit=0', - local_dir - ], - env=os.environ - ) - return id + if not General.security: + id = subprocess.Popen( + [ + "ssh", + slave_ip, + shutil.which("dask-worker"), + address, + '--nthreads=1', + '--nprocs=1', + '--memory-limit=0', + "--no-dashboard", + f"--local-directory={local_dir}", + f"--host={slave_ip}", + ], + env=os.environ + ) + return id + else: + from vega.security.run_dask import run_remote_worker_security + return run_remote_worker_security(slave_ip, address, local_dir) diff --git a/vega/core/scheduler/worker_env.py b/vega/core/scheduler/worker_env.py index 7819ac8..3cba44b 100644 --- a/vega/core/scheduler/worker_env.py +++ b/vega/core/scheduler/worker_env.py @@ -1,12 +1,18 @@ # -*- coding: utf-8 -*- # Copyright (C) 2020. Huawei Technologies Co., Ltd. All rights reserved. -# This program is free software; you can redistribute it and/or modify -# it under the terms of the MIT License. -# This program is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# MIT License for more details. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. """The main part of the cluster framework. @@ -53,7 +59,6 @@ def _save_master_env(self): "BACKEND_TYPE": os.environ.get("BACKEND_TYPE", None), "LD_PRELOAD": os.environ.get("LD_PRELOAD", None), "DLS_JOB_ID": os.environ.get("DLS_JOB_ID", None), - "vega_init_env": os.environ.get("vega_init_env", None), "vega_python_command": os.environ.get("vega_python_command", None), "vega_timeout": os.environ.get("vega_timeout", None), "vega_world_size": os.environ.get("WORLD_SIZE", None), diff --git a/vega/core/search_algs/__init__.py b/vega/core/search_algs/__init__.py index fcf0e93..6bb7494 100644 --- a/vega/core/search_algs/__init__.py +++ b/vega/core/search_algs/__init__.py @@ -1,8 +1,8 @@ +from vega.common.class_factory import ClassFactory from .search_algorithm import SearchAlgorithm from .ea_conf import EAConfig from .pareto_front_conf import ParetoFrontConfig from .pareto_front import ParetoFront -from vega.common.class_factory import ClassFactory ClassFactory.lazy_register("vega.core.search_algs", { diff --git a/vega/core/search_algs/codec/codec.py b/vega/core/search_algs/codec/codec.py index 8cf429d..cce502b 100644 --- a/vega/core/search_algs/codec/codec.py +++ b/vega/core/search_algs/codec/codec.py @@ -1,12 +1,18 @@ # -*- coding: utf-8 -*- # Copyright (C) 2020. Huawei Technologies Co., Ltd. All rights reserved. -# This program is free software; you can redistribute it and/or modify -# it under the terms of the MIT License. -# This program is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# MIT License for more details. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. """Defined the basic Codec.""" from vega.common import ClassFactory, ClassType diff --git a/vega/core/search_algs/ea_conf.py b/vega/core/search_algs/ea_conf.py index d0cf4c5..f9c2311 100644 --- a/vega/core/search_algs/ea_conf.py +++ b/vega/core/search_algs/ea_conf.py @@ -1,12 +1,18 @@ # -*- coding:utf-8 -*- # Copyright (C) 2020. Huawei Technologies Co., Ltd. All rights reserved. -# This program is free software; you can redistribute it and/or modify -# it under the terms of the MIT License. -# This program is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# MIT License for more details. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. """Defined Configs.""" from vega.common import ConfigSerializable diff --git a/vega/core/search_algs/pareto_front.py b/vega/core/search_algs/pareto_front.py index 4e61d8a..3d21042 100644 --- a/vega/core/search_algs/pareto_front.py +++ b/vega/core/search_algs/pareto_front.py @@ -1,12 +1,18 @@ # -*- coding:utf-8 -*- # Copyright (C) 2020. Huawei Technologies Co., Ltd. All rights reserved. -# This program is free software; you can redistribute it and/or modify -# it under the terms of the MIT License. -# This program is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# MIT License for more details. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. """A basic ParetoFront class, for multi-objective optimization. @@ -37,12 +43,13 @@ >>> # {id1:hp1, id2:hp2, id6:hp6} """ -import pandas as pd -import pareto + import copy import hashlib -import json import logging +import json +import pandas as pd +from vega.common.pareto_front import get_pareto_index class ParetoFront(object): @@ -52,14 +59,14 @@ class ParetoFront(object): :type cfg: type """ - def __init__(self, object_count=2, max_object_ids=[]): + def __init__(self, object_count=2, max_object_ids=None): """Init for ParetoFront.""" logging.info("start init ParetoFront") - self.sieve_columns = ['config_id', 'md5', 'config'] + self.sieve_columns = ['config_id', 'sha256', 'config'] for i in range(0, object_count): self.sieve_columns.append("score_{}".format(i)) self.sieve_board = pd.DataFrame(columns=self.sieve_columns) - self.max_object_ids = None + self.max_object_ids = [] if isinstance(max_object_ids, list) and len(max_object_ids) > 0: self.max_object_ids = [x + 3 for x in max_object_ids] self.pareto_cols = [x + 3 for x in range(0, object_count)] @@ -85,11 +92,13 @@ def get_pareto_front(self): pareto_board = self.sieve_board.copy() pareto_board = pareto_board.dropna() if not pareto_board.empty: - nondominated = pareto.eps_sort( - [list(pareto_board.itertuples(False))], - objectives=self.pareto_cols, - epsilons=None, - maximize=self.max_object_ids) + for max_id in self.max_object_ids: + pareto_board.iloc[:, max_id] = pareto_board.iloc[:, max_id] * -1 + col_names = [pareto_board.columns[i] for i in self.pareto_cols] + rewards = -1 * pareto_board[col_names].values + indexes = get_pareto_index(rewards).tolist() + nondominated = pareto_board[indexes] + for tmp_list in nondominated: for i, value in enumerate(tmp_list): if i == 2: @@ -107,7 +116,7 @@ def add_pareto_score(self, config_id, score_list): """ tmp_column = 
self.sieve_columns.copy() tmp_column.remove('config_id') - tmp_column.remove('md5') + tmp_column.remove('sha256') tmp_column.remove('config') self.sieve_board.loc[ (self.sieve_board['config_id'] == config_id), @@ -124,11 +133,11 @@ def _add_to_board(self, id, config): if config is None: return False config_dict = copy.deepcopy(config) - md5 = hashlib.md5(json.dumps(config_dict, sort_keys=True).encode('utf-8')).hexdigest() - found_df = self.sieve_board[self.sieve_board['md5'].str.contains(md5)] + sha256 = hashlib.sha256(json.dumps(config_dict, sort_keys=True).encode('utf-8')).hexdigest() + found_df = self.sieve_board[self.sieve_board['sha256'].str.contains(sha256)] if found_df.shape[0] > 0: return False else: - save_dict = {'config_id': id, 'md5': md5, 'config': config_dict} + save_dict = {'config_id': id, 'sha256': sha256, 'config': config_dict} self.sieve_board = self.sieve_board.append(save_dict, ignore_index=True) return True diff --git a/vega/core/search_algs/pareto_front_conf.py b/vega/core/search_algs/pareto_front_conf.py index 0a6ec14..81b35db 100644 --- a/vega/core/search_algs/pareto_front_conf.py +++ b/vega/core/search_algs/pareto_front_conf.py @@ -1,12 +1,18 @@ # -*- coding:utf-8 -*- # Copyright (C) 2020. Huawei Technologies Co., Ltd. All rights reserved. -# This program is free software; you can redistribute it and/or modify -# it under the terms of the MIT License. -# This program is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# MIT License for more details. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. """Defined Configs.""" from vega.common import ConfigSerializable diff --git a/vega/core/search_algs/ps_differential.py b/vega/core/search_algs/ps_differential.py index 94a1127..b158043 100644 --- a/vega/core/search_algs/ps_differential.py +++ b/vega/core/search_algs/ps_differential.py @@ -1,25 +1,31 @@ # -*- coding:utf-8 -*- # Copyright (C) 2020. Huawei Technologies Co., Ltd. All rights reserved. -# This program is free software; you can redistribute it and/or modify -# it under the terms of the MIT License. -# This program is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# MIT License for more details. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
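Note: with the ParetoFront changes above, duplicate configurations are detected by the SHA-256 digest of their canonically serialized JSON (sort_keys=True) instead of MD5, and the non-dominated set is computed via vega.common.pareto_front.get_pareto_index rather than the external pareto package. A minimal sketch; the digest matches the hunk, while pareto_mask is only an illustrative stand-in for get_pareto_index (assumed here to treat larger reward values as better):

    import hashlib
    import json
    import numpy as np

    def config_digest(config):
        """Stable digest used to detect duplicate configurations."""
        data = json.dumps(config, sort_keys=True).encode("utf-8")
        return hashlib.sha256(data).hexdigest()

    def pareto_mask(rewards):
        """Boolean mask of non-dominated rows; larger values are better."""
        mask = np.ones(rewards.shape[0], dtype=bool)
        for i in range(rewards.shape[0]):
            better_or_equal = np.all(rewards >= rewards[i], axis=1)
            strictly_better = np.any(rewards > rewards[i], axis=1)
            if np.any(better_or_equal & strictly_better):
                mask[i] = False
        return mask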
"""DifferentialAlgorithm.""" import importlib import math import logging -import numpy as np from functools import partial +import numpy as np import vega -from .search_algorithm import SearchAlgorithm from vega.common import ClassFactory, ClassType from vega.networks.network_desc import NetworkDesc from vega.trainer.conf import TrainerConfig from vega.common import ConfigSerializable +from .search_algorithm import SearchAlgorithm if vega.is_torch_backend(): import torch @@ -132,11 +138,17 @@ def _init_loss(self): def step(self, train_x=None, train_y=None, valid_x=None, valid_y=None, lr=None, w_optimizer=None, w_loss=None, unrolled=None, scope_name=None): """Compute one step.""" + def set_opt_requires_grad(value): + for param in self.optimizer.param_groups: + for parameter in param['params']: + parameter.requires_grad = value if vega.is_torch_backend(): + set_opt_requires_grad(True) self.optimizer.zero_grad() loss = w_loss(self.model(valid_x), valid_y) loss.backward() self.optimizer.step() + set_opt_requires_grad(False) return elif vega.is_tf_backend(): self.lr = lr diff --git a/vega/core/search_algs/search_algorithm.py b/vega/core/search_algs/search_algorithm.py index ea62215..ae06c54 100644 --- a/vega/core/search_algs/search_algorithm.py +++ b/vega/core/search_algs/search_algorithm.py @@ -1,12 +1,18 @@ # -*- coding: utf-8 -*- # Copyright (C) 2020. Huawei Technologies Co., Ltd. All rights reserved. -# This program is free software; you can redistribute it and/or modify -# it under the terms of the MIT License. -# This program is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# MIT License for more details. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. """SearchAlgorithm.""" import logging diff --git a/vega/core/search_space/__init__.py b/vega/core/search_space/__init__.py index 45d5a33..62c2162 100644 --- a/vega/core/search_space/__init__.py +++ b/vega/core/search_space/__init__.py @@ -1,5 +1,7 @@ -from .search_space import SearchSpace -from vega.core.search_space.ext_hyper_parameter import * +from vega.core.search_space.ext_hyper_parameter import IntHyperParameter, FloatHyperParameter, \ + FloatExpHyperParameter, IntExpHyperParameter, CatHyperParameter, BoolCatHyperParameter, \ + AdjacencyListHyperParameter, BinaryCodeHyperParameter, HalfCodeHyperParameter +from .search_space import SearchSpace, SpaceSet from .condition_types import ConditionTypes, CONDITION_TYPE_MAP -from .ext_conditions import * +from .ext_conditions import EqualCondition, NotEqualCondition, InCondition from .range_generator import AdjacencyList diff --git a/vega/core/search_space/condition.py b/vega/core/search_space/condition.py index 14999b8..ab704c0 100644 --- a/vega/core/search_space/condition.py +++ b/vega/core/search_space/condition.py @@ -1,12 +1,18 @@ # -*- coding: utf-8 -*- # Copyright (C) 2020. Huawei Technologies Co., Ltd. All rights reserved. 
-# This program is free software; you can redistribute it and/or modify -# it under the terms of the MIT License. -# This program is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# MIT License for more details. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. """Condition class.""" diff --git a/vega/core/search_space/condition_types.py b/vega/core/search_space/condition_types.py index 66a25e3..42aad01 100644 --- a/vega/core/search_space/condition_types.py +++ b/vega/core/search_space/condition_types.py @@ -1,12 +1,18 @@ # -*- coding: utf-8 -*- # Copyright (C) 2020. Huawei Technologies Co., Ltd. All rights reserved. -# This program is free software; you can redistribute it and/or modify -# it under the terms of the MIT License. -# This program is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# MIT License for more details. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. """Condition types.""" @@ -19,8 +25,6 @@ class ConditionTypes(Enum): EQUAL = 1 NOT_EQUAL = 2 IN = 3 - # LESS = 4 - # GREATER = 5 CONDITION_TYPE_MAP = { diff --git a/vega/core/search_space/ext_conditions.py b/vega/core/search_space/ext_conditions.py index ac466a2..42913c4 100644 --- a/vega/core/search_space/ext_conditions.py +++ b/vega/core/search_space/ext_conditions.py @@ -1,12 +1,18 @@ # -*- coding: utf-8 -*- # Copyright (C) 2020. Huawei Technologies Co., Ltd. All rights reserved. -# This program is free software; you can redistribute it and/or modify -# it under the terms of the MIT License. -# This program is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# MIT License for more details. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
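Note: in the DifferentialAlgorithm.step hunk a little further up, the PyTorch branch now switches requires_grad on for every parameter owned by the weight optimizer before the update and switches it off again afterwards, so those weights are only trainable for the duration of that step. A minimal sketch of the toggle, written against any torch.optim-style optimizer:

    def set_opt_requires_grad(optimizer, value):
        """Enable or disable gradients for every parameter the optimizer owns."""
        for group in optimizer.param_groups:
            for parameter in group["params"]:
                parameter.requires_grad = value

    # set_opt_requires_grad(optimizer, True)
    # optimizer.zero_grad(); loss.backward(); optimizer.step()
    # set_opt_requires_grad(optimizer, False)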
"""Extend different Condition classes.""" diff --git a/vega/core/search_space/ext_hyper_parameter.py b/vega/core/search_space/ext_hyper_parameter.py index 7522deb..91823e1 100644 --- a/vega/core/search_space/ext_hyper_parameter.py +++ b/vega/core/search_space/ext_hyper_parameter.py @@ -1,12 +1,18 @@ # -*- coding: utf-8 -*- # Copyright (C) 2020. Huawei Technologies Co., Ltd. All rights reserved. -# This program is free software; you can redistribute it and/or modify -# it under the terms of the MIT License. -# This program is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# MIT License for more details. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. """Extend HyperParameter classes.""" @@ -170,7 +176,6 @@ def __init__(self, param_name='param', param_slice=0, param_type=None, param_ran super(CatHyperParameter, self).__init__(param_name, param_slice, param_type, param_range, generator, sample_num) self.list_values = [] self.cat_transform = {} - # Converting array to index map for idx, each in enumerate(self.range): if isinstance(each, list): key = idx @@ -206,7 +211,6 @@ def check_legal(self, value): :rtype: bool. """ - # print("cat check_legal") if self.cast(value) in self.cat_transform: return True else: @@ -219,23 +223,17 @@ def encode(self, x, y=None): :return: transform real `x` to hp's `x`. """ - # Accumulate the scores of each category - # and the number of times that we have used it tmp_cat_transform = {each: (0, 0) for each in self.cat_transform.keys()} for i in range(len(x)): tmp_cat_transform[x[i]] = ( - tmp_cat_transform[x[i]][0] + y[i], # sum score - tmp_cat_transform[x[i]][1] + 1 # count occurrences + tmp_cat_transform[x[i]][0] + y[i], + tmp_cat_transform[x[i]][1] + 1 ) - - # If we have at least one score, compute the average for key, value in tmp_cat_transform.items(): if value[1] != 0: self.cat_transform[key] = value[0] / float(value[1]) else: self.cat_transform[key] = 0 - - # Compute the range using the min and max scores range_max = max( self.cat_transform.keys(), key=(lambda k: self.cat_transform[k]) @@ -253,7 +251,7 @@ def encode(self, x, y=None): return np.vectorize(self.cat_transform.get)(x) - def decode(self, x, forbidden=[]): + def decode(self, x, forbidden=None): """Inverse transform. :param x: input `x`. @@ -261,7 +259,8 @@ def decode(self, x, forbidden=[]): :return: inverse transform `x` back to real `x`. 
""" - # Compute the inverse dictionary + if forbidden is None: + forbidden = [] inv_map = defaultdict(list) for key, value in self.cat_transform.items(): if key not in forbidden: @@ -272,8 +271,6 @@ def invert(x): diff = (np.abs(keys - x)) min_diff = diff[0] max_key = keys[0] - - # Find the score which is closer to the given value for i in range(len(diff)): if diff[i] < min_diff: min_diff = diff[i] @@ -281,8 +278,6 @@ def invert(x): elif diff[i] == min_diff and keys[i] > max_key: min_diff = diff[i] max_key = keys[i] - - # Get a random category from the ones that had the given score return random.choice(np.vectorize(inv_map.get)(max_key)) if isinstance(x, Iterable): @@ -399,7 +394,6 @@ def decode(self, x, forbidden=''): """ individual = [] size = self.range[0] - # TODO: TEST ONLY from vega.core.pipeline.conf import PipeStepConfig ratio = 0.8 if hasattr(PipeStepConfig.search_space, "prune_ratio"): diff --git a/vega/core/search_space/forbidden.py b/vega/core/search_space/forbidden.py index a266d2b..f478c91 100644 --- a/vega/core/search_space/forbidden.py +++ b/vega/core/search_space/forbidden.py @@ -1,12 +1,18 @@ # -*- coding:utf-8 -*- # Copyright (C) 2020. Huawei Technologies Co., Ltd. All rights reserved. -# This program is free software; you can redistribute it and/or modify -# it under the terms of the MIT License. -# This program is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# MIT License for more details. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. """ForbiddenEqualsClause class.""" from .hyper_parameter import HyperParameter diff --git a/vega/core/search_space/hyper_parameter.py b/vega/core/search_space/hyper_parameter.py index b91384f..680f309 100644 --- a/vega/core/search_space/hyper_parameter.py +++ b/vega/core/search_space/hyper_parameter.py @@ -1,12 +1,18 @@ # -*- coding: utf-8 -*- # Copyright (C) 2020. Huawei Technologies Co., Ltd. All rights reserved. -# This program is free software; you can redistribute it and/or modify -# it under the terms of the MIT License. -# This program is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# MIT License for more details. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
"""HyperParameter.""" @@ -136,7 +142,7 @@ def __eq__(self, other): """If self is equal to other.""" if isinstance(self, other.__class__): _result = (self.param_type is other.param_type) and (self.is_integer == other.is_integer) and ( - self.range == other.range) + self.range == other.range) return _result return NotImplemented @@ -153,12 +159,14 @@ def allow_greater_less_comparison(self): """ return self.param_type != ParamTypes.BOOL and self.param_type != ParamTypes.CATEGORY - def sample(self, n=1, decode=True): + def sample(self, n=1, decode=True, handler=None): """Random sample one hyper-param.""" if len(self.range) == 1: low, high = 0, self.range[0] else: low, high = self.range + if handler: + low, high = handler(low, high) if self.is_integer: value = np.random.randint(low, high + 1, size=n) else: diff --git a/vega/core/search_space/param_types.py b/vega/core/search_space/param_types.py index fe07776..224904a 100644 --- a/vega/core/search_space/param_types.py +++ b/vega/core/search_space/param_types.py @@ -1,12 +1,18 @@ # -*- coding: utf-8 -*- # Copyright (C) 2020. Huawei Technologies Co., Ltd. All rights reserved. -# This program is free software; you can redistribute it and/or modify -# it under the terms of the MIT License. -# This program is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# MIT License for more details. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. """Param types.""" diff --git a/vega/core/search_space/params_factory.py b/vega/core/search_space/params_factory.py index 07be63e..6eb718d 100644 --- a/vega/core/search_space/params_factory.py +++ b/vega/core/search_space/params_factory.py @@ -1,12 +1,18 @@ # -*- coding: utf-8 -*- # Copyright (C) 2020. Huawei Technologies Co., Ltd. All rights reserved. -# This program is free software; you can redistribute it and/or modify -# it under the terms of the MIT License. -# This program is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# MIT License for more details. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
"""HyperParameter.""" diff --git a/vega/core/search_space/range_generator.py b/vega/core/search_space/range_generator.py index 22f6dde..96f6688 100644 --- a/vega/core/search_space/range_generator.py +++ b/vega/core/search_space/range_generator.py @@ -1,12 +1,18 @@ # -*- coding: utf-8 -*- # Copyright (C) 2020. Huawei Technologies Co., Ltd. All rights reserved. -# This program is free software; you can redistribute it and/or modify -# it under the terms of the MIT License. -# This program is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# MIT License for more details. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. """Generator range values.""" from itertools import product diff --git a/vega/core/search_space/search_space.py b/vega/core/search_space/search_space.py index 8d1801d..e5d9a50 100644 --- a/vega/core/search_space/search_space.py +++ b/vega/core/search_space/search_space.py @@ -1,25 +1,31 @@ # -*- coding: utf-8 -*- # Copyright (C) 2020. Huawei Technologies Co., Ltd. All rights reserved. -# This program is free software; you can redistribute it and/or modify -# it under the terms of the MIT License. -# This program is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# MIT License for more details. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
"""SearchSpace class.""" -import numpy as np import logging from collections import OrderedDict from queue import Queue +import numpy as np +from vega.common.dag import DAG +from vega.common.class_factory import ClassFactory, ClassType +from vega.core.pipeline.conf import SearchSpaceConfig from .param_types import PARAM_TYPE_MAP from .condition_types import CONDITION_TYPE_MAP from .params_factory import ParamsFactory from .forbidden import ForbiddenAndConjunction, ForbiddenEqualsClause -from dag import DAG, DAGValidationError -from vega.common.class_factory import ClassFactory, ClassType -from vega.core.pipeline.conf import SearchSpaceConfig logger = logging.getLogger(__name__) @@ -50,6 +56,7 @@ def __init__(self, desc=None): self._forbidden_list = [] self._hp_count = 0 self._dag = DAG() + self.handler = None if desc is not None: self.form_desc(desc) @@ -189,9 +196,11 @@ def add_condition(self, condition): except KeyError: raise KeyError('Hyperparameter in condition {} not exist in' 'current SearchSpace.'.format(condition)) + """ except DAGValidationError: raise KeyError('Current condition {} valid DAG rule in current' 'SearchSpace, can not be added!'.format(condition)) + """ if parent_name not in self._condition_dict: self._condition_dict[parent_name] = {} self._condition_dict[parent_name][child_name] = condition @@ -275,7 +284,7 @@ def _get_random_sample_space(self, n): parameters_array = np.zeros((n, self._hp_count)) i = 0 for _, hp in self._params.items(): - column = hp.sample(n=n, decode=False) + column = hp.sample(n=n, decode=False, handler=self.handler) parameters_array[:, i] = column i = i + 1 return parameters_array @@ -353,9 +362,40 @@ def decode(self, param_list): while not q.empty(): parent = q.get() final_param_dict[parent] = inversed_param_dict[parent] - child_list = self._dag.downstream(parent) + child_list = self._dag.next_nodes(parent) for child in child_list: condition = self._condition_dict[parent][child] if condition.evaluate(inversed_param_dict[parent]): q.put(child) return final_param_dict + + +class SpaceSet(object): + """Define a Space set to add search space dict.""" + + def __init__(self, ): + super(SpaceSet, self).__init__() + self._search_space = [] + + def add(self, key, space_type, space_range): + """add one search space dict.""" + self._search_space.append({"key": key, "type": space_type, "range": space_range}) + return self + + def pop(self, idx): + """Pop item by idx.""" + return self._search_space.pop(idx) + + def load(self, space_list): + """Load search space list.""" + for space in space_list: + if type(space) in [list, tuple]: + self.add(*space) + elif isinstance(space, dict): + self.add(**space) + return self.search_space + + @property + def search_space(self): + """Get all search spaces.""" + return SearchSpace(dict(hyperparameters=self._search_space)) diff --git a/vega/core/validation/__init__.py b/vega/core/validation/__init__.py deleted file mode 100644 index 0ae0d99..0000000 --- a/vega/core/validation/__init__.py +++ /dev/null @@ -1 +0,0 @@ -from .engine import validation diff --git a/vega/core/validation/engine.py b/vega/core/validation/engine.py deleted file mode 100644 index b1f2b79..0000000 --- a/vega/core/validation/engine.py +++ /dev/null @@ -1,52 +0,0 @@ -# -*- coding:utf-8 -*- - -# Copyright (C) 2020. Huawei Technologies Co., Ltd. All rights reserved. -# This program is free software; you can redistribute it and/or modify -# it under the terms of the MIT License. 
-# This program is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# MIT License for more details. - -"""Provide Validation engine.""" -from functools import wraps -from vega.common import ClassFactory, ClassType - - -def validation(class_name): - """Register the class to be verified. - - Get the class_name object, intercept the attributes it defines in validation, and validate them. - :param class_name: class name - :return: wrapper - """ - - def decorator(cls): - """Provide input param to decorator. - - :param func: wrapper function - :return: decorator - """ - # TODO: the required package needs to be imported - if isinstance(class_name, str): - need_validate_cls = ClassFactory.get_cls(ClassType.CONFIG, class_name) - else: - need_validate_cls = class_name - - @wraps(cls) - def wrapper(*args, **kwargs): - """Make function as a wrapper.""" - valid_attrs = {key: item for key, item in cls.__dict__.items() if not key.startswith('_')} - for attr_name, rules in valid_attrs.items(): - attr_value = getattr(need_validate_cls, attr_name) - if isinstance(rules, list) or isinstance(rules, tuple): - for _rule in rules: - _rule(attr_value) - else: - rules(attr_value) - - return cls(*args, **kwargs) - - return wrapper - - return decorator diff --git a/vega/core/validation/rules.py b/vega/core/validation/rules.py deleted file mode 100644 index 1016b24..0000000 --- a/vega/core/validation/rules.py +++ /dev/null @@ -1,23 +0,0 @@ -# -*- coding:utf-8 -*- - -# Copyright (C) 2020. Huawei Technologies Co., Ltd. All rights reserved. -# This program is free software; you can redistribute it and/or modify -# it under the terms of the MIT License. -# This program is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# MIT License for more details. - -"""Provide Validation rules.""" - - -class ValidationError(Exception): - """Validation Error.""" - - pass - - -def not_null(value): - """Check value is not None.""" - if value is None: - raise ValidationError("can not be None") diff --git a/vega/datasets/__init__.py b/vega/datasets/__init__.py index b49eda5..209815a 100644 --- a/vega/datasets/__init__.py +++ b/vega/datasets/__init__.py @@ -1,12 +1,18 @@ # -*- coding:utf-8 -*- # Copyright (C) 2020. Huawei Technologies Co., Ltd. All rights reserved. -# This program is free software; you can redistribute it and/or modify -# it under the terms of the MIT License. -# This program is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# MIT License for more details. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License.
"""Import and register datasets automatically.""" @@ -17,14 +23,14 @@ def Adapter(dataset): """Adapter of dataset.""" if vega.is_torch_backend(): - from .pytorch.adapter import TorchAdapter as Adapter + from .pytorch.adapter import TorchAdapter as Adapter_backend elif vega.is_tf_backend(): - from .tensorflow.adapter import TfAdapter as Adapter + from .tensorflow.adapter import TfAdapter as Adapter_backend elif vega.is_ms_backend(): - from .mindspore.adapter import MsAdapter as Adapter + from .mindspore.adapter import MsAdapter as Adapter_backend else: raise ValueError - return Adapter(dataset) + return Adapter_backend(dataset) def register_datasets(backend): diff --git a/vega/datasets/common/__init__.py b/vega/datasets/common/__init__.py index 9dee406..397e46d 100644 --- a/vega/datasets/common/__init__.py +++ b/vega/datasets/common/__init__.py @@ -1,12 +1,18 @@ # -*- coding:utf-8 -*- # Copyright (C) 2020. Huawei Technologies Co., Ltd. All rights reserved. -# This program is free software; you can redistribute it and/or modify -# it under the terms of the MIT License. -# This program is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# MIT License for more details. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. """Lazy import dataset.""" @@ -21,7 +27,6 @@ "cityscapes": ["Cityscapes"], "div2k_unpair": ["Div2kUnpair"], "fmnist": ["FashionMnist"], - # "imagenet": ["Imagenet"], "mnist": ["Mnist"], "sr_datasets": ["Set5", "Set14", "BSDS100"], "auto_lane_datasets": ["AutoLaneDataset"], @@ -30,4 +35,5 @@ "spatiotemporal": ["SpatiotemporalDataset"], "reds": ["REDS"], "nasbench": ["Nasbench"], + "pacs": ["Pacs"], }) diff --git a/vega/datasets/common/auto_lane_datasets.py b/vega/datasets/common/auto_lane_datasets.py index c1809f5..fccbfa7 100644 --- a/vega/datasets/common/auto_lane_datasets.py +++ b/vega/datasets/common/auto_lane_datasets.py @@ -1,29 +1,35 @@ # -*- coding=utf-8 -*- # Copyright (C) 2020. Huawei Technologies Co., Ltd. All rights reserved. -# This program is free software; you can redistribute it and/or modify -# it under the terms of the MIT License. -# This program is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# MIT License for more details. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
"""This is the class for CurveLane dataset.""" import json import numpy as np -from .dataset import Dataset -from vega.common import ClassFactory, ClassType -from .utils.auto_lane_utils import get_img_whc, imread, create_train_subset, create_test_subset -from .utils.auto_lane_utils import load_lines, resize_by_wh, bgr2rgb, imagenet_normalize, load_json -from vega.datasets.common.utils.auto_lane_pointlane_codec import PointLaneCodec -from vega.datasets.conf.auto_lane import AutoLaneConfig from more_itertools import grouper -from vega.common import FileOps from imgaug.augmentables.lines import LineStringsOnImage from imgaug.augmentables.lines import LineString as ia_LineString import imgaug as ia import imgaug.augmenters as iaa +from vega.common import ClassFactory, ClassType +from vega.datasets.common.utils.auto_lane_pointlane_codec import PointLaneCodec +from vega.datasets.conf.auto_lane import AutoLaneConfig +from vega.common import FileOps +from .dataset import Dataset +from .utils.auto_lane_utils import get_img_whc, imread, create_train_subset, create_test_subset +from .utils.auto_lane_utils import load_lines, resize_by_wh, bgr2rgb, imagenet_normalize, load_json def _culane_line_to_curvelane_dict(culane_lines): diff --git a/vega/datasets/common/avazu.py b/vega/datasets/common/avazu.py index ab4280c..77af02c 100644 --- a/vega/datasets/common/avazu.py +++ b/vega/datasets/common/avazu.py @@ -1,22 +1,27 @@ # -*- coding: utf-8 -*- # Copyright (C) 2020. Huawei Technologies Co., Ltd. All rights reserved. -# This program is free software; you can redistribute it and/or modify -# it under the terms of the MIT License. -# This program is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# MIT License for more details. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. """This is a class for Avazu dataset.""" - -import numpy as np import logging -from .utils.avazu_util import AVAZUDataset -from .dataset import Dataset +import numpy as np from vega.common import FileOps from vega.datasets.conf.avazu import AvazuConfig from vega.common import ClassFactory, ClassType +from .utils.avazu_util import AVAZUDataset +from .dataset import Dataset @ClassFactory.register(ClassType.DATASET) diff --git a/vega/datasets/common/cifar10.py b/vega/datasets/common/cifar10.py index fcf1c2f..a7d89ba 100644 --- a/vega/datasets/common/cifar10.py +++ b/vega/datasets/common/cifar10.py @@ -1,22 +1,28 @@ # -*- coding: utf-8 -*- # Copyright (C) 2020. Huawei Technologies Co., Ltd. All rights reserved. -# This program is free software; you can redistribute it and/or modify -# it under the terms of the MIT License. -# This program is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# MIT License for more details. 
+# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. """This is a class for Cifar10 dataset.""" + +import os import numpy as np -from .dataset import Dataset +from PIL import Image from vega.common import ClassFactory, ClassType from vega.common import FileOps from vega.datasets.conf.cifar10 import Cifar10Config -import os -import pickle -from PIL import Image +from .dataset import Dataset @ClassFactory.register(ClassType.DATASET) @@ -49,13 +55,12 @@ def __init__(self, **kwargs): # now load the picked numpy arrays for file_name in files_list: file_path = os.path.join(self.args.data_path, self.base_folder, file_name) - with open(file_path, 'rb') as f: - entry = pickle.load(f, encoding='latin1') - self.data.append(entry['data']) - if 'labels' in entry: - self.targets.extend(entry['labels']) - else: - self.targets.extend(entry['fine_labels']) + entry = FileOps.load_pickle(file_path, encoding='latin1') + self.data.append(entry['data']) + if 'labels' in entry: + self.targets.extend(entry['labels']) + else: + self.targets.extend(entry['fine_labels']) self.data = np.vstack(self.data).reshape(-1, 3, 32, 32) self.data = self.data.transpose((0, 2, 3, 1)) # convert to HWC diff --git a/vega/datasets/common/cifar100.py b/vega/datasets/common/cifar100.py index 985bffe..4a97a64 100644 --- a/vega/datasets/common/cifar100.py +++ b/vega/datasets/common/cifar100.py @@ -1,22 +1,28 @@ # -*- coding: utf-8 -*- # Copyright (C) 2020. Huawei Technologies Co., Ltd. All rights reserved. -# This program is free software; you can redistribute it and/or modify -# it under the terms of the MIT License. -# This program is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# MIT License for more details. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
"""This is a class for Cifar100 dataset.""" -from .dataset import Dataset + +import os +import numpy as np +from PIL import Image from vega.common import ClassFactory, ClassType from vega.common import FileOps from vega.datasets.conf.cifar100 import Cifar100Config -import numpy as np -import os -import pickle -from PIL import Image +from .dataset import Dataset @ClassFactory.register(ClassType.DATASET) @@ -49,13 +55,12 @@ def __init__(self, **kwargs): # now load the picked numpy arrays for file_name in files_list: file_path = os.path.join(self.args.data_path, self.base_folder, file_name) - with open(file_path, 'rb') as f: - entry = pickle.load(f, encoding='latin1') - self.data.append(entry['data']) - if 'labels' in entry: - self.targets.extend(entry['labels']) - else: - self.targets.extend(entry['fine_labels']) + entry = FileOps.load_pickle(file_path, encoding='latin1') + self.data.append(entry['data']) + if 'labels' in entry: + self.targets.extend(entry['labels']) + else: + self.targets.extend(entry['fine_labels']) self.data = np.vstack(self.data).reshape(-1, 3, 32, 32) self.data = self.data.transpose((0, 2, 3, 1)) # convert to HWC diff --git a/vega/datasets/common/cityscapes.py b/vega/datasets/common/cityscapes.py index 5b2e1b7..4ef0d66 100644 --- a/vega/datasets/common/cityscapes.py +++ b/vega/datasets/common/cityscapes.py @@ -1,23 +1,28 @@ # -*- coding: utf-8 -*- # Copyright (C) 2020. Huawei Technologies Co., Ltd. All rights reserved. -# This program is free software; you can redistribute it and/or modify -# it under the terms of the MIT License. -# This program is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# MIT License for more details. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
"""This is the class of Cityscapes dataset.""" import os.path as osp +import glob import cv2 import numpy as np -import glob -import pickle -from .dataset import Dataset from vega.common import ClassFactory, ClassType from vega.common import FileOps from vega.datasets.conf.city_scapes import CityscapesConfig +from .dataset import Dataset @ClassFactory.register(ClassType.DATASET) @@ -113,7 +118,6 @@ def __getitem__(self, index): :rtype: dict, {'data': xx, 'mask': xx, 'name': name} """ image, label = self.read_fn(index) - # image_name = self.data_files[index].split("/")[-1].split(".")[0] image, label = self.transforms(image, label) image = np.transpose(image, [2, 0, 1]).astype(np.float32) mask = label.astype(np.int64) @@ -175,10 +179,8 @@ def _read_item_pickle(self, index): :return: image in np.array, HWC, bgr; label in np.array, HW :rtype: tuple of np.array """ - with open(self.data_files[index], "rb") as file: - image = pickle.load(file) - with open(self.label_files[index], "rb") as file: - label = pickle.load(file) + image = FileOps.load_pickle(self.data_files[index]) + label = FileOps.load_pickle(self.label_files[index]) return image, label @property diff --git a/vega/datasets/common/cls_ds.py b/vega/datasets/common/cls_ds.py index 37beb8e..b059266 100644 --- a/vega/datasets/common/cls_ds.py +++ b/vega/datasets/common/cls_ds.py @@ -1,12 +1,18 @@ # -*- coding: utf-8 -*- # Copyright (C) 2020. Huawei Technologies Co., Ltd. All rights reserved. -# This program is free software; you can redistribute it and/or modify -# it under the terms of the MIT License. -# This program is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# MIT License for more details. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. """This is a class for classification dataset.""" diff --git a/vega/datasets/common/coco.py b/vega/datasets/common/coco.py index a34bebb..281bff9 100644 --- a/vega/datasets/common/coco.py +++ b/vega/datasets/common/coco.py @@ -1,26 +1,28 @@ # -*- coding: utf-8 -*- # Copyright (C) 2020. Huawei Technologies Co., Ltd. All rights reserved. -# This program is free software; you can redistribute it and/or modify -# it under the terms of the MIT License. -# This program is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# MIT License for more details. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+# See the License for the specific language governing permissions and +# limitations under the License. + """This is a class for coco dataset.""" -import logging + import os -import json -import numpy as np -import torch from PIL import Image from vega.common import ClassFactory, ClassType from vega.datasets.conf.coco import CocoConfig from vega.datasets.common.dataset import Dataset -from pycocotools import mask as coco_mask from pycocotools.coco import COCO -from vega.common.task_ops import TaskOps @ClassFactory.register(ClassType.DATASET) @@ -108,115 +110,3 @@ def _count_visible_keypoints(anno): def collate_fn(batch): """Collate fn for data loader.""" return tuple(zip(*batch)) - - -@ClassFactory.register(ClassType.DATASET) -class DetectionDataset(Dataset): - """Detection common dataset.""" - - config = CocoConfig() - - def __init__(self, **kwargs): - """Construct the Detection Dataset class.""" - super(DetectionDataset, self).__init__(**kwargs) - self.imgs = list(sorted(os.listdir(os.path.join(self.args.data_root, self.args.img_prefix)))) - self.masks = list(sorted(os.listdir(os.path.join(self.args.data_root, self.args.ann_prefix)))) - portion = self.args.test_size - self.imgs = self.imgs[:-portion] if self.mode == 'train' else self.imgs[-portion:] - self.masks = self.masks[:-portion] if self.mode == 'train' else self.masks[-portion:] - self.collate_fn = collate_fn - convert_to_coco_api(self) - - def __getitem__(self, idx): - """Get an item of the dataset according to the index.""" - # load images and masks - img_path = os.path.join(self.args.data_root, self.args.img_prefix, self.imgs[idx]) - mask_path = os.path.join(self.args.data_root, self.args.ann_prefix, self.masks[idx]) - img = Image.open(img_path).convert("RGB") - mask = Image.open(mask_path) - mask = np.array(mask) - obj_ids = np.unique(mask) - obj_ids = obj_ids[1:] - masks = mask == obj_ids[:, None, None] - num_objs = len(obj_ids) - boxes = [] - for i in range(num_objs): - pos = np.where(masks[i]) - xmin = np.min(pos[1]) - xmax = np.max(pos[1]) - ymin = np.min(pos[0]) - ymax = np.max(pos[0]) - boxes.append([xmin, ymin, xmax, ymax]) - boxes = torch.as_tensor(boxes, dtype=torch.float32) - labels = torch.ones((num_objs,), dtype=torch.int64) - masks = torch.as_tensor(masks, dtype=torch.uint8) - image_id = torch.tensor([idx]) - area = (boxes[:, 3] - boxes[:, 1]) * (boxes[:, 2] - boxes[:, 0]) - iscrowd = torch.zeros((num_objs,), dtype=torch.int64) - - target = {} - target["boxes"] = boxes - target["labels"] = labels - target["masks"] = masks - target["image_id"] = image_id - target["area"] = area - target["iscrowd"] = iscrowd - - if self.transforms is not None: - img, target = self.transforms(img, target) - - return img, target - - def __len__(self): - """Get the length of the dataset.""" - return len(self.imgs) - - -def convert_to_coco_api(ds): - """Convert to coco dataset.""" - coco_ds = COCO() - ann_id = 1 - dataset = {'images': [], 'categories': [], 'annotations': []} - categories = set() - for img_idx in range(len(ds)): - img, targets = ds[img_idx] - image_id = targets["image_id"].item() - img_dict = {} - img_dict['id'] = image_id - img_dict['height'] = img.shape[-2] - img_dict['width'] = img.shape[-1] - dataset['images'].append(img_dict) - bboxes = targets["boxes"] - bboxes[:, 2:] -= bboxes[:, :2] - bboxes = bboxes.tolist() - labels = targets['labels'].tolist() - areas = targets['area'].tolist() - iscrowd = targets['iscrowd'].tolist() - if 'masks' in targets: - masks = targets['masks'] - masks = masks.permute(0, 2, 
1).contiguous().permute(0, 2, 1) - if 'keypoints' in targets: - keypoints = targets['keypoints'] - keypoints = keypoints.reshape(keypoints.shape[0], -1).tolist() - num_objs = len(bboxes) - for i in range(num_objs): - ann = {} - ann['image_id'] = image_id - ann['bbox'] = bboxes[i] - ann['category_id'] = labels[i] - categories.add(labels[i]) - ann['area'] = areas[i] - ann['iscrowd'] = iscrowd[i] - ann['id'] = ann_id - if 'keypoints' in targets: - ann['keypoints'] = keypoints[i] - ann['num_keypoints'] = sum(k != 0 for k in keypoints[i][2::3]) - dataset['annotations'].append(ann) - ann_id += 1 - dataset['categories'] = [{'id': i} for i in sorted(categories)] - coco_ds.dataset = dataset - coco_ds.createIndex() - instances_val = os.path.join(TaskOps().local_output_path, 'instances.json') - json.dump(coco_ds.dataset, open(instances_val, 'w')) - logging.info("dump detection instances json file: {}".format(instances_val)) - return coco_ds diff --git a/vega/datasets/common/dataset.py b/vega/datasets/common/dataset.py index 5c840d0..8892c60 100644 --- a/vega/datasets/common/dataset.py +++ b/vega/datasets/common/dataset.py @@ -1,22 +1,27 @@ # -*- coding: utf-8 -*- # Copyright (C) 2020. Huawei Technologies Co., Ltd. All rights reserved. -# This program is free software; you can redistribute it and/or modify -# it under the terms of the MIT License. -# This program is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# MIT License for more details. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. """This is a base class of the dataset.""" import importlib -from vega.common.task_ops import TaskOps -from .utils.transforms import Transforms -from vega.common import ClassFactory, ClassType +from vega.common import ClassFactory, ClassType, TaskOps from vega.common.config import Config from vega.common import update_dict from vega.datasets import Adapter +from .utils.transforms import Transforms class Dataset(TaskOps): @@ -43,7 +48,6 @@ def __init__(self, hps=None, mode='train', **kwargs): if mode == "val" and not hasattr(self.config, "val") and not hasattr(self.config.common, "train_portion"): self.mode = "test" - # modify config from kwargs, `Cifar10(mode='test', data_path='/cache/datasets')` if kwargs: self.args = Config(kwargs) if hasattr(self, 'config'): @@ -56,8 +60,6 @@ def __init__(self, hps=None, mode='train', **kwargs): self.train = self.mode in ["train", "val"] transforms_list = self._init_transforms() self._transforms = Transforms(transforms_list) - # if "transforms" in kwargs.keys(): - # self._transforms.__transform__ = kwargs["transforms"] self.dataset_init() self.world_size = 1 self.rank = 0 diff --git a/vega/datasets/common/div2k.py b/vega/datasets/common/div2k.py index 527ed79..e95b238 100644 --- a/vega/datasets/common/div2k.py +++ b/vega/datasets/common/div2k.py @@ -1,21 +1,27 @@ # -*- coding: utf-8 -*- # Copyright (C) 2020. Huawei Technologies Co., Ltd. 
All rights reserved. -# This program is free software; you can redistribute it and/or modify -# it under the terms of the MIT License. -# This program is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# MIT License for more details. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. """This is the class for DIV2K dataset.""" import os import os.path -from .utils import div2k_util as util -from .dataset import Dataset from vega.common import ClassFactory, ClassType from vega.common import FileOps from vega.datasets.conf.div2k import DIV2KConfig +from .utils import div2k_util as util +from .dataset import Dataset @ClassFactory.register(ClassType.DATASET) diff --git a/vega/datasets/common/div2k_unpair.py b/vega/datasets/common/div2k_unpair.py index c45dd71..156e74b 100644 --- a/vega/datasets/common/div2k_unpair.py +++ b/vega/datasets/common/div2k_unpair.py @@ -1,12 +1,18 @@ # -*- coding: utf-8 -*- # Copyright (C) 2020. Huawei Technologies Co., Ltd. All rights reserved. -# This program is free software; you can redistribute it and/or modify -# it under the terms of the MIT License. -# This program is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# MIT License for more details. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. """This is the class for DIV2K dataset for the unpaird setting.""" import logging @@ -15,11 +21,11 @@ import random from PIL import Image from PIL import ImageFile -from .dataset import Dataset from torchvision import transforms from vega.common import ClassFactory, ClassType from vega.common import FileOps from vega.datasets.conf.div2k import DIV2KConfig +from .dataset import Dataset ImageFile.LOAD_TRUNCATED_IMAGES = True diff --git a/vega/datasets/common/fmnist.py b/vega/datasets/common/fmnist.py index d0d5398..49bc154 100644 --- a/vega/datasets/common/fmnist.py +++ b/vega/datasets/common/fmnist.py @@ -1,20 +1,26 @@ # -*- coding: utf-8 -*- # Copyright (C) 2020. Huawei Technologies Co., Ltd. All rights reserved. -# This program is free software; you can redistribute it and/or modify -# it under the terms of the MIT License. -# This program is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. 
See the -# MIT License for more details. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. """This is a class for fashionmnist dataset.""" from torchvision.datasets import FashionMNIST -from .dataset import Dataset from vega.common import ClassFactory, ClassType from vega.common import FileOps from vega.datasets.conf.fashion_mnist import FashionMnistConfig +from .dataset import Dataset @ClassFactory.register(ClassType.DATASET) diff --git a/vega/datasets/common/glue.py b/vega/datasets/common/glue.py deleted file mode 100644 index e50f4e6..0000000 --- a/vega/datasets/common/glue.py +++ /dev/null @@ -1,200 +0,0 @@ -# -*- coding: utf-8 -*- - -# Copyright (C) 2020. Huawei Technologies Co., Ltd. All rights reserved. -# This program is free software; you can redistribute it and/or modify -# it under the terms of the MIT License. -# This program is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# MIT License for more details. - -"""This is a class for Glue dataset.""" -import logging -import json -import numpy as np -from collections import namedtuple -from tqdm import tqdm, trange -from vega.datasets.common.dataset import Dataset -from pytorch_pretrained_bert import BertTokenizer -from vega.common.class_factory import ClassType, ClassFactory -from pathlib import Path -from .utils.data_processor import processors, output_modes -from ..conf.glue import GlueConfig - -InputFeatures = namedtuple("InputFeatures", "input_ids input_mask segment_ids label_id seq_length is_next") - - -@ClassFactory.register(ClassType.DATASET) -class GlueDataset(Dataset): - """Glue Dataset.""" - - config = GlueConfig() - - def __init__(self, *args, **kwargs): - super().__init__(*args, **kwargs) - tokenizer = BertTokenizer.from_pretrained(self.args.vocab_file, do_lower_case=self.args.do_lower_case) - if self.args.pregenerated: - self.features = read_features_from_file(tokenizer, self.args.data_path) - else: - processor = processors[self.args.task_name]() - train_examples = processor.get_examples(self.mode, self.args.data_path) - label_list = processor.get_labels() - output_mode = output_modes[self.args.task_name] - self.features = convert_examples_to_features(train_examples, label_list, self.args.max_seq_length, - tokenizer, output_mode) - - def __len__(self): - """Get the length of the dataset.""" - return len(self.features) - - def __getitem__(self, item): - """Get an item of the dataset according to the index.""" - feature = self.features[item] - data = dict(input_ids=feature.input_ids, attention_mask=feature.input_mask, token_type_ids=feature.segment_ids) - labels = feature.label_id - # next_sentence_label=int(self.is_nexts[item])) - return data, labels - - -def read_features_from_file(tokenizer, data_path): - """Read features from file.""" - logging.info('data_path: {}'.format(data_path)) - data_path = Path(data_path) - data_file = data_path / "epoch_0.json" - metrics_file = data_path / 
"epoch_0_metrics.json" - logging.info('data_file: {}'.format(data_file)) - logging.info('metrics_file: {}'.format(metrics_file)) - assert data_file.is_file() and metrics_file.is_file() - metrics = json.loads(metrics_file.read_text()) - num_samples = metrics['num_training_examples'] - seq_len = metrics['max_seq_len'] - features = [] - with data_file.open() as f: - for i, line in enumerate(tqdm(f, total=num_samples, desc="Training examples")): - line = line.strip() - example = json.loads(line) - feature = convert_example_to_features(example, tokenizer, seq_len) - features.append(feature) - logging.info("Loading complete!") - return features - - -def convert_examples_to_features(examples, label_list, max_seq_length, tokenizer, output_mode): - """Load a data file into a list of `InputBatch`s.""" - label_map = {label: i for i, label in enumerate(label_list)} - features = [] - for (ex_index, example) in enumerate(examples): - if ex_index % 10000 == 0: - logging.info("Writing example %d of %d" % (ex_index, len(examples))) - - tokens_a = tokenizer.tokenize(example.text_a) - - tokens_b = None - if example.text_b: - tokens_b = tokenizer.tokenize(example.text_b) - _truncate_seq_pair(tokens_a, tokens_b, max_seq_length - 3) - else: - if len(tokens_a) > max_seq_length - 2: - tokens_a = tokens_a[:(max_seq_length - 2)] - - tokens = ["[CLS]"] + tokens_a + ["[SEP]"] - segment_ids = [0] * len(tokens) - - if tokens_b: - tokens += tokens_b + ["[SEP]"] - segment_ids += [1] * (len(tokens_b) + 1) - - input_ids = tokenizer.convert_tokens_to_ids(tokens) - input_mask = [1] * len(input_ids) - seq_length = len(input_ids) - - padding = [0] * (max_seq_length - len(input_ids)) - input_ids += padding - input_mask += padding - segment_ids += padding - - assert len(input_ids) == max_seq_length - assert len(input_mask) == max_seq_length - assert len(segment_ids) == max_seq_length - - if output_mode == "classification": - label_id = label_map[example.label] - elif output_mode == "regression": - label_id = float(example.label) - else: - raise KeyError(output_mode) - - if ex_index < 1: - logging.info("*** Example ***") - logging.info("guid: %s" % (example.guid)) - logging.info("tokens: %s" % " ".join( - [str(x) for x in tokens])) - logging.info("input_ids: %s" % " ".join([str(x) for x in input_ids])) - logging.info("input_mask: %s" % " ".join([str(x) for x in input_mask])) - logging.info( - "segment_ids: %s" % " ".join([str(x) for x in segment_ids])) - logging.info("label: {}".format(example.label)) - logging.info("label_id: {}".format(label_id)) - - features.append( - InputFeatures(input_ids=np.array(input_ids), - input_mask=np.array(input_mask), - segment_ids=np.array(segment_ids), - label_id=np.array(label_id), - seq_length=np.array(seq_length), - is_next=None)) - return features - - -def _truncate_seq_pair(tokens_a, tokens_b, max_length): - """Truncate a sequence pair in place to the maximum length.""" - while True: - total_length = len(tokens_a) + len(tokens_b) - if total_length <= max_length: - break - if len(tokens_a) > len(tokens_b): - tokens_a.pop() - else: - tokens_b.pop() - - -def convert_example_to_features(example, tokenizer, max_seq_length): - """Convert example.""" - tokens = example["tokens"] - segment_ids = example["segment_ids"] - is_random_next = example["is_random_next"] - masked_lm_positions = example["masked_lm_positions"] - masked_lm_labels = example["masked_lm_labels"] - - if len(tokens) > max_seq_length: - logging.info('len(tokens): {}'.format(len(tokens))) - logging.info('tokens: {}'.format(tokens)) - 
tokens = tokens[:max_seq_length] - - if len(tokens) != len(segment_ids): - logging.info('tokens: {}\nsegment_ids: {}'.format(tokens, segment_ids)) - segment_ids = [0] * len(tokens) - - assert len(tokens) == len(segment_ids) <= max_seq_length # The preprocessed data should be already truncated - input_ids = tokenizer.convert_tokens_to_ids(tokens) - masked_label_ids = tokenizer.convert_tokens_to_ids(masked_lm_labels) - - input_array = np.zeros(max_seq_length, dtype=np.int64) - input_array[:len(input_ids)] = input_ids - - mask_array = np.zeros(max_seq_length, dtype=np.int64) - mask_array[:len(input_ids)] = 1 - - segment_array = np.zeros(max_seq_length, dtype=np.int64) - segment_array[:len(segment_ids)] = segment_ids - - lm_label_array = np.full(max_seq_length, dtype=np.int64, fill_value=-1) - lm_label_array[masked_lm_positions] = masked_label_ids - - features = InputFeatures(input_ids=input_array, - input_mask=mask_array, - segment_ids=segment_array, - label_id=lm_label_array, - seq_length=None, - is_next=is_random_next) - return features diff --git a/vega/datasets/common/imagenet.py b/vega/datasets/common/imagenet.py index 9cf9c2f..f1fe63f 100644 --- a/vega/datasets/common/imagenet.py +++ b/vega/datasets/common/imagenet.py @@ -1,21 +1,27 @@ # -*- coding: utf-8 -*- # Copyright (C) 2020. Huawei Technologies Co., Ltd. All rights reserved. -# This program is free software; you can redistribute it and/or modify -# it under the terms of the MIT License. -# This program is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# MIT License for more details. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. """This is a class for Imagenet dataset.""" from torchvision.datasets import ImageFolder -from .dataset import Dataset from vega.datasets.transforms.Compose import Compose from vega.common import ClassFactory, ClassType from vega.common import FileOps from vega.datasets.conf.imagenet import ImagenetConfig +from .dataset import Dataset @ClassFactory.register(ClassType.DATASET) diff --git a/vega/datasets/common/mnist.py b/vega/datasets/common/mnist.py index 43ebcce..be2ee3c 100644 --- a/vega/datasets/common/mnist.py +++ b/vega/datasets/common/mnist.py @@ -1,20 +1,26 @@ # -*- coding: utf-8 -*- # Copyright (C) 2020. Huawei Technologies Co., Ltd. All rights reserved. -# This program is free software; you can redistribute it and/or modify -# it under the terms of the MIT License. -# This program is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# MIT License for more details. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. """This is a class for Mnist dataset.""" from torchvision.datasets import MNIST -from .dataset import Dataset from vega.common import ClassFactory, ClassType from vega.common import FileOps from vega.datasets.conf.mnist import MnistConfig +from .dataset import Dataset @ClassFactory.register(ClassType.DATASET) diff --git a/vega/datasets/common/mrpc.py b/vega/datasets/common/mrpc.py deleted file mode 100644 index 9cc1afa..0000000 --- a/vega/datasets/common/mrpc.py +++ /dev/null @@ -1,184 +0,0 @@ -# -*- coding: utf-8 -*- - -# Copyright (C) 2020. Huawei Technologies Co., Ltd. All rights reserved. -# This program is free software; you can redistribute it and/or modify -# it under the terms of the MIT License. -# This program is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# MIT License for more details. - -"""This is a class for Bert Tokenizer.""" -import logging -import os -import csv -from vega.datasets.common.dataset import Dataset -from vega.common import ClassFactory, ClassType -from ..conf.mrpc import MrpcConfig -from vega.common.config import Config -from pytorch_pretrained_bert import BertTokenizer - - -@ClassFactory.register(ClassType.DATASET) -class MrpcDataset(Dataset): - """MRPC data set (GLUE version).""" - - config = MrpcConfig() - - def __init__(self, *args, **kwargs): - super().__init__(*args, **kwargs) - label_list = self.get_labels() - - tokenizer = BertTokenizer.from_pretrained(self.args.vocab_file, do_lower_case=self.args.do_lower_case) - if tokenizer is None: - raise ValueError("Tokenizer can't be None.") - if self.mode == 'train': - examples = self.get_train_examples(self.args.data_path) - elif self.mode == 'val': - examples = self.get_val_examples(self.args.data_path) - else: - examples = self.get_test_examples(self.args.data_path) - self.examples = self.convert_examples_to_features(examples, label_list, self.args.max_seq_length, tokenizer) - - def __getitem__(self, idx): - """Get item.""" - example = self.examples[idx] - input_ids = example.get('input_ids') - input_mask = example.get('input_mask') - segment_ids = example.get('segment_ids') - label_ids = example.get('label_id') - if self.transforms is not None: - input_ids, input_mask, segment_ids, label_ids = self.transforms(input_ids, input_mask, segment_ids, - label_ids) - target = label_ids - data = dict(input_ids=input_ids, attention_mask=input_mask, token_type_ids=segment_ids, labels=label_ids) - return data, target - - def __len__(self): - """Get the length of the dataset.""" - return len(self.examples) - - def get_train_examples(self, data_dir): - """See base class.""" - return self._create_examples(self._read_tsv(os.path.join(data_dir, "train.tsv")), "train") - - def get_val_examples(self, data_dir): - """See base class.""" - return self._create_examples(self._read_tsv(os.path.join(data_dir, "test.tsv")), "dev") - - def get_test_examples(self, data_dir): - """See base class.""" - return self._create_examples(self._read_tsv(os.path.join(data_dir, "test.tsv")), "test") - - 
def get_labels(self): - """See base class.""" - return ["0", "1"] - - def _create_examples(self, lines, set_type): - """Create examples for the training, dev and test sets.""" - examples = [] - for (i, line) in enumerate(lines): - if i == 0: - continue - guid = "%s-%s" % (set_type, i) - text_a = line[3] - text_b = line[4] - label = None if set_type == "test" else line[0] - examples.append(Config(dict(guid=guid, text_a=text_a, text_b=text_b, label=label))) - return examples - - @classmethod - def _read_tsv(cls, input_file, quotechar=None): - """Read a tab separated value file.""" - with open(input_file, "r", encoding="utf-8-sig") as f: - return list(csv.reader(f, delimiter="\t", quotechar=quotechar)) - - def convert_examples_to_features(self, examples, label_list, max_seq_length, tokenizer): - """Load a data file into a list of `InputBatch`s.""" - label_map = {label: i for i, label in enumerate(label_list)} - features = [] - for (ex_index, example) in enumerate(examples): - tokens_a = tokenizer.tokenize(example.text_a) - - tokens_b = None - if example.text_b: - tokens_b = tokenizer.tokenize(example.text_b) - # Modifies `tokens_a` and `tokens_b` in place so that the total - # length is less than the specified length. - # Account for [CLS], [SEP], [SEP] with "- 3" - _truncate_seq_pair(tokens_a, tokens_b, max_seq_length - 3) - else: - # Account for [CLS] and [SEP] with "- 2" - if len(tokens_a) > max_seq_length - 2: - tokens_a = tokens_a[:(max_seq_length - 2)] - - # The convention in BERT is: - # (a) For sequence pairs: - # tokens: [CLS] is this jack ##son ##ville ? [SEP] no it is not . [SEP] - # type_ids: 0 0 0 0 0 0 0 0 1 1 1 1 1 1 - # (b) For single sequences: - # tokens: [CLS] the dog is hairy . [SEP] - # type_ids: 0 0 0 0 0 0 0 - # - # Where "type_ids" are used to indicate whether this is the first - # sequence or the second sequence. The embedding vectors for `type=0` and - # `type=1` were learned during pre-training and are added to the wordpiece - # embedding vector (and position vector). This is not *strictly* necessary - # since the [SEP] token unambigiously separates the sequences, but it makes - # it easier for the model to learn the concept of sequences. - # - # For classification tasks, the first vector (corresponding to [CLS]) is - # used as as the "sentence vector". Note that this only makes sense because - # the entire model is fine-tuned. - tokens = ["[CLS]"] + tokens_a + ["[SEP]"] - segment_ids = [0] * len(tokens) - - if tokens_b: - tokens += tokens_b + ["[SEP]"] - segment_ids += [1] * (len(tokens_b) + 1) - - input_ids = tokenizer.convert_tokens_to_ids(tokens) - - # The mask has 1 for real tokens and 0 for padding tokens. Only real - # tokens are attended to. - input_mask = [1] * len(input_ids) - - # Zero-pad up to the sequence length. 
- padding = [0] * (max_seq_length - len(input_ids)) - input_ids += padding - input_mask += padding - segment_ids += padding - - assert len(input_ids) == max_seq_length - assert len(input_mask) == max_seq_length - assert len(segment_ids) == max_seq_length - - label_id = label_map[example.label] - if ex_index < 5: - logging.info("*** Example ***") - logging.info("guid: %s" % (example.guid)) - logging.info("tokens: %s" % " ".join([str(x) for x in tokens])) - logging.info("input_ids: %s" % " ".join([str(x) for x in input_ids])) - logging.info("input_mask: %s" % " ".join([str(x) for x in input_mask])) - logging.info( - "segment_ids: %s" % " ".join([str(x) for x in segment_ids])) - logging.info("label: %s (id = %d)" % (example.label, label_id)) - - features.append(Config( - dict(input_ids=input_ids, - input_mask=input_mask, - segment_ids=segment_ids, - label_id=label_id))) - return features - - -def _truncate_seq_pair(tokens_a, tokens_b, max_length): - """Truncate a sequence pair in place to the maximum length.""" - while True: - total_length = len(tokens_a) + len(tokens_b) - if total_length <= max_length: - break - if len(tokens_a) > len(tokens_b): - tokens_a.pop() - else: - tokens_b.pop() diff --git a/vega/datasets/common/nasbench.py b/vega/datasets/common/nasbench.py index 487867d..a1551d8 100644 --- a/vega/datasets/common/nasbench.py +++ b/vega/datasets/common/nasbench.py @@ -1,21 +1,27 @@ # -*- coding: utf-8 -*- # Copyright (C) 2020. Huawei Technologies Co., Ltd. All rights reserved. -# This program is free software; you can redistribute it and/or modify -# it under the terms of the MIT License. -# This program is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# MIT License for more details. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. """This is a class for Nasbench dataset.""" import random import numpy as np from vega.common import ClassFactory, ClassType from vega.common import FileOps -from .dataset import Dataset from vega.datasets.conf.nasbench import NasbenchConfig from nasbench import api +from .dataset import Dataset VALID_OPS = ['input', 'output', 'conv1x1-bn-relu', 'conv3x3-bn-relu', 'maxpool3x3'] diff --git a/vega/datasets/common/pacs.py b/vega/datasets/common/pacs.py new file mode 100644 index 0000000..a3b5058 --- /dev/null +++ b/vega/datasets/common/pacs.py @@ -0,0 +1,138 @@ +# -*- coding: utf-8 -*- + +# Copyright (C) 2020. Huawei Technologies Co., Ltd. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+# See the License for the specific language governing permissions and +# limitations under the License. + +"""This is a class for Pacs dataset.""" +import os +from sklearn.model_selection import train_test_split +from PIL import Image +from vega.common import ClassFactory, ClassType +from vega.common import FileOps +from vega.datasets.conf.pacs import PacsConfig +from .dataset import Dataset + + +@ClassFactory.register(ClassType.DATASET) +class Pacs(Dataset): + """This is a class for Pacs dataset. + + :param mode: `train`,`val` or `test`, defaults to `train` + :type mode: str, optional + :param cfg: the config the dataset need, defaults to None, and if the cfg is None, + the default config will be used, the default config file is a yml file with the same name of the class + :type cfg: yml, py or dict + """ + + config = PacsConfig() + + def __init__(self, **kwargs): + """Construct the Pacs class.""" + Dataset.__init__(self, **kwargs) + self.args.data_path = FileOps.download_dataset(self.args.data_path) + targetdomain = self.args.targetdomain + domain = ['cartoon', 'art_painting', 'photo', 'sketch'] + if self.train: + domain.remove(targetdomain) + else: + domain = [targetdomain] + full_data = [] + label_name = [] + full_concept = [] + for k, domain_name in enumerate(domain): + split_path = os.path.join(self.args.split_path, domain_name + '_all' + '.txt') + images, labels = self._dataset_info(split_path) + concept = [k] * len(labels) + full_data.extend(images) + label_name.extend(labels) + full_concept.extend(concept) + + classes = list(set(label_name)) + classes.sort() + class_to_idx = {classes[i]: i for i in range(len(classes))} + full_label = [class_to_idx[x] for x in label_name] + if self.train: + name_train, name_val, labels_train, labels_val, concepts_train, concepts_val = \ + train_test_split(full_data, full_label, full_concept, train_size=self.args.train_portion) + if self.mode == "train": + self.data = name_train + self.label = labels_train + self.concept = concepts_train + else: + self.data = name_val + self.label = labels_val + self.concept = concepts_val + else: + self.data, self.label = full_data, full_label + self.concept = [0] * len(self.data) + + def __getitem__(self, index): + """Get an item of the dataset according to the index. + + :param index: index + :type index: int + :return: an item of the dataset according to the index + :rtype: tuple + """ + data, label, concept = self.data[index], self.label[index], self.concept[index] + img = Image.open(data).convert('RGB') + if self.transforms is not None: + img = self.transforms(img) + if self.args.task == 'nas_ood': + return {'input':img, 'target': label, 'concept': concept}, label + return img, (label, concept) + + def _dataset_info(self, txt_labels): + with open(txt_labels, 'r') as f: + images_list = f.readlines() + + file_names = [] + labels = [] + for row in images_list: + row = row.split(' ') + path = os.path.join(self.args.data_path, row[0]) + path = path.replace('\\', '/') + file_names.append(path) + labels.append(int(row[1])) + return file_names, labels + + def __len__(self): + """Get the length of the dataset. + + :return: the length of the dataset + :rtype: int + """ + return len(self.data) + + @property + def input_channels(self): + """Input channel number of the pacs image. + + :return: the channel number + :rtype: int + """ + _shape = self.data.shape + _input_channels = 3 if len(_shape) == 4 else 1 + return _input_channels + + @property + def input_size(self): + """Input size of pacs image. 
+ + :return: the input size + :rtype: int + """ + _shape = self.data.shape + return _shape[1] + diff --git a/vega/datasets/common/reds.py b/vega/datasets/common/reds.py deleted file mode 100644 index b5c6a6c..0000000 --- a/vega/datasets/common/reds.py +++ /dev/null @@ -1,142 +0,0 @@ -# -*- coding: utf-8 -*- - -# Copyright (C) 2020. Huawei Technologies Co., Ltd. All rights reserved. -# This program is free software; you can redistribute it and/or modify -# it under the terms of the MIT License. -# This program is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# MIT License for more details. - -"""This is a class for Cifar10 dataset.""" -import os -import numpy as np -import random -from .dataset import Dataset -from vega.common import ClassFactory, ClassType -from vega.datasets.conf.reds import REDSConfig -from vega.datasets.common.utils.reds_util import read_file, imfrombytes, img2tensor, augment, paired_random_crop - - -@ClassFactory.register(ClassType.DATASET) -class REDS(Dataset): - """This is a class for reds dataset.""" - - config = REDSConfig() - - def __init__(self, **kwargs): - """Construct the Cifar10 class.""" - Dataset.__init__(self, **kwargs) - self.train = self.mode == 'train' - self.gt_root, self.lq_root = self.args['dataroot_gt'], self.args['dataroot_lq'] - if self.args['num_frame'] % 2 != 1: - raise Exception('num_frame should be odd number, but got {}'.format(self.args["num_frame"])) - self.num_frame = self.args['num_frame'] - self.num_half_frames = self.args['num_frame'] // 2 - - self.keys = [] - with open(self.args['meta_info_file'], 'r') as fin: - for line in fin: - folder, frame_num, _ = line.split(' ') - self.keys.extend( - [f'{folder}/{i:08d}' for i in range(int(frame_num))]) - - # remove the video clips used in validation - if self.train: - if self.args['val_partition'] == 'REDS4': - val_partition = ['000', '011', '015', '020'] - elif self.args['val_partition'] == 'official': - val_partition = [f'{v:03d}' for v in range(240, 270)] - else: - raise ValueError( - f'Wrong validation partition {self.args["val_partition"]}.' - f"Supported ones are ['official', 'REDS4'].") - self.keys = [v for v in self.keys if v.split('/')[0] not in val_partition] - - # temporal augmentation configs - self.interval_list = self.args.get('interval_list', [1]) - self.random_reverse = self.args.get('random_reverse', False) - - def __getitem__(self, index): - """Get an item of the dataset according to the index. 
- - :param index: index - :type index: int - :return: an item of the dataset according to the index - :rtype: array of numpy, array of numpy - """ - key = self.keys[index] - clip_name, frame_name = key.split('/') # key example: 000/00000000 - center_frame_idx = int(frame_name) - - # determine the neighboring frames - if self.train: - interval = random.choice(self.interval_list) - else: - interval = 1 - - # ensure not exceeding the borders - frame_name = f'{center_frame_idx:08d}' - start_frame_idx = center_frame_idx - self.num_half_frames * interval - end_frame_idx = center_frame_idx + self.num_half_frames * interval - if self.train: - # each clip has 100 frames starting from 0 to 99 - while (start_frame_idx < 0) or (end_frame_idx > 99): - center_frame_idx = random.randint(0, 99) - start_frame_idx = center_frame_idx - self.num_half_frames * interval - end_frame_idx = center_frame_idx + self.num_half_frames * interval - frame_name = f'{center_frame_idx:08d}' - neighbor_list = list( - range(center_frame_idx - self.num_half_frames * interval, - center_frame_idx + self.num_half_frames * interval + 1, - interval)) - # random reverse - if self.random_reverse and random.random() < 0.5: - neighbor_list.reverse() - else: - neighbor_list = [] - for i in range(start_frame_idx, end_frame_idx + 1): - idx = i - if i < 0: - idx = center_frame_idx + self.num_half_frames - i - elif i > 99: - idx = (center_frame_idx - self.num_half_frames) - (i - 99) - neighbor_list.append(idx) - - if len(neighbor_list) != self.num_frame: - raise Exception('Wrong length of neighbor list: {}'.format(len(neighbor_list))) - - # get the GT frame (as the center frame) - img_gt_path = os.path.join(self.gt_root, clip_name, f'{frame_name}.png') - img_bytes = read_file(img_gt_path) - img_gt = imfrombytes(img_bytes, float32=True) - - # get the neighboring LQ frames - img_lqs = [] - for neighbor in neighbor_list: - img_lq_path = os.path.join(self.lq_root, clip_name, f'{neighbor:08d}.png') - img_bytes = read_file(img_lq_path) - img_lq = imfrombytes(img_bytes, float32=True) - img_lqs.append(img_lq) - - if self.train: - scale = self.args['scale'] - gt_size = self.args['gt_size'] - # randomly crop - img_gt, img_lqs = paired_random_crop(img_gt, img_lqs, gt_size, scale, - img_gt_path) - - # augmentation - flip, rotate - img_lqs.append(img_gt) - img_results = augment(img_lqs, self.args['use_flip'], self.args['use_rot']) - img_lqs, img_gt = img_results[0:-1], img_results[-1] - - img_lqs = img2tensor(img_lqs) - img_lqs = np.stack(img_lqs, axis=0) - img_gt = img2tensor(img_gt) - - return img_lqs, img_gt - - def __len__(self): - """Get the length of dataset.""" - return len(self.keys) diff --git a/vega/datasets/common/spatiotemporal.py b/vega/datasets/common/spatiotemporal.py index 594953a..1f4d2ce 100644 --- a/vega/datasets/common/spatiotemporal.py +++ b/vega/datasets/common/spatiotemporal.py @@ -1,12 +1,18 @@ # -*- coding: utf-8 -*- # Copyright (C) 2020. Huawei Technologies Co., Ltd. All rights reserved. -# This program is free software; you can redistribute it and/or modify -# it under the terms of the MIT License. -# This program is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# MIT License for more details. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. """This is a class for Spatiotemporal dataset.""" import os diff --git a/vega/datasets/common/sr_datasets.py b/vega/datasets/common/sr_datasets.py index 541ad74..891275e 100644 --- a/vega/datasets/common/sr_datasets.py +++ b/vega/datasets/common/sr_datasets.py @@ -1,12 +1,18 @@ # -*- coding: utf-8 -*- # Copyright (C) 2020. Huawei Technologies Co., Ltd. All rights reserved. -# This program is free software; you can redistribute it and/or modify -# it under the terms of the MIT License. -# This program is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# MIT License for more details. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. """This is the class for SR dataset.""" from vega.common import ClassFactory, ClassType diff --git a/vega/datasets/common/utils/auto_lane_codec_utils.py b/vega/datasets/common/utils/auto_lane_codec_utils.py index d7c2799..14dda12 100644 --- a/vega/datasets/common/utils/auto_lane_codec_utils.py +++ b/vega/datasets/common/utils/auto_lane_codec_utils.py @@ -1,17 +1,24 @@ # -*- coding: utf-8 -*- # Copyright (C) 2020. Huawei Technologies Co., Ltd. All rights reserved. -# This program is free software; you can redistribute it and/or modify -# it under the terms of the MIT License. -# This program is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# MIT License for more details. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
"""This script is used to process the auto lane dataset.""" -import numpy as np -import cv2 + import json +import cv2 +import numpy as np class Point: @@ -55,8 +62,8 @@ class Lane: def __init__(self, prob=0, start_pos=0, end_pos=0, anchor_x=0, anchor_y=0, type=0, lane=np.array([])): self.prob = prob - self.start_pos = start_pos # the position for start points in location array - self.end_pos = end_pos # the position for start points in location array + self.start_pos = start_pos + self.end_pos = end_pos self.lane = lane self.idx = 0 self.ax = anchor_x @@ -204,22 +211,15 @@ def order_lane_x_axis(lane_set, h): for i in range(len(lane_set)): lane_with_cross_k = LaneWithCrossK(lane_set[i], i, cross_y) lanes_crossk.append(lane_with_cross_k) - # print(lane_with_cross_k.cross_x) - # print(lane_with_cross_k.k) lanes_crossk_sorted = sorted(lanes_crossk) - # for i in range(len(lanes_crossk_sorted)): - # print(lanes_crossk_sorted[i].k) - - # find current lanes right_pos = len(lanes_crossk_sorted) for i in range(len(lanes_crossk_sorted)): if lanes_crossk_sorted[i].k > 0: right_pos = i break - # assign lane index lane_idx = [None] * len(lanes_crossk_sorted) idx = -1 for i in range(right_pos - 1, -1, -1): @@ -230,8 +230,6 @@ def order_lane_x_axis(lane_set, h): lane_idx[i] = idx idx += 1 - # print(lane_idx) - lanes_final = list() for i in range(len(lanes_crossk_sorted)): lanes_crossk_sorted[i].lane.idx = lane_idx[i] @@ -314,7 +312,7 @@ def delete_repeat_y(cur_line): list_x.append(pt.x) list_y.append(pt.y) - sorted_y = sorted(list_y) # y from up--->down + sorted_y = sorted(list_y) sorted_x = [] for i in range(len(sorted_y)): sorted_x.append(list_x[list_y.index(sorted_y[i])]) @@ -335,7 +333,7 @@ def delete_repeat_y(cur_line): for i in range(len(set_sorted_y)): new_lane.append({"x": set_sorted_x[i], "y": set_sorted_y[i]}) if new_lane[0]["y"] < new_lane[1]["y"]: - new_lane = new_lane[::-1] # y from big to small + new_lane = new_lane[::-1] return new_lane @@ -439,7 +437,6 @@ def gettopk_idx(gt_dist_list): distance_list.append(cur_distance) top_idx = np.argsort(distance_list)[:1] - # top_2_values = [ distance_list[i] for i in top_2_idx] return top_idx diff --git a/vega/datasets/common/utils/auto_lane_pointlane_codec.py b/vega/datasets/common/utils/auto_lane_pointlane_codec.py index 1bc17f6..4c3d951 100644 --- a/vega/datasets/common/utils/auto_lane_pointlane_codec.py +++ b/vega/datasets/common/utils/auto_lane_pointlane_codec.py @@ -1,12 +1,18 @@ # -*- coding: utf-8 -*- # Copyright (C) 2020. Huawei Technologies Co., Ltd. All rights reserved. -# This program is free software; you can redistribute it and/or modify -# it under the terms of the MIT License. -# This program is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# MIT License for more details. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
"""This script is used to process the auto lane dataset.""" @@ -69,7 +75,6 @@ def encode_lane(self, lane_object, org_width, org_height): s_y = self.input_height * 1.0 / org_height gt_lanes_list = get_lane_list(lane_object, s_x, s_y) if len(gt_lanes_list) < 1: - # background image gt_lane_offset = np.zeros(shape=(self.feature_size, self.points_per_line * 2 + 1), dtype=float) gt_lane_type = np.zeros(shape=(self.feature_size, self.class_num), dtype=float) gt_lane_type[:, 0] = 1 @@ -77,9 +82,6 @@ def encode_lane(self, lane_object, org_width, org_height): gt_type = gt_lane_type.astype(np.float32) else: lane_set = trans_to_lane_with_type(gt_lanes_list) - # sort_lanes = order_lane_x_axis(lane_set, self.input_height) - # lane_set= - # lane_set = ensure_yshape_lines_order(sort_lanes, self.input_width, self.input_height) all_anchor_count = np.zeros(shape=(self.feature_height, self.feature_width)) all_anchor_distance = list() all_anchor_loc = list() @@ -95,10 +97,9 @@ def encode_lane(self, lane_object, org_width, org_height): y_list = [] else: interp_lane = spline_interp(lane=new_lane, step_t=1) - # x_pt_list, y_pt_list = trans_to_pt_list(interp_lane) x_pt_list, y_pt_list = delete_nearby_point(interp_lane) x_pt_list = x_pt_list[::-1] - y_pt_list = y_pt_list[::-1] # y from small to big + y_pt_list = y_pt_list[::-1] startpos, endpos, x_list, y_list = \ self.uniform_sample_lane_y_axis(x_pt_list, y_pt_list) if startpos == -1 or endpos == -1: @@ -110,7 +111,6 @@ def encode_lane(self, lane_object, org_width, org_height): all_anchor_loc.append(gt_loc_list) all_anchor_list.append(anchor_list) - # process gt offset value if self.anchor_lane_num == 1: gt_type, gt_loc = self.get_one_lane_gt_loc_type(all_anchor_distance, all_anchor_loc, all_anchor_count) @@ -147,7 +147,6 @@ def decode_lane(self, predict_type, predict_loc, cls_thresh): down_lane = np.array([]) end_pos = anchor_y_pos start_pos = anchor_y_pos - # up anchor for i in range(self.points_per_line): if i >= relative_end_pos or anchor_y_pos + i >= self.points_per_line: break @@ -157,7 +156,6 @@ def decode_lane(self, predict_type, predict_loc, cls_thresh): p = Point(abs_x, abs_y) up_lane = np.append(up_lane, p) end_pos = anchor_y_pos + i + 1 - # down anchor for i in range(anchor_y_pos): rela_x = down_anchor_lane[i] abs_x = anchor_center_x + rela_x @@ -198,21 +196,22 @@ def get_one_lane_gt_loc_type(self, all_anchor_distance, all_anchor_loc, all_anch gt_loc_list, gt_dist_list = \ get_lane_loc_list(all_anchor_distance, all_anchor_loc, h, w) - if cnt == 0: # back ground + if cnt == 0: gt_lane_type[index, 0] = 1 - elif cnt == 1: # single + elif cnt == 1: gt_lane_type[index, 0] = 0 gt_lane_type[index, 1] = 1 gt_lane_offset[index, :self.pt_nums_single_lane] = gt_loc_list[0] - else: # choose one + else: gt_lane_type[index, 0] = 0 gt_lane_type[index, 1] = 1 - # choose small distance line_loc_num = len(gt_loc_list) line_dist_num = len(gt_dist_list) - assert (line_dist_num == line_loc_num) - [top_idx] = gettopk_idx(gt_dist_list) - gt_lane_offset[index, :self.pt_nums_single_lane] = gt_loc_list[top_idx] + if line_dist_num == line_loc_num: + [top_idx] = gettopk_idx(gt_dist_list) + gt_lane_offset[index, :self.pt_nums_single_lane] = gt_loc_list[top_idx] + else: + raise ValueError('Feature is Wrong.') gt_loc = gt_lane_offset.astype(np.float32) gt_type = gt_lane_type.astype(np.float32) @@ -238,8 +237,8 @@ def uniform_sample_lane_y_axis(self, x_pt_list, y_pt_list): max_y = y_new x_list = np.array(x_pt_list) - y_list = np.array(y_pt_list) # y from small to big - if 
y_list.max() - y_list.min() < 5: # filter < 5 pixel lane + y_list = np.array(y_pt_list) + if y_list.max() - y_list.min() < 5: return -1, -1, [], [] if len(y_list) < 4: tck = interpolate.splrep(y_list, x_list, k=1, s=0) @@ -272,7 +271,7 @@ def get_one_line_pass_anchors(self, startpos, endpos, xlist, y_list, anchor_coun for i in range(0, endpos - startpos + 1): h = self.feature_height - 1 - int((startpos + i) * self.interval / self.step_h) - w = int(xlist[i] / self.step_w) # IndexError: list index out of range + w = int(xlist[i] / self.step_w) if h < 0 or h > self.feature_height - 1 or w < 0 or w > self.feature_width - 1: continue if (h, w) in anchor_list: @@ -280,7 +279,6 @@ def get_one_line_pass_anchors(self, startpos, endpos, xlist, y_list, anchor_coun anchor_y = (1.0 * h + 0.5) * self.step_h center_x = (1.0 * w + 0.5) * self.step_w - # ensure anchor on same side of lane curr_y = self.input_height - 1 - i * self.interval if curr_y <= anchor_y: continue @@ -288,17 +286,14 @@ def get_one_line_pass_anchors(self, startpos, endpos, xlist, y_list, anchor_coun anchor_list.append((h, w)) center_y = y_list[int(self.points_per_line / self.feature_height) * (self.feature_height - 1 - h)] - # get lane offset loss_line = [0] * (self.points_per_line * 2 + 1) length = endpos - startpos + 1 - # offset up cur anchor up_index = 0 for j in range(0, length): if y_list[startpos + j] <= center_y: loss_line[self.points_per_line + 1 + up_index] = xlist[j] - center_x up_index += 1 loss_line[self.points_per_line] = up_index - # offset done cur anchor down_index = length - up_index - 1 for j in range(0, endpos - startpos + 1): if y_list[startpos + j] > center_y: diff --git a/vega/datasets/common/utils/auto_lane_spline_interp.py b/vega/datasets/common/utils/auto_lane_spline_interp.py index a5c2ff3..94e1eff 100644 --- a/vega/datasets/common/utils/auto_lane_spline_interp.py +++ b/vega/datasets/common/utils/auto_lane_spline_interp.py @@ -1,12 +1,18 @@ # -*- coding: utf-8 -*- # Copyright (C) 2020. Huawei Technologies Co., Ltd. All rights reserved. -# This program is free software; you can redistribute it and/or modify -# it under the terms of the MIT License. -# This program is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# MIT License for more details. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. """This script is used to process the auto lane dataset.""" import numpy as np diff --git a/vega/datasets/common/utils/auto_lane_utils.py b/vega/datasets/common/utils/auto_lane_utils.py index b510806..09e47af 100644 --- a/vega/datasets/common/utils/auto_lane_utils.py +++ b/vega/datasets/common/utils/auto_lane_utils.py @@ -1,20 +1,26 @@ # -*- coding: utf-8 -*- # Copyright (C) 2020. Huawei Technologies Co., Ltd. All rights reserved. -# This program is free software; you can redistribute it and/or modify -# it under the terms of the MIT License. 
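
Several hunks in this patch (here and in avazu_util.py below) replace bare assert statements with explicit checks that raise ValueError, so the validation survives python -O and fails with a clearer message. A minimal sketch of the pattern, using a hypothetical helper rather than the actual codec method:

    def pick_closest_loc(gt_loc_list, gt_dist_list):
        """Validate explicitly instead of asserting, then pick the location
        whose anchor distance is smallest (illustrative sketch)."""
        if len(gt_loc_list) != len(gt_dist_list):
            # an assert here would be stripped under `python -O`
            raise ValueError('loc list and dist list must have the same length.')
        top_idx = min(range(len(gt_dist_list)), key=gt_dist_list.__getitem__)
        return gt_loc_list[top_idx]
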
-# This program is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# MIT License for more details. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. """This script is used to process the auto lane dataset.""" -import cv2 -import numpy as np +import os import json import PIL -import os +import cv2 +import numpy as np def hwc2chw(img): diff --git a/vega/datasets/common/utils/avazu_util.py b/vega/datasets/common/utils/avazu_util.py index d95a344..3eb9a8e 100644 --- a/vega/datasets/common/utils/avazu_util.py +++ b/vega/datasets/common/utils/avazu_util.py @@ -1,12 +1,18 @@ # -*- coding: utf-8 -*- # Copyright (C) 2020. Huawei Technologies Co., Ltd. All rights reserved. -# This program is free software; you can redistribute it and/or modify -# it under the terms of the MIT License. -# This program is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# MIT License for more details. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. """This script is used to process the Avazu dataset.""" from __future__ import division @@ -117,7 +123,6 @@ def _iterate_npy_files_(self, gen_type='train', num_of_files=None, shuffle_block else: logging.info("generating {} files from offset {}".format(gen_type, offset)) parts = np.arange(num_of_files)[offset:] - # while True: if shuffle_block: for i in range(int(shuffle_block)): np.random.shuffle(parts) @@ -126,7 +131,6 @@ def _iterate_npy_files_(self, gen_type='train', num_of_files=None, shuffle_block os.path.join(self.npy_data_dir, file_prefix + '_output_part_' + str(p) + '.npy'), \ i + 1 == len(parts) - # todo: support val_ratio def batch_generator(self, gen_type='train', batch_size=None, pos_ratio=None, num_of_parts=None, val_ratio=None, random_sample=False, shuffle_block=False, split_fields=False, on_disk=True): """Genetate a batch_size data. 
@@ -144,8 +148,6 @@ def batch_generator(self, gen_type='train', batch_size=None, pos_ratio=None, num else using unified index, defaults to False :param bool on_disk: Whether the data is on disk or not, defaults to True """ - # if pos_ratio is None: - # pos_ratio = self.train_pos_ratio if batch_size is None: batch_size = max(int(1 / self.train_pos_ratio), int(1 / self.test_pos_ratio)) + 1 @@ -208,16 +210,18 @@ def generator(X, y, batch_size, shuffle=True): if shuffle: for i in range(int(shuffle)): np.random.shuffle(sample_index) - assert X.shape[0] > 0 - while True: - batch_idx = sample_index[batch_size * counter:batch_size * (counter + 1)] - X_batch = X[batch_idx] - y_batch = y[batch_idx] - counter += 1 - if counter == num_of_batches: - counter = 0 - finished = True - yield X_batch, y_batch, finished + if X.shape[0] > 0: + while True: + batch_idx = sample_index[batch_size * counter:batch_size * (counter + 1)] + X_batch = X[batch_idx] + y_batch = y[batch_idx] + counter += 1 + if counter == num_of_batches: + counter = 0 + finished = True + yield X_batch, y_batch, finished + else: + raise ValueError('Shape of data must be bigger than 0.') @staticmethod def split_pos_neg(X, y): diff --git a/vega/datasets/common/utils/data_processor.py b/vega/datasets/common/utils/data_processor.py deleted file mode 100644 index 48ffa62..0000000 --- a/vega/datasets/common/utils/data_processor.py +++ /dev/null @@ -1,431 +0,0 @@ -# -*- coding: utf-8 -*- - -# Copyright (C) 2020. Huawei Technologies Co., Ltd. All rights reserved. -# This program is free software; you can redistribute it and/or modify -# it under the terms of the MIT License. -# This program is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# MIT License for more details. 
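
The generator hunk above applies the same explicit-check treatment to the batching loop. A self-contained sketch of the resulting behaviour (shuffle once, cycle forever, flag the end of each epoch); names are illustrative and the epoch bookkeeping is simplified:

    import numpy as np

    def batch_iter(X, y, batch_size, shuffle=True):
        """Yield (X_batch, y_batch, finished) forever, raising on empty data
        instead of asserting (simplified sketch of the refactored generator)."""
        if X.shape[0] == 0:
            raise ValueError('Shape of data must be bigger than 0.')
        index = np.arange(X.shape[0])
        if shuffle:
            np.random.shuffle(index)
        num_batches = int(np.ceil(X.shape[0] / batch_size))
        counter = 0
        while True:
            batch_idx = index[batch_size * counter:batch_size * (counter + 1)]
            counter += 1
            finished = counter == num_batches   # last batch of the epoch
            if finished:
                counter = 0
            yield X[batch_idx], y[batch_idx], finished
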
- -"""This script is used to process the Avazu dataset.""" -import csv -import os - - -class InputExample(object): - """A single training/test example for simple sequence classification.""" - - def __init__(self, guid, text_a, text_b=None, label=None): - self.guid = guid - self.text_a = text_a - self.text_b = text_b - self.label = label - - -class InputFeatures(object): - """A single set of features of data.""" - - def __init__(self, input_ids, input_mask, segment_ids, label_id, seq_length=None): - self.input_ids = input_ids - self.input_mask = input_mask - self.segment_ids = segment_ids - self.seq_length = seq_length - self.label_id = label_id - - -class DataProcessor(object): - """Base class for data converters for sequence classification data sets.""" - - def get_train_examples(self, data_dir): - """Get a collection of `InputExample`s for the train set.""" - raise NotImplementedError() - - def get_dev_examples(self, data_dir): - """Get a collection of `InputExample`s for the dev set.""" - raise NotImplementedError() - - def get_labels(self): - """Get the list of labels for this data set.""" - raise NotImplementedError() - - def get_examples(self, mode, data_dir): - """Get all examples.""" - if mode == 'train': - return self.get_train_examples(data_dir) - elif mode == 'val': - return self.get_dev_examples(data_dir) - elif mode == 'test': - return self.get_dev_examples(data_dir) - - @classmethod - def _read_tsv(cls, input_file, quotechar=None): - """Read a tab separated value file.""" - with open(input_file, "r", encoding="utf-8") as f: - reader = csv.reader(f, delimiter="\t", quotechar=quotechar) - return [line for line in reader] - - -class MrpcProcessor(DataProcessor): - """Processor for the MRPC data set (GLUE version).""" - - def get_train_examples(self, data_dir): - """Get train examples.""" - return self._create_examples( - self._read_tsv(os.path.join(data_dir, "train.tsv")), "train") - - def get_dev_examples(self, data_dir): - """Get dev examples.""" - return self._create_examples( - self._read_tsv(os.path.join(data_dir, "dev.tsv")), "dev") - - def get_aug_examples(self, data_dir): - """Get aug examples.""" - return self._create_examples( - self._read_tsv(os.path.join(data_dir, "train_aug.tsv")), "aug") - - def get_labels(self): - """Get labels.""" - return ["0", "1"] - - def _create_examples(self, lines, set_type): - """Create examples for the training and dev sets.""" - examples = [] - for (i, line) in enumerate(lines): - if i == 0: - continue - guid = "%s-%s" % (set_type, i) - text_a = line[3] - text_b = line[4] - label = line[0] - examples.append( - InputExample(guid=guid, text_a=text_a, text_b=text_b, label=label)) - return examples - - -class MnliProcessor(DataProcessor): - """Processor for the MultiNLI data set (GLUE version).""" - - def get_train_examples(self, data_dir): - """Get train examples.""" - return self._create_examples( - self._read_tsv(os.path.join(data_dir, "train.tsv")), "train") - - def get_dev_examples(self, data_dir): - """Get dev examples.""" - return self._create_examples( - self._read_tsv(os.path.join(data_dir, "dev_matched.tsv")), - "dev_matched") - - def get_aug_examples(self, data_dir): - """Get aug examples.""" - return self._create_examples( - self._read_tsv(os.path.join(data_dir, "train_aug.tsv")), "aug") - - def get_labels(self): - """Get labels.""" - return ["contradiction", "entailment", "neutral"] - - def _create_examples(self, lines, set_type): - """Create examples for the training and dev sets.""" - examples = [] - for (i, line) in 
enumerate(lines): - if i == 0: - continue - guid = "%s-%s" % (set_type, line[0]) - text_a = line[8] - text_b = line[9] - label = line[-1] - examples.append( - InputExample(guid=guid, text_a=text_a, text_b=text_b, label=label)) - return examples - - -class MnliMismatchedProcessor(MnliProcessor): - """Processor for the MultiNLI Mismatched data set (GLUE version).""" - - def get_dev_examples(self, data_dir): - """Get train examples.""" - return self._create_examples( - self._read_tsv(os.path.join(data_dir, "dev_mismatched.tsv")), - "dev_matched") - - -class ColaProcessor(DataProcessor): - """Processor for the CoLA data set (GLUE version).""" - - def get_train_examples(self, data_dir): - """Get train examples.""" - return self._create_examples( - self._read_tsv(os.path.join(data_dir, "train.tsv")), "train") - - def get_dev_examples(self, data_dir): - """Get dev examples.""" - return self._create_examples( - self._read_tsv(os.path.join(data_dir, "dev.tsv")), "dev") - - def get_aug_examples(self, data_dir): - """Get aug examples.""" - return self._create_examples( - self._read_tsv(os.path.join(data_dir, "train_aug.tsv")), "aug") - - def get_labels(self): - """Get labels.""" - return ["0", "1"] - - def _create_examples(self, lines, set_type): - """Create examples for the training and dev sets.""" - examples = [] - for (i, line) in enumerate(lines): - guid = "%s-%s" % (set_type, i) - text_a = line[3] - label = line[1] - examples.append( - InputExample(guid=guid, text_a=text_a, text_b=None, label=label)) - return examples - - -class Sst2Processor(DataProcessor): - """Processor for the SST-2 data set (GLUE version).""" - - def get_train_examples(self, data_dir): - """Get train examples.""" - return self._create_examples( - self._read_tsv(os.path.join(data_dir, "train.tsv")), "train") - - def get_dev_examples(self, data_dir): - """Get dev examples.""" - return self._create_examples( - self._read_tsv(os.path.join(data_dir, "dev.tsv")), "dev") - - def get_aug_examples(self, data_dir): - """Get aug examples.""" - return self._create_examples( - self._read_tsv(os.path.join(data_dir, "train_aug.tsv")), "aug") - - def get_labels(self): - """Get labels.""" - return ["0", "1"] - - def _create_examples(self, lines, set_type): - """Create examples for the training and dev sets.""" - examples = [] - for (i, line) in enumerate(lines): - if i == 0: - continue - guid = "%s-%s" % (set_type, i) - text_a = line[0] - label = line[1] - examples.append( - InputExample(guid=guid, text_a=text_a, text_b=None, label=label)) - return examples - - -class StsbProcessor(DataProcessor): - """Processor for the STS-B data set (GLUE version).""" - - def get_train_examples(self, data_dir): - """Get train examples.""" - return self._create_examples( - self._read_tsv(os.path.join(data_dir, "train.tsv")), "train") - - def get_dev_examples(self, data_dir): - """Get dev examples.""" - return self._create_examples( - self._read_tsv(os.path.join(data_dir, "dev.tsv")), "dev") - - def get_aug_examples(self, data_dir): - """Get aug examples.""" - return self._create_examples( - self._read_tsv(os.path.join(data_dir, "train_aug.tsv")), "aug") - - def get_labels(self): - """Get labels.""" - return [None] - - def _create_examples(self, lines, set_type): - """Create examples for the training and dev sets.""" - examples = [] - for (i, line) in enumerate(lines): - if i == 0: - continue - guid = "%s-%s" % (set_type, line[0]) - text_a = line[7] - text_b = line[8] - label = line[-1] - examples.append( - InputExample(guid=guid, text_a=text_a, 
text_b=text_b, label=label)) - return examples - - -class QqpProcessor(DataProcessor): - """Processor for the STS-B data set (GLUE version).""" - - def get_train_examples(self, data_dir): - """Get train examples.""" - return self._create_examples( - self._read_tsv(os.path.join(data_dir, "train.tsv")), "train") - - def get_dev_examples(self, data_dir): - """Get dev examples.""" - return self._create_examples( - self._read_tsv(os.path.join(data_dir, "dev.tsv")), "dev") - - def get_aug_examples(self, data_dir): - """Get aug examples.""" - return self._create_examples( - self._read_tsv(os.path.join(data_dir, "train_aug.tsv")), "aug") - - def get_labels(self): - """Get labels.""" - return ["0", "1"] - - def _create_examples(self, lines, set_type): - """Create examples for the training and dev sets.""" - examples = [] - for (i, line) in enumerate(lines): - if i == 0: - continue - guid = "%s-%s" % (set_type, line[0]) - try: - text_a = line[3] - text_b = line[4] - label = line[5] - except IndexError: - continue - examples.append( - InputExample(guid=guid, text_a=text_a, text_b=text_b, label=label)) - return examples - - -class QnliProcessor(DataProcessor): - """Processor for the STS-B data set (GLUE version).""" - - def get_train_examples(self, data_dir): - """Get train examples.""" - return self._create_examples( - self._read_tsv(os.path.join(data_dir, "train.tsv")), "train") - - def get_dev_examples(self, data_dir): - """Get dev examples.""" - return self._create_examples( - self._read_tsv(os.path.join(data_dir, "dev.tsv")), - "dev_matched") - - def get_aug_examples(self, data_dir): - """Get aug examples.""" - return self._create_examples( - self._read_tsv(os.path.join(data_dir, "train_aug.tsv")), "aug") - - def get_labels(self): - """Get labels.""" - return ["entailment", "not_entailment"] - - def _create_examples(self, lines, set_type): - """Create examples for the training and dev sets.""" - examples = [] - for (i, line) in enumerate(lines): - if i == 0: - continue - guid = "%s-%s" % (set_type, line[0]) - text_a = line[1] - text_b = line[2] - label = line[-1] - examples.append( - InputExample(guid=guid, text_a=text_a, text_b=text_b, label=label)) - return examples - - -class RteProcessor(DataProcessor): - """Processor for the RTE data set (GLUE version).""" - - def get_train_examples(self, data_dir): - """Get train examples.""" - return self._create_examples( - self._read_tsv(os.path.join(data_dir, "train.tsv")), "train") - - def get_dev_examples(self, data_dir): - """Get dev examples.""" - return self._create_examples( - self._read_tsv(os.path.join(data_dir, "dev.tsv")), "dev") - - def get_aug_examples(self, data_dir): - """Get aug examples.""" - return self._create_examples( - self._read_tsv(os.path.join(data_dir, "train_aug.tsv")), "aug") - - def get_labels(self): - """Get labels.""" - return ["entailment", "not_entailment"] - - def _create_examples(self, lines, set_type): - """Create examples for the training and dev sets.""" - examples = [] - for (i, line) in enumerate(lines): - if i == 0: - continue - guid = "%s-%s" % (set_type, line[0]) - text_a = line[1] - text_b = line[2] - label = line[-1] - examples.append( - InputExample(guid=guid, text_a=text_a, text_b=text_b, label=label)) - return examples - - -class WnliProcessor(DataProcessor): - """Processor for the WNLI data set (GLUE version).""" - - def get_train_examples(self, data_dir): - """Get train examples.""" - return self._create_examples( - self._read_tsv(os.path.join(data_dir, "train.tsv")), "train") - - def 
get_dev_examples(self, data_dir): - """Get dev examples.""" - return self._create_examples( - self._read_tsv(os.path.join(data_dir, "dev.tsv")), "dev") - - def get_labels(self): - """Get labels.""" - return ["0", "1"] - - def _create_examples(self, lines, set_type): - """Create examples for the training and dev sets.""" - examples = [] - for (i, line) in enumerate(lines): - if i == 0: - continue - guid = "%s-%s" % (set_type, line[0]) - text_a = line[1] - text_b = line[2] - label = line[-1] - examples.append( - InputExample(guid=guid, text_a=text_a, text_b=text_b, label=label)) - return examples - - -processors = { - "cola": ColaProcessor, - "mnli": MnliProcessor, - "mnli-mm": MnliMismatchedProcessor, - "mrpc": MrpcProcessor, - "sst-2": Sst2Processor, - "sts-b": StsbProcessor, - "qqp": QqpProcessor, - "qnli": QnliProcessor, - "rte": RteProcessor, - "wnli": WnliProcessor -} - -output_modes = { - "cola": "classification", - "mnli": "classification", - "mrpc": "classification", - "sst-2": "classification", - "sts-b": "regression", - "qqp": "classification", - "qnli": "classification", - "rte": "classification", - "wnli": "classification" -} diff --git a/vega/datasets/common/utils/div2k_util.py b/vega/datasets/common/utils/div2k_util.py index 23f3196..5697dfd 100644 --- a/vega/datasets/common/utils/div2k_util.py +++ b/vega/datasets/common/utils/div2k_util.py @@ -1,20 +1,26 @@ # -*- coding: utf-8 -*- # Copyright (C) 2020. Huawei Technologies Co., Ltd. All rights reserved. -# This program is free software; you can redistribute it and/or modify -# it under the terms of the MIT License. -# This program is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# MIT License for more details. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. """This script contains some common function to process the DIV2K dataset.""" import os -import pickle +import glob import cv2 import numpy as np -import glob +from vega.common import FileOps IMG_EXTENSIONS = ['.jpg', '.JPG', '.jpeg', '.JPEG', '.png', '.PNG', '.ppm', '.PPM', '.bmp', '.BMP'] @@ -84,8 +90,7 @@ def read_img_pkl(path): :return: the image :rtype: tuple """ - with open(path, "rb") as file: - return pickle.load(file) + return FileOps.load_pickle(path) def read_img_img(path): diff --git a/vega/datasets/common/utils/reds_util.py b/vega/datasets/common/utils/reds_util.py deleted file mode 100644 index 3a8e00b..0000000 --- a/vega/datasets/common/utils/reds_util.py +++ /dev/null @@ -1,139 +0,0 @@ -# -*- coding: utf-8 -*- - -# Copyright (C) 2020. Huawei Technologies Co., Ltd. All rights reserved. -# This program is free software; you can redistribute it and/or modify -# it under the terms of the MIT License. -# This program is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# MIT License for more details. 
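
For reference on the deleted data_processor.py module: it exposed a processors registry keyed by GLUE task name plus an output_modes table, and callers looked tasks up by name to build InputExample lists. A hypothetical usage sketch of that removed API (the module is gone, and the data directory path is a placeholder):

    # Historical/illustrative only: how code depending on the removed registry used it.
    task_name = "mrpc"
    processor = processors[task_name]()                    # e.g. MrpcProcessor()
    label_list = processor.get_labels()                    # ["0", "1"] for MRPC
    train_examples = processor.get_examples("train", "/path/to/MRPC")   # placeholder path
    output_mode = output_modes[task_name]                  # "classification"
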
- -"""This script contains some common function to process the REDS dataset.""" -import cv2 -import numpy as np -import random - - -def read_file(filepath): - """Read the file as buf.""" - filepath = str(filepath) - with open(filepath, 'rb') as f: - value_buf = f.read() - return value_buf - - -def imfrombytes(content, flag='color', float32=False): - """Convert bytes to image.""" - img_np = np.frombuffer(content, np.uint8) - imread_flags = { - 'color': cv2.IMREAD_COLOR, - 'grayscale': cv2.IMREAD_GRAYSCALE, - 'unchanged': cv2.IMREAD_UNCHANGED - } - img = cv2.imdecode(img_np, imread_flags[flag]) - if float32: - img = img.astype(np.float32) / 255. - return img - - -def img2tensor(imgs, bgr2rgb=True, float32=True): - """Convert image to tensor.""" - def _totensor(img, bgr2rgb, float32): - if img.shape[2] == 3 and bgr2rgb: - img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB) - img = img.transpose(2, 0, 1) - if float32: - img = img.astype(np.float32) - return img - - if isinstance(imgs, list): - return [_totensor(img, bgr2rgb, float32) for img in imgs] - else: - return _totensor(imgs, bgr2rgb, float32) - - -def augment(imgs, hflip=True, rotation=True, flows=None): - """Augment the images with flip or rotation.""" - hflip = hflip and random.random() < 0.5 - vflip = rotation and random.random() < 0.5 - rot90 = rotation and random.random() < 0.5 - - def _augment(img): - if hflip: # horizontal - cv2.flip(img, 1, img) - if vflip: # vertical - cv2.flip(img, 0, img) - if rot90: - img = img.transpose(1, 0, 2) - return img - - def _augment_flow(flow): - if hflip: # horizontal - cv2.flip(flow, 1, flow) - flow[:, :, 0] *= -1 - if vflip: # vertical - cv2.flip(flow, 0, flow) - flow[:, :, 1] *= -1 - if rot90: - flow = flow.transpose(1, 0, 2) - flow = flow[:, :, [1, 0]] - return flow - - if not isinstance(imgs, list): - imgs = [imgs] - imgs = [_augment(img) for img in imgs] - if len(imgs) == 1: - imgs = imgs[0] - - if flows is not None: - if not isinstance(flows, list): - flows = [flows] - flows = [_augment_flow(flow) for flow in flows] - if len(flows) == 1: - flows = flows[0] - return imgs, flows - else: - return imgs - - -def paired_random_crop(img_gts, img_lqs, gt_patch_size, scale, gt_path): - """Augment the images with paired random crop.""" - if not isinstance(img_gts, list): - img_gts = [img_gts] - if not isinstance(img_lqs, list): - img_lqs = [img_lqs] - - h_lq, w_lq, _ = img_lqs[0].shape - h_gt, w_gt, _ = img_gts[0].shape - lq_patch_size = gt_patch_size // scale - - if h_gt != h_lq * scale or w_gt != w_lq * scale: - raise ValueError( - f'Scale mismatches. GT ({h_gt}, {w_gt}) is not {scale}x ', - f'multiplication of LQ ({h_lq}, {w_lq}).') - if h_lq < lq_patch_size or w_lq < lq_patch_size: - raise ValueError(f'LQ ({h_lq}, {w_lq}) is smaller than patch size ' - f'({lq_patch_size}, {lq_patch_size}). ' - f'Please remove {gt_path}.') - - # randomly choose top and left coordinates for lq patch - top = random.randint(0, h_lq - lq_patch_size) - left = random.randint(0, w_lq - lq_patch_size) - - # crop lq patch - img_lqs = [ - v[top:top + lq_patch_size, left:left + lq_patch_size, ...] - for v in img_lqs - ] - - # crop corresponding gt patch - top_gt, left_gt = int(top * scale), int(left * scale) - img_gts = [ - v[top_gt:top_gt + gt_patch_size, left_gt:left_gt + gt_patch_size, ...] 
- for v in img_gts - ] - if len(img_gts) == 1: - img_gts = img_gts[0] - if len(img_lqs) == 1: - img_lqs = img_lqs[0] - return img_gts, img_lqs diff --git a/vega/datasets/common/utils/transforms.py b/vega/datasets/common/utils/transforms.py index 843aeaf..9ad6cb1 100644 --- a/vega/datasets/common/utils/transforms.py +++ b/vega/datasets/common/utils/transforms.py @@ -1,12 +1,18 @@ # -*- coding: utf-8 -*- # Copyright (C) 2020. Huawei Technologies Co., Ltd. All rights reserved. -# This program is free software; you can redistribute it and/or modify -# it under the terms of the MIT License. -# This program is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# MIT License for more details. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. """This is a class for Transforms.""" diff --git a/vega/datasets/common/utils/util.py b/vega/datasets/common/utils/util.py index 8a14bdc..8d6d209 100644 --- a/vega/datasets/common/utils/util.py +++ b/vega/datasets/common/utils/util.py @@ -1,12 +1,18 @@ # -*- coding: utf-8 -*- # Copyright (C) 2020. Huawei Technologies Co., Ltd. All rights reserved. -# This program is free software; you can redistribute it and/or modify -# it under the terms of the MIT License. -# This program is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# MIT License for more details. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. """This script contains some common tools.""" diff --git a/vega/datasets/conf/auto_lane.py b/vega/datasets/conf/auto_lane.py index 6657514..4472dbf 100644 --- a/vega/datasets/conf/auto_lane.py +++ b/vega/datasets/conf/auto_lane.py @@ -1,16 +1,22 @@ # -*- coding=utf-8 -*- # Copyright (C) 2020. Huawei Technologies Co., Ltd. All rights reserved. -# This program is free software; you can redistribute it and/or modify -# it under the terms of the MIT License. -# This program is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# MIT License for more details. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
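
The removed reds_util.paired_random_crop above draws one random low-quality window and takes the ground-truth window at the same location scaled by the SR factor, so the patch pair stays spatially aligned. A simplified single-image sketch of that coordinate math (the helper name is illustrative; list handling and the size/scale validation are omitted):

    import random

    def paired_crop(img_gt, img_lq, gt_patch_size, scale):
        """Crop matching GT/LQ patches at one random location (sketch)."""
        h_lq, w_lq = img_lq.shape[:2]
        lq_patch_size = gt_patch_size // scale
        top = random.randint(0, h_lq - lq_patch_size)
        left = random.randint(0, w_lq - lq_patch_size)
        lq_patch = img_lq[top:top + lq_patch_size, left:left + lq_patch_size, ...]
        top_gt, left_gt = top * scale, left * scale
        gt_patch = img_gt[top_gt:top_gt + gt_patch_size, left_gt:left_gt + gt_patch_size, ...]
        return gt_patch, lq_patch
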
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. """Default configs.""" -from .base import BaseConfig from vega.common import ConfigSerializable +from .base import BaseConfig class AutoLaneCommonConfig(BaseConfig): diff --git a/vega/datasets/conf/avazu.py b/vega/datasets/conf/avazu.py index 4d78fa2..cc69fb5 100644 --- a/vega/datasets/conf/avazu.py +++ b/vega/datasets/conf/avazu.py @@ -1,16 +1,22 @@ # -*- coding=utf-8 -*- # Copyright (C) 2020. Huawei Technologies Co., Ltd. All rights reserved. -# This program is free software; you can redistribute it and/or modify -# it under the terms of the MIT License. -# This program is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# MIT License for more details. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. """Default configs.""" -from .base import BaseConfig from vega.common import ConfigSerializable +from .base import BaseConfig class AvazuCommonConfig(BaseConfig): diff --git a/vega/datasets/conf/base.py b/vega/datasets/conf/base.py index d748ac7..2374555 100644 --- a/vega/datasets/conf/base.py +++ b/vega/datasets/conf/base.py @@ -1,12 +1,18 @@ # -*- coding=utf-8 -*- # Copyright (C) 2020. Huawei Technologies Co., Ltd. All rights reserved. -# This program is free software; you can redistribute it and/or modify -# it under the terms of the MIT License. -# This program is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# MIT License for more details. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
"""Default configs.""" from vega.common import ConfigSerializable @@ -42,21 +48,28 @@ def rules(cls): return rules_Base -class ExtConfig(BaseConfig): +class ExtTrainConfig(BaseConfig): """Extension config.""" - def __getattr__(self, item): - """Override getattr function.""" - if hasattr(self, item): - return super().__getattribute__(item) - else: - return None + pass + + +class ExtValConfig(BaseConfig): + """Extension config.""" + + pass + + +class ExtTestConfig(BaseConfig): + """Extension config.""" + + pass class ExtDatasetConfig(ConfigSerializable): """Extension dataset config.""" - common = ExtConfig - train = ExtConfig - val = ExtConfig - test = ExtConfig + common = BaseConfig + train = ExtTrainConfig + val = ExtValConfig + test = ExtTestConfig diff --git a/vega/datasets/conf/cifar10.py b/vega/datasets/conf/cifar10.py index dd0bb79..1fedf66 100644 --- a/vega/datasets/conf/cifar10.py +++ b/vega/datasets/conf/cifar10.py @@ -1,16 +1,22 @@ # -*- coding=utf-8 -*- # Copyright (C) 2020. Huawei Technologies Co., Ltd. All rights reserved. -# This program is free software; you can redistribute it and/or modify -# it under the terms of the MIT License. -# This program is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# MIT License for more details. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. """Default configs.""" -from .base import BaseConfig from vega.common import ConfigSerializable +from .base import BaseConfig class Cifar10CommonConfig(BaseConfig): @@ -43,8 +49,6 @@ class Cifar10TrainConfig(Cifar10CommonConfig): dict(type='RandomCrop', size=32, padding=4), dict(type='RandomHorizontalFlip'), dict(type='ToTensor'), - # rgb_mean = np.mean(train_data, axis=(0, 1, 2))/255 - # rgb_std = np.std(train_data, axis=(0, 1, 2))/255 dict(type='Normalize', mean=[0.49139968, 0.48215827, 0.44653124], std=[0.24703233, 0.24348505, 0.26158768])] padding = 8 num_images = 50000 diff --git a/vega/datasets/conf/cifar100.py b/vega/datasets/conf/cifar100.py index a9c5992..5bf4ebf 100644 --- a/vega/datasets/conf/cifar100.py +++ b/vega/datasets/conf/cifar100.py @@ -1,16 +1,22 @@ # -*- coding=utf-8 -*- # Copyright (C) 2020. Huawei Technologies Co., Ltd. All rights reserved. -# This program is free software; you can redistribute it and/or modify -# it under the terms of the MIT License. -# This program is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# MIT License for more details. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. """Default configs.""" -from .base import BaseConfig from vega.common import ConfigSerializable +from .base import BaseConfig class Cifar100CommonConfig(BaseConfig): diff --git a/vega/datasets/conf/city_scapes.py b/vega/datasets/conf/city_scapes.py index 71504bc..fe0d6c3 100644 --- a/vega/datasets/conf/city_scapes.py +++ b/vega/datasets/conf/city_scapes.py @@ -1,16 +1,22 @@ # -*- coding=utf-8 -*- # Copyright (C) 2020. Huawei Technologies Co., Ltd. All rights reserved. -# This program is free software; you can redistribute it and/or modify -# it under the terms of the MIT License. -# This program is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# MIT License for more details. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. """Default configs.""" -from .base import BaseConfig from vega.common import ConfigSerializable +from .base import BaseConfig class CityscapesCommonConfig(BaseConfig): diff --git a/vega/datasets/conf/cls_ds.py b/vega/datasets/conf/cls_ds.py index ef76930..2b9f46b 100644 --- a/vega/datasets/conf/cls_ds.py +++ b/vega/datasets/conf/cls_ds.py @@ -1,16 +1,22 @@ # -*- coding=utf-8 -*- # Copyright (C) 2020. Huawei Technologies Co., Ltd. All rights reserved. -# This program is free software; you can redistribute it and/or modify -# it under the terms of the MIT License. -# This program is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# MIT License for more details. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. """Default configs.""" -from .base import BaseConfig from vega.common import ConfigSerializable +from .base import BaseConfig class ClassificationDatasetCommonConfig(BaseConfig): diff --git a/vega/datasets/conf/coco.py b/vega/datasets/conf/coco.py index 73de063..c98d395 100644 --- a/vega/datasets/conf/coco.py +++ b/vega/datasets/conf/coco.py @@ -1,12 +1,18 @@ # -*- coding=utf-8 -*- # Copyright (C) 2020. 
Huawei Technologies Co., Ltd. All rights reserved. -# This program is free software; you can redistribute it and/or modify -# it under the terms of the MIT License. -# This program is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# MIT License for more details. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. """Default configs.""" from vega.datasets.conf.base import BaseConfig diff --git a/vega/datasets/conf/dataset.py b/vega/datasets/conf/dataset.py index 7ca89b4..e8cd96f 100644 --- a/vega/datasets/conf/dataset.py +++ b/vega/datasets/conf/dataset.py @@ -1,12 +1,18 @@ # -*- coding=utf-8 -*- # Copyright (C) 2020. Huawei Technologies Co., Ltd. All rights reserved. -# This program is free software; you can redistribute it and/or modify -# it under the terms of the MIT License. -# This program is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# MIT License for more details. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. """Dataset configs.""" from vega.common import ConfigSerializable diff --git a/vega/datasets/conf/div2k.py b/vega/datasets/conf/div2k.py index dab8259..0b267d0 100644 --- a/vega/datasets/conf/div2k.py +++ b/vega/datasets/conf/div2k.py @@ -1,16 +1,22 @@ # -*- coding=utf-8 -*- # Copyright (C) 2020. Huawei Technologies Co., Ltd. All rights reserved. -# This program is free software; you can redistribute it and/or modify -# it under the terms of the MIT License. -# This program is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# MIT License for more details. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
"""Default configs.""" -from .base import BaseConfig from vega.common import ConfigSerializable +from .base import BaseConfig class DIV2KCommonConfig(BaseConfig): diff --git a/vega/datasets/conf/fashion_mnist.py b/vega/datasets/conf/fashion_mnist.py index f884e12..e5c5020 100644 --- a/vega/datasets/conf/fashion_mnist.py +++ b/vega/datasets/conf/fashion_mnist.py @@ -1,16 +1,22 @@ # -*- coding=utf-8 -*- # Copyright (C) 2020. Huawei Technologies Co., Ltd. All rights reserved. -# This program is free software; you can redistribute it and/or modify -# it under the terms of the MIT License. -# This program is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# MIT License for more details. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. """Default configs.""" -from .base import BaseConfig from vega.common import ConfigSerializable +from .base import BaseConfig class FashionMnistCommonConfig(BaseConfig): diff --git a/vega/datasets/conf/glue.py b/vega/datasets/conf/glue.py index a515341..8fa57e1 100644 --- a/vega/datasets/conf/glue.py +++ b/vega/datasets/conf/glue.py @@ -1,12 +1,18 @@ # -*- coding=utf-8 -*- # Copyright (C) 2020. Huawei Technologies Co., Ltd. All rights reserved. -# This program is free software; you can redistribute it and/or modify -# it under the terms of the MIT License. -# This program is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# MIT License for more details. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. """Dataset configs.""" from vega.datasets.conf.base import BaseConfig from vega.common import ConfigSerializable diff --git a/vega/datasets/conf/imagenet.py b/vega/datasets/conf/imagenet.py index 4e1f35c..d5cc58e 100644 --- a/vega/datasets/conf/imagenet.py +++ b/vega/datasets/conf/imagenet.py @@ -1,16 +1,22 @@ # -*- coding=utf-8 -*- # Copyright (C) 2020. Huawei Technologies Co., Ltd. All rights reserved. -# This program is free software; you can redistribute it and/or modify -# it under the terms of the MIT License. -# This program is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# MIT License for more details. 
+# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. """Default configs.""" -from .base import BaseConfig from vega.common import ConfigSerializable +from .base import BaseConfig class ImagenetCommonConfig(BaseConfig): diff --git a/vega/datasets/conf/mnist.py b/vega/datasets/conf/mnist.py index ba984c1..406b055 100644 --- a/vega/datasets/conf/mnist.py +++ b/vega/datasets/conf/mnist.py @@ -1,16 +1,22 @@ # -*- coding=utf-8 -*- # Copyright (C) 2020. Huawei Technologies Co., Ltd. All rights reserved. -# This program is free software; you can redistribute it and/or modify -# it under the terms of the MIT License. -# This program is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# MIT License for more details. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. """Default configs.""" -from .base import BaseConfig from vega.common import ConfigSerializable +from .base import BaseConfig class MnistCommonConfig(BaseConfig): diff --git a/vega/datasets/conf/mrpc.py b/vega/datasets/conf/mrpc.py index 106e179..1db3140 100644 --- a/vega/datasets/conf/mrpc.py +++ b/vega/datasets/conf/mrpc.py @@ -1,12 +1,18 @@ # -*- coding=utf-8 -*- # Copyright (C) 2020. Huawei Technologies Co., Ltd. All rights reserved. -# This program is free software; you can redistribute it and/or modify -# it under the terms of the MIT License. -# This program is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# MIT License for more details. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. """Default configs.""" from vega.datasets.conf.base import BaseConfig diff --git a/vega/datasets/conf/nasbench.py b/vega/datasets/conf/nasbench.py index 4bc3ab2..c5421da 100644 --- a/vega/datasets/conf/nasbench.py +++ b/vega/datasets/conf/nasbench.py @@ -1,16 +1,22 @@ # -*- coding=utf-8 -*- # Copyright (C) 2020. 
Huawei Technologies Co., Ltd. All rights reserved. -# This program is free software; you can redistribute it and/or modify -# it under the terms of the MIT License. -# This program is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# MIT License for more details. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. """Default configs.""" -from .base import BaseConfig from vega.common import ConfigSerializable +from .base import BaseConfig class NasbenchCommonConfig(BaseConfig): diff --git a/vega/datasets/conf/pacs.py b/vega/datasets/conf/pacs.py new file mode 100644 index 0000000..77831e1 --- /dev/null +++ b/vega/datasets/conf/pacs.py @@ -0,0 +1,75 @@ +# -*- coding=utf-8 -*- + +# Copyright (C) 2020. Huawei Technologies Co., Ltd. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +"""Default configs.""" + +from vega.common import ConfigSerializable +from .base import BaseConfig + + +class PacsCommonConfig(BaseConfig): + """Default Dataset config for PacsCommon.""" + transforms = [ + dict(type='Resize', size=225), + dict(type='ToTensor'), + dict(type='Normalize', mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])] + batch_size = 10 + data_path = None + split_path = None + targetdomain = None + train_portion = 1.0 + task = None + + @classmethod + def rules(cls): + """Return rules for checking.""" + + rules_PacsCommonConfig = {"transforms": {"type": list}, + "data_path": {"type": str}, + "split_path": {"type": str}, + "targetdomain": {"type": str}, + "batch_size": {"type": int}, + "train_portion": {"type": (int, float)}, + } + return rules_PacsCommonConfig + + +class PacsConfig(ConfigSerializable): + """Default Dataset config for Pacs.""" + + common = PacsCommonConfig + train = PacsCommonConfig + val = PacsCommonConfig + test = PacsCommonConfig + + @classmethod + def rules(cls): + """Return rules for checking.""" + + rules_Pacs = {'common': {"type": dict}, + 'train': {"type": dict}, + 'val': {"type": dict}, + 'test': {"type": dict} + } + return rules_Pacs + + @classmethod + def get_config(cls): + """Get sub config.""" + return {'common': cls.common, + 'train': cls.train, + 'val': cls.val, + 'test': cls.test + } diff --git a/vega/datasets/conf/reds.py b/vega/datasets/conf/reds.py index 9985960..9105972 100644 --- a/vega/datasets/conf/reds.py +++ b/vega/datasets/conf/reds.py @@ -1,16 +1,22 @@ # -*- coding=utf-8 -*- # Copyright (C) 2020. 
Huawei Technologies Co., Ltd. All rights reserved. -# This program is free software; you can redistribute it and/or modify -# it under the terms of the MIT License. -# This program is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# MIT License for more details. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. """Default configs.""" -from .base import BaseConfig from vega.common import ConfigSerializable +from .base import BaseConfig class REDSCommonConfig(BaseConfig): diff --git a/vega/datasets/conf/sr.py b/vega/datasets/conf/sr.py index dfbd669..82d8f01 100644 --- a/vega/datasets/conf/sr.py +++ b/vega/datasets/conf/sr.py @@ -1,16 +1,22 @@ # -*- coding=utf-8 -*- # Copyright (C) 2020. Huawei Technologies Co., Ltd. All rights reserved. -# This program is free software; you can redistribute it and/or modify -# it under the terms of the MIT License. -# This program is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# MIT License for more details. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. """Default configs.""" -from .base import BaseConfig from vega.common import ConfigSerializable +from .base import BaseConfig class SRCommonConfig(BaseConfig): diff --git a/vega/datasets/conf/st.py b/vega/datasets/conf/st.py index a6fd50e..a72bc53 100644 --- a/vega/datasets/conf/st.py +++ b/vega/datasets/conf/st.py @@ -1,16 +1,22 @@ # -*- coding=utf-8 -*- # Copyright (C) 2020. Huawei Technologies Co., Ltd. All rights reserved. -# This program is free software; you can redistribute it and/or modify -# it under the terms of the MIT License. -# This program is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# MIT License for more details. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+# See the License for the specific language governing permissions and +# limitations under the License. """Default configs.""" -from .base import BaseConfig from vega.common import ConfigSerializable +from .base import BaseConfig class SpatiotemporalConfig(BaseConfig): diff --git a/vega/datasets/mindspore/__init__.py b/vega/datasets/mindspore/__init__.py deleted file mode 100644 index e69de29..0000000 diff --git a/vega/datasets/mindspore/adapter.py b/vega/datasets/mindspore/adapter.py index 3514b3a..0af6b9f 100644 --- a/vega/datasets/mindspore/adapter.py +++ b/vega/datasets/mindspore/adapter.py @@ -1,127 +1,138 @@ -# -*- coding: utf-8 -*- - -# Copyright (C) 2020. Huawei Technologies Co., Ltd. All rights reserved. -# This program is free software; you can redistribute it and/or modify -# it under the terms of the MIT License. -# This program is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# MIT License for more details. - -"""This is a base class of the dataset.""" - -from mindspore.dataset import GeneratorDataset, DistributedSampler, SubsetRandomSampler -import mindspore.dataset.transforms.c_transforms as C2 -import mindspore.dataset.vision.c_transforms as vision -import mindspore.common.dtype as mstype -import numpy as np -from mindspore.communication.management import get_rank, get_group_size - - -class MsAdapter(object): - """This is the base class of the dataset, which is a subclass of `TaskOps`. - - The Dataset provide several basic attribute like dataloader, transform and sampler. - """ - - invalid_dtype = ("float64", "int64", "torch.float64", "torch.int64") - dtype_map = {"float64": mstype.float32, - "int64": mstype.int32, - "torch.float64": mstype.float32, - "torch.int64": mstype.int32} - - def __init__(self, dataset): - self.dataset = dataset - self.args = dataset.args - self.sampler = self._init_sampler() - - def convert_dtype(self, ms_dataset): - """Convert the dataset dtype if the dtype is invalid. - - :param ms_dataset: a dataset object of mindspore - :return: a dataset object of mindspore after dtype convert - """ - item = self.dataset[0] - image, label = item[0], item[1] - try: - image_dtype = str(image.dtype) - except Exception: - pass - try: - label_dtype = str(label.dtype) - except Exception: - label_dtype = "int64" - if image_dtype in self.invalid_dtype: - type_cast_op = C2.TypeCast(self.dtype_map[image_dtype]) - ms_dataset = ms_dataset.map(input_columns="image", operations=type_cast_op) - - if label_dtype in self.invalid_dtype: - type_cast_op = C2.TypeCast(self.dtype_map[label_dtype]) - ms_dataset = ms_dataset.map(input_columns="label", operations=type_cast_op) - - return ms_dataset - - def _init_sampler(self): - """Initialize sampler method. 
- - :return: if the distributed is True, return a sampler object, else return None - :rtype: an object or None - """ - if self.dataset.world_size > 1: - sampler = DistributedSampler(num_shards=self.dataset.world_size, - shard_id=self.dataset.rank, - shuffle=self.args.shuffle) - self.args.shuffle = False - elif not hasattr(self.args, "train_portion"): - sampler = None - elif self.dataset.mode == 'test' or self.args.train_portion == 1: - sampler = None - else: - self.args.shuffle = False - num_train = len(self.dataset) - indices = list(range(num_train)) - split = int(np.floor(self.args.train_portion * num_train)) - if self.dataset.mode == 'train': - sampler = SubsetRandomSampler(indices[:split]) - elif self.dataset.mode == 'val': - sampler = SubsetRandomSampler(indices[split:num_train]) - else: - raise ValueError('the mode should be train, val or test') - return sampler - - @property - def loader(self): - """Dataloader arrtribute which is a unified interface to generate the data. - - :return: a batch data - :rtype: dict, list, optional - """ - rank_size = 1 - rank_id = 0 - if self.dataset.world_size > 1: - rank_size = get_group_size() - rank_id = get_rank() - self.sampler = None - ms_dataset = GeneratorDataset(self.dataset, ["image", "label"], sampler=self.sampler, num_shards=rank_size, - shard_id=rank_id) - # ms_dataset.set_dataset_size(len(self.dataset)) # TODO delete, only mindspore 0.5 need - ms_dataset = self.convert_dtype(ms_dataset) - if self.args.shuffle: - buffer_size = self.args.get("buffer_size", len(self.dataset)) - ms_dataset = ms_dataset.shuffle(buffer_size=buffer_size) - - if self.args.get("mixup", False): - num_class = self.args.get("num_class") - one_hot_op = C2.OneHot(num_classes=num_class) - ms_dataset = ms_dataset.map(operations=one_hot_op, input_columns=["label"]) - - mixup_batch_op = vision.MixUpBatch(2) - ms_dataset = ms_dataset.batch(self.args.batch_size) - ms_dataset = ms_dataset.map(operations=mixup_batch_op, input_columns=["image", "label"]) - else: - ms_dataset = ms_dataset.batch(self.args.batch_size) - - from mindspore.dataset.engine.datasets import BatchDataset, MapDataset - BatchDataset.__len__ = BatchDataset.get_dataset_size - MapDataset.__len__ = MapDataset.get_dataset_size - return ms_dataset +# -*- coding: utf-8 -*- + +# Copyright (C) 2020. Huawei Technologies Co., Ltd. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""This is a base class of the dataset.""" + +import logging +from typing import Iterator +import numpy as np +from mindspore.dataset import GeneratorDataset, DistributedSampler, SubsetRandomSampler +import mindspore.dataset.transforms.c_transforms as C2 +import mindspore.dataset.vision.c_transforms as vision +import mindspore.common.dtype as mstype +from mindspore.communication.management import get_rank, get_group_size + + +class MsAdapter(object): + """This is the base class of the dataset, which is a subclass of `TaskOps`. + + The Dataset provide several basic attribute like dataloader, transform and sampler. 
+ """ + + invalid_dtype = ("float64", "int64", "torch.float64", "torch.int64") + dtype_map = {"float64": mstype.float32, + "int64": mstype.int32, + "torch.float64": mstype.float32, + "torch.int64": mstype.int32} + + def __init__(self, dataset): + self.dataset = dataset + self.args = dataset.args + self.sampler = self._init_sampler() + + def convert_dtype(self, ms_dataset): + """Convert the dataset dtype if the dtype is invalid. + + :param ms_dataset: a dataset object of mindspore + :return: a dataset object of mindspore after dtype convert + """ + item = self.dataset[0] + image, label = item[0], item[1] + try: + image_dtype = str(image.dtype) + except Exception: + logging.debug('Falied to get image dtype.') + try: + label_dtype = str(label.dtype) + except Exception: + label_dtype = "int64" + if image_dtype in self.invalid_dtype: + type_cast_op = C2.TypeCast(self.dtype_map[image_dtype]) + ms_dataset = ms_dataset.map(input_columns="image", operations=type_cast_op) + + if label_dtype in self.invalid_dtype: + type_cast_op = C2.TypeCast(self.dtype_map[label_dtype]) + ms_dataset = ms_dataset.map(input_columns="label", operations=type_cast_op) + + return ms_dataset + + def _init_sampler(self): + """Initialize sampler method. + + :return: if the distributed is True, return a sampler object, else return None + :rtype: an object or None + """ + if self.dataset.world_size > 1: + sampler = DistributedSampler(num_shards=self.dataset.world_size, + shard_id=self.dataset.rank, + shuffle=self.args.shuffle) + self.args.shuffle = False + elif not hasattr(self.args, "train_portion"): + sampler = None + elif self.dataset.mode == 'test' or self.args.train_portion == 1: + sampler = None + else: + self.args.shuffle = False + num_train = len(self.dataset) + indices = list(range(num_train)) + split = int(np.floor(self.args.train_portion * num_train)) + if self.dataset.mode == 'train': + sampler = SubsetRandomSampler(indices[:split]) + elif self.dataset.mode == 'val': + sampler = SubsetRandomSampler(indices[split:num_train]) + else: + raise ValueError('the mode should be train, val or test') + return sampler + + @property + def loader(self): + """Dataloader arrtribute which is a unified interface to generate the data. 
+ + :return: a batch data + :rtype: dict, list, optional + """ + from mindspore.dataset.engine.datasets import BatchDataset, MapDataset + BatchDataset.__len__ = BatchDataset.get_dataset_size + MapDataset.__len__ = MapDataset.get_dataset_size + GeneratorDataset.__len__ = GeneratorDataset.get_dataset_size + Iterator.__len__ = lambda x: x.dataset.get_dataset_size() + if hasattr(self.dataset, "data_loader"): + return self.dataset.data_loader + rank_size = 1 + rank_id = 0 + if self.dataset.world_size > 1: + rank_size = get_group_size() + rank_id = get_rank() + self.sampler = None + ms_dataset = GeneratorDataset(self.dataset, ["image", "label"], sampler=self.sampler, num_shards=rank_size, + shard_id=rank_id) + ms_dataset = self.convert_dtype(ms_dataset) + if self.args.shuffle: + buffer_size = self.args.get("buffer_size", len(self.dataset)) + ms_dataset = ms_dataset.shuffle(buffer_size=buffer_size) + + if self.args.get("mixup", False): + num_class = self.args.get("num_class") + one_hot_op = C2.OneHot(num_classes=num_class) + ms_dataset = ms_dataset.map(operations=one_hot_op, input_columns=["label"]) + + mixup_batch_op = vision.MixUpBatch(2) + ms_dataset = ms_dataset.batch(self.args.batch_size) + ms_dataset = ms_dataset.map(operations=mixup_batch_op, input_columns=["image", "label"]) + else: + ms_dataset = ms_dataset.batch(self.args.batch_size) + + return ms_dataset diff --git a/vega/datasets/pytorch/__init__.py b/vega/datasets/pytorch/__init__.py index cfb90c0..dfdd346 100644 --- a/vega/datasets/pytorch/__init__.py +++ b/vega/datasets/pytorch/__init__.py @@ -1,18 +1,24 @@ -# -*- coding:utf-8 -*- - -# Copyright (C) 2020. Huawei Technologies Co., Ltd. All rights reserved. -# This program is free software; you can redistribute it and/or modify -# it under the terms of the MIT License. -# This program is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# MIT License for more details. - -"""Lazy import dataset.""" - -from vega.common.class_factory import ClassFactory - - -ClassFactory.lazy_register("vega.datasets.pytorch", { - "coco_transforms": ["CocoCategoriesTransform", "PolysToMaskTransform"], -}) +# -*- coding:utf-8 -*- + +# Copyright (C) 2020. Huawei Technologies Co., Ltd. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""Lazy import dataset.""" + +from vega.common.class_factory import ClassFactory + + +ClassFactory.lazy_register("vega.datasets.pytorch", { + "coco_transforms": ["CocoCategoriesTransform", "PolysToMaskTransform"], +}) diff --git a/vega/datasets/pytorch/adapter.py b/vega/datasets/pytorch/adapter.py index 50bdb9e..6cc6fde 100644 --- a/vega/datasets/pytorch/adapter.py +++ b/vega/datasets/pytorch/adapter.py @@ -1,18 +1,26 @@ # -*- coding: utf-8 -*- # Copyright (C) 2020. Huawei Technologies Co., Ltd. All rights reserved. -# This program is free software; you can redistribute it and/or modify -# it under the terms of the MIT License. 
-# This program is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# MIT License for more details. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. """This is a base class of the dataset.""" +import logging + from torch.utils import data as torch_data -from .samplers import DistributedSampler from torch.utils.data.sampler import SubsetRandomSampler import numpy as np +from .samplers import DistributedSampler class TorchAdapter(object): @@ -75,12 +83,16 @@ def loader(self): """ if hasattr(self.dataset, "data_loader"): return self.dataset.data_loader - data_loader = torch_data.DataLoader(dataset=self.dataset, - batch_size=self.args.batch_size, - shuffle=self.args.shuffle, - num_workers=self.args.num_workers, - pin_memory=self.args.pin_memory, - sampler=self.sampler, - drop_last=self.args.drop_last, - collate_fn=self.collate_fn) + try: + data_loader = torch_data.DataLoader(dataset=self.dataset, + batch_size=self.args.batch_size, + shuffle=self.args.shuffle, + num_workers=self.args.num_workers, + pin_memory=self.args.pin_memory, + sampler=self.sampler, + drop_last=self.args.drop_last, + collate_fn=self.collate_fn) + except BrokenPipeError as ex: + logging.debug(ex) + data_loader = None return data_loader diff --git a/vega/datasets/pytorch/coco_transforms.py b/vega/datasets/pytorch/coco_transforms.py index f8b4146..b8d4fb1 100644 --- a/vega/datasets/pytorch/coco_transforms.py +++ b/vega/datasets/pytorch/coco_transforms.py @@ -1,16 +1,16 @@ -# -*- coding: utf-8 -*- +# -*- coding:utf-8 -*- -# Copyright (C) 2020. Huawei Technologies Co., Ltd. All rights reserved. -# This program is free software; you can redistribute it and/or modify -# it under the terms of the MIT License. -# This program is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# MIT License for more details. +# This file is adapted from the torchvision library at +# https://github.com/pytorch/vision/blob/main/references/detection/coco_utils.py + +# 2020.11.12-Changed for vega +# Huawei Technologies Co., Ltd. +# Copyright 2020 Huawei Technologies Co., Ltd. 
"""This is a class for Coco Transforms.""" -import torch + import copy +import torch from pycocotools import mask as coco_mask from vega.common import ClassFactory, ClassType @@ -50,7 +50,6 @@ def __call__(self, image, target): anno = target["annotations"] anno = [obj for obj in anno if obj['iscrowd'] == 0] boxes = [obj["bbox"] for obj in anno] - # guard against no boxes via resizing boxes = torch.as_tensor(boxes, dtype=torch.float32).reshape(-1, 4) boxes[:, 2:] += boxes[:, :2] boxes[:, 0::2].clamp_(min=0, max=w) @@ -73,7 +72,6 @@ def __call__(self, image, target): keep = (boxes[:, 3] > boxes[:, 1]) & (boxes[:, 2] > boxes[:, 0]) boxes = boxes[keep] classes = classes[keep] - # masks = masks[keep] if keypoints is not None: keypoints = keypoints[keep] @@ -85,7 +83,6 @@ def __call__(self, image, target): if keypoints is not None: target["keypoints"] = keypoints - # for conversion to coco api area = torch.tensor([obj["area"] for obj in anno]) iscrowd = torch.tensor([obj["iscrowd"] for obj in anno]) target["area"] = area @@ -119,7 +116,6 @@ class PrepareVOCInstance(object): """Convert dataset to Voc instance.""" CLASSES = ( - # "__background__ ", "aeroplane", "bicycle", "bird", @@ -170,13 +166,11 @@ def __call__(self, image, target): area = torch.as_tensor(area) iscrowd = torch.as_tensor(iscrowd) image_id = anno['filename'][5:-4] - # image_id = torch.as_tensor([int(image_id)]) image_id = image_id target = {} target["boxes"] = boxes target["labels"] = classes target["image_id"] = image_id - # for conversion to coco api target["area"] = area target["iscrowd"] = iscrowd target["file_name"] = anno['filename'] diff --git a/vega/datasets/pytorch/samplers/__init__.py b/vega/datasets/pytorch/samplers/__init__.py index 5a68f38..8470e3c 100644 --- a/vega/datasets/pytorch/samplers/__init__.py +++ b/vega/datasets/pytorch/samplers/__init__.py @@ -1,2 +1,2 @@ # -*- coding: utf-8 -*- -from .sampler import * +from .sampler import DistributedSampler, DistributedGroupSampler, GroupSampler diff --git a/vega/datasets/pytorch/samplers/sampler.py b/vega/datasets/pytorch/samplers/sampler.py index 93695a4..ed5a651 100644 --- a/vega/datasets/pytorch/samplers/sampler.py +++ b/vega/datasets/pytorch/samplers/sampler.py @@ -1,12 +1,18 @@ # -*- coding: utf-8 -*- # Copyright (C) 2020. Huawei Technologies Co., Ltd. All rights reserved. -# This program is free software; you can redistribute it and/or modify -# it under the terms of the MIT License. -# This program is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# MIT License for more details. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
"""This script include some common sampler methods.""" from __future__ import division diff --git a/vega/datasets/sampler/sampler.py b/vega/datasets/sampler/sampler.py index 6e141f7..14eae24 100644 --- a/vega/datasets/sampler/sampler.py +++ b/vega/datasets/sampler/sampler.py @@ -1,12 +1,18 @@ # -*- coding: utf-8 -*- # Copyright (C) 2020. Huawei Technologies Co., Ltd. All rights reserved. -# This program is free software; you can redistribute it and/or modify -# it under the terms of the MIT License. -# This program is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# MIT License for more details. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. """This script include some common sampler methods.""" import numpy as np diff --git a/vega/datasets/tensorflow/adapter.py b/vega/datasets/tensorflow/adapter.py index 033d652..e06aad1 100644 --- a/vega/datasets/tensorflow/adapter.py +++ b/vega/datasets/tensorflow/adapter.py @@ -1,229 +1,236 @@ -# -*- coding: utf-8 -*- - -# Copyright (C) 2020. Huawei Technologies Co., Ltd. All rights reserved. -# This program is free software; you can redistribute it and/or modify -# it under the terms of the MIT License. -# This program is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# MIT License for more details. - -"""This is a base class of the dataset.""" - -import tensorflow as tf -import vega -from vega.common.general import General - - -class TfAdapter(object): - """This is the base class of the dataset, which is a subclass of `TaskOps`. - - The Dataset provide several basic attribute like dataloader, transform and sampler. - """ - - dtype_map = {"torch.float32": tf.float32, - "float32": tf.float32, - "torch.float16": tf.float32, - "float16": tf.float32, - "float64": tf.double, - "torch.int32": tf.int32, - "int32": tf.int32, - "torch.int64": tf.int64, - "int64": tf.int64, - "int": tf.int64} - - def __init__(self, dataset): - self.dataset = dataset - self.args = dataset.args - self._num_examples = len(self.dataset) if hasattr(self.dataset, "__len__") else self.args.get('num_images') - self.data_index = list(range(self._num_examples)) - if self.args.get('train_portion', 1.0) < 1: - split = int(self.args.train_portion * self._num_examples) - if self.dataset.mode == 'train': - self.data_index = self.data_index[:split] - self._num_examples = split - elif self.dataset.mode == 'val': - self.data_index = self.data_index[split:] - self._num_examples = self._num_examples - split - self.repeat_ratio = self.args.get('repeat_ratio', 1.) 
- self.is_detection = self.args.get("is_detection", False) - self.is_spatiotemporal = self.args.get('is_spatiotemporal') - - def _get_dataset_info(self): - """Get the data shape.""" - if self.is_detection: - return - item = self.dataset[0] - if self.is_spatiotemporal: - self.feature_shape = [v.shape if v is not None else v for v in item] - if isinstance(item, (list, tuple)): - self.image_pos, self.label_pos = 0, 1 - elif isinstance(item, dict): - keys = list(item.keys()) - self.image_pos, self.label_pos = keys[0], keys[1] - else: - raise ValueError - image = item[self.image_pos] - label = item[self.label_pos] - self.fixed_size = self.args.get("fixed_size", True) - self.data_format = General.data_format - self.image_shape = list(image.shape) - try: - self.label_shape = list(label.shape) - except Exception: - self.label_shape = 1 - - try: - self.image_dtype = str(image.dtype) - except Exception: - pass - try: - self.label_dtype = str(label.dtype) - except Exception: - self.label_dtype = "int" - - self.image_dtype_tf = self.dtype_map[self.image_dtype] - self.label_dtype_tf = self.dtype_map[self.label_dtype] - - def _get_item(self, images_index, label_index): - """Get one item of the dataset.""" - item = self.dataset[images_index] - if self.is_spatiotemporal: - return item[0], item[1], item[2], item[3], item[4], item[5] - if not self.is_detection: - image = item[self.image_pos] - label = item[self.label_pos] - return image, label - else: - image = item[0] - img_meta = image.get("img_meta") - return image.get("img"), image.get("gt_bboxes"), image.get("gt_bboxes_ignore"), \ - image.get("gt_labels_ignore"), image.get("gt_labels"), \ - img_meta.get("ori_shape"), img_meta.get("img_shape"), \ - img_meta.get("pad_shape"), img_meta.get("scale_factor"), \ - img_meta.get("flip"), item[1] - - def _resize_image_label(self, image, label): - """Resize the image and label.""" - if len(self.image_shape) == 3: - img_channel = self.image_shape[0] - image.set_shape([img_channel, None, None]) - elif len(self.image_shape) == 2: - img_channel = 1 - image.set_shape([img_channel, None, None]) - else: - image.set_shape(self.image_shape) - - if self.label_shape == 1: - label.set_shape(self.label_shape) - elif len(self.label_shape) == 3: - label_channel = self.label_shape[0] - label.set_shape([label_channel, None, None]) - else: - label_channel = 1 - label.set_shape([label_channel, None, None]) - - return image, label - - def data_map_func(self, images_index, label_index): - """Apply data map function from raw data.""" - if self.is_spatiotemporal: - feature, spatial_mx, temporal_mx, mean, std, label = tf.numpy_function( - self._get_item, [images_index, label_index], - [tf.float64, tf.float64, tf.float64, tf.float64, tf.float64, tf.float64]) - feature.set_shape(self.feature_shape[0]) - spatial_mx.set_shape(self.feature_shape[1]) - temporal_mx.set_shape(self.feature_shape[2]) - label.set_shape(self.feature_shape[-1]) - return (feature, spatial_mx, temporal_mx), (mean, std, label) - if not self.is_detection: - image, label = tf.numpy_function(self._get_item, - [images_index, label_index], - [self.image_dtype_tf, self.label_dtype_tf]) - if self.fixed_size: - image.set_shape(self.image_shape) - label.set_shape(self.label_shape) - else: - image, label = self._resize_image_label(image, label) - - try: - label = tf.squeeze(label) - except Exception: - pass - if self.label_dtype == "int": - label = tf.cast(label, tf.int32) - if self.data_format == "channels_last": - try: - image = tf.transpose(image, [1, 2, 0]) - label = 
tf.transpose(label, [1, 2, 0]) - except Exception: - pass - else: - img, gt_bboxes, gt_bboxes_ignore, gt_labels_ignore, gt_labels, \ - ori_shape, img_shape, pad_shape, scale_factor, flip, target = tf.numpy_function( - self._get_item, - [images_index, label_index], - [tf.float32, tf.float32, tf.float32, tf.float32, tf.int64, - tf.int64, tf.int64, tf.int64, tf.float64, tf.bool, tf.int64]) - image = dict() - img_meta = dict() - img_meta["ori_shape"] = ori_shape - img_meta["img_shape"] = img_shape - img_meta["pad_shape"] = pad_shape - img_meta["scale_factor"] = scale_factor - img_meta["flip"] = flip - image["img"] = img - image["gt_bboxes"] = gt_bboxes - image["gt_bboxes_ignore"] = gt_bboxes_ignore - image["gt_labels"] = gt_labels - image["gt_labels_ignore"] = gt_labels_ignore - image["img_meta"] = img_meta - label = target - - return image, label - - def __len__(self): - """Return dataset length of train or valid.""" - if self.dataset.mode == 'train': - len = self._num_examples // self.args.batch_size - if self.dataset.world_size > 1: - len = len // self.dataset.world_size - len = int(len * self.repeat_ratio) - else: - len = self._num_examples // self.args.batch_size - return len - - def input_fn(self): - """Return the next `batch_size` examples from this data set.""" - if hasattr(self.dataset, "input_fn"): - return self.dataset.input_fn() - self._get_dataset_info() - dataset = tf.data.Dataset.from_tensor_slices( - (self.data_index, self.data_index)) - if self.dataset.mode == 'train' and self.dataset.world_size > 1: - dataset = dataset.shard(self.dataset.world_size, self.dataset.rank) - if self.dataset.mode == 'train': - dataset = dataset.repeat() - if self.args.shuffle: - dataset = dataset.shuffle(buffer_size=self._num_examples) - - if vega.is_npu_device(): - # esr cannot adapt to num_parallel_calls on NPU - dataset = dataset.map(self.data_map_func) - dataset = dataset.batch( - batch_size=self.args.batch_size, drop_remainder=self.args.drop_last) - else: - dataset = dataset.map(self.data_map_func, num_parallel_calls=tf.data.experimental.AUTOTUNE) - dataset = dataset.batch( - batch_size=self.args.batch_size, drop_remainder=self.args.drop_last) - dataset = dataset.prefetch(tf.contrib.data.AUTOTUNE) - return dataset - - @property - def loader(self): - """Dataloader arrtribute which is a unified interface to generate the data. - - :return: a batch data - :rtype: dict, list, optional - """ - return self +# -*- coding: utf-8 -*- + +# Copyright (C) 2020. Huawei Technologies Co., Ltd. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""This is a base class of the dataset.""" + +import logging +import tensorflow as tf +import vega +from vega.common.general import General + + +class TfAdapter(object): + """This is the base class of the dataset, which is a subclass of `TaskOps`. + + The Dataset provide several basic attribute like dataloader, transform and sampler. 
+ """ + + dtype_map = {"torch.float32": tf.float32, + "float32": tf.float32, + "torch.float16": tf.float32, + "float16": tf.float32, + "float64": tf.double, + "torch.int32": tf.int32, + "int32": tf.int32, + "torch.int64": tf.int64, + "int64": tf.int64, + "int": tf.int64} + + def __init__(self, dataset): + self.dataset = dataset + self.args = dataset.args + self._num_examples = len(self.dataset) if hasattr(self.dataset, "__len__") else self.args.get('num_images') + self.data_index = list(range(self._num_examples)) + if self.args.get('train_portion', 1.0) < 1: + split = int(self.args.train_portion * self._num_examples) + if self.dataset.mode == 'train': + self.data_index = self.data_index[:split] + self._num_examples = split + elif self.dataset.mode == 'val': + self.data_index = self.data_index[split:] + self._num_examples = self._num_examples - split + self.repeat_ratio = self.args.get('repeat_ratio', 1.) + self.is_detection = self.args.get("is_detection", False) + self.is_spatiotemporal = self.args.get('is_spatiotemporal') + + def _get_dataset_info(self): + """Get the data shape.""" + if self.is_detection: + return + item = self.dataset[0] + if self.is_spatiotemporal: + self.feature_shape = [v.shape if v is not None else v for v in item] + if isinstance(item, (list, tuple)): + self.image_pos, self.label_pos = 0, 1 + elif isinstance(item, dict): + keys = list(item.keys()) + self.image_pos, self.label_pos = keys[0], keys[1] + else: + raise ValueError + image = item[self.image_pos] + label = item[self.label_pos] + self.fixed_size = self.args.get("fixed_size", True) + self.data_format = General.data_format + self.image_shape = list(image.shape) + try: + self.label_shape = list(label.shape) + except Exception: + self.label_shape = 1 + + try: + self.image_dtype = str(image.dtype) + except Exception: + logging.debug('Falied to get image dtype.') + try: + self.label_dtype = str(label.dtype) + except Exception: + self.label_dtype = "int" + + self.image_dtype_tf = self.dtype_map[self.image_dtype] + self.label_dtype_tf = self.dtype_map[self.label_dtype] + + def _get_item(self, images_index, label_index): + """Get one item of the dataset.""" + item = self.dataset[images_index] + if self.is_spatiotemporal: + return item[0], item[1], item[2], item[3], item[4], item[5] + if not self.is_detection: + image = item[self.image_pos] + label = item[self.label_pos] + return image, label + else: + image = item[0] + img_meta = image.get("img_meta") + return image.get("img"), image.get("gt_bboxes"), image.get("gt_bboxes_ignore"), \ + image.get("gt_labels_ignore"), image.get("gt_labels"), \ + img_meta.get("ori_shape"), img_meta.get("img_shape"), \ + img_meta.get("pad_shape"), img_meta.get("scale_factor"), \ + img_meta.get("flip"), item[1] + + def _resize_image_label(self, image, label): + """Resize the image and label.""" + if len(self.image_shape) == 3: + img_channel = self.image_shape[0] + image.set_shape([img_channel, None, None]) + elif len(self.image_shape) == 2: + img_channel = 1 + image.set_shape([img_channel, None, None]) + else: + image.set_shape(self.image_shape) + + if self.label_shape == 1: + label.set_shape(self.label_shape) + elif len(self.label_shape) == 3: + label_channel = self.label_shape[0] + label.set_shape([label_channel, None, None]) + else: + label_channel = 1 + label.set_shape([label_channel, None, None]) + + return image, label + + def data_map_func(self, images_index, label_index): + """Apply data map function from raw data.""" + if self.is_spatiotemporal: + feature, spatial_mx, 
temporal_mx, mean, std, label = tf.numpy_function( + self._get_item, [images_index, label_index], + [tf.float64, tf.float64, tf.float64, tf.float64, tf.float64, tf.float64]) + feature.set_shape(self.feature_shape[0]) + spatial_mx.set_shape(self.feature_shape[1]) + temporal_mx.set_shape(self.feature_shape[2]) + label.set_shape(self.feature_shape[-1]) + return (feature, spatial_mx, temporal_mx), (mean, std, label) + if not self.is_detection: + image, label = tf.numpy_function(self._get_item, + [images_index, label_index], + [self.image_dtype_tf, self.label_dtype_tf]) + if self.fixed_size: + image.set_shape(self.image_shape) + label.set_shape(self.label_shape) + else: + image, label = self._resize_image_label(image, label) + + try: + label = tf.squeeze(label) + except Exception: + logging.debug('Failed to get label.') + if self.label_dtype == "int": + label = tf.cast(label, tf.int32) + if self.data_format == "channels_last": + try: + image = tf.transpose(image, [1, 2, 0]) + label = tf.transpose(label, [1, 2, 0]) + except Exception: + logging.debug('Failed to transpose.') + else: + img, gt_bboxes, gt_bboxes_ignore, gt_labels_ignore, gt_labels, \ + ori_shape, img_shape, pad_shape, scale_factor, flip, target = tf.numpy_function( + self._get_item, + [images_index, label_index], + [tf.float32, tf.float32, tf.float32, tf.float32, tf.int64, + tf.int64, tf.int64, tf.int64, tf.float64, tf.bool, tf.int64]) + image = dict() + img_meta = dict() + img_meta["ori_shape"] = ori_shape + img_meta["img_shape"] = img_shape + img_meta["pad_shape"] = pad_shape + img_meta["scale_factor"] = scale_factor + img_meta["flip"] = flip + image["img"] = img + image["gt_bboxes"] = gt_bboxes + image["gt_bboxes_ignore"] = gt_bboxes_ignore + image["gt_labels"] = gt_labels + image["gt_labels_ignore"] = gt_labels_ignore + image["img_meta"] = img_meta + label = target + + return image, label + + def __len__(self): + """Return dataset length of train or valid.""" + if self.dataset.mode == 'train': + len = self._num_examples // self.args.batch_size + if self.dataset.world_size > 1: + len = len // self.dataset.world_size + len = int(len * self.repeat_ratio) + else: + len = self._num_examples // self.args.batch_size + return len + + def input_fn(self): + """Return the next `batch_size` examples from this data set.""" + if hasattr(self.dataset, "input_fn"): + return self.dataset.input_fn() + self._get_dataset_info() + dataset = tf.data.Dataset.from_tensor_slices( + (self.data_index, self.data_index)) + if self.dataset.mode == 'train' and self.dataset.world_size > 1: + dataset = dataset.shard(self.dataset.world_size, self.dataset.rank) + if self.dataset.mode == 'train': + dataset = dataset.repeat() + if self.args.shuffle: + dataset = dataset.shuffle(buffer_size=self._num_examples) + + if vega.is_npu_device(): + # esr cannot adapt to num_parallel_calls on NPU + dataset = dataset.map(self.data_map_func) + dataset = dataset.batch( + batch_size=self.args.batch_size, drop_remainder=self.args.drop_last) + else: + dataset = dataset.map(self.data_map_func, num_parallel_calls=tf.data.experimental.AUTOTUNE) + dataset = dataset.batch( + batch_size=self.args.batch_size, drop_remainder=self.args.drop_last) + dataset = dataset.prefetch(tf.contrib.data.AUTOTUNE) + return dataset + + @property + def loader(self): + """Dataloader attribute which is a unified interface to generate the data. 
+ + :return: a batch data + :rtype: dict, list, optional + """ + return self diff --git a/vega/datasets/tensorflow/imagenet.py b/vega/datasets/tensorflow/imagenet.py index 0cfe0e2..c438a1d 100644 --- a/vega/datasets/tensorflow/imagenet.py +++ b/vega/datasets/tensorflow/imagenet.py @@ -1,12 +1,18 @@ # -*- coding: utf-8 -*- # Copyright (C) 2020. Huawei Technologies Co., Ltd. All rights reserved. -# This program is free software; you can redistribute it and/or modify -# it under the terms of the MIT License. -# This program is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# MIT License for more details. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. """This is a class of ImageNet Dataset.""" import os @@ -14,8 +20,8 @@ from official.r1.resnet.imagenet_preprocessing import preprocess_image from vega.common import ClassFactory, ClassType from vega.common import FileOps -from ..common.dataset import Dataset from vega.datasets.conf.imagenet import ImagenetConfig +from ..common.dataset import Dataset @ClassFactory.register(ClassType.DATASET) diff --git a/vega/datasets/transforms/AutoAugment.py b/vega/datasets/transforms/AutoAugment.py index 875d206..70ee8be 100644 --- a/vega/datasets/transforms/AutoAugment.py +++ b/vega/datasets/transforms/AutoAugment.py @@ -1,16 +1,22 @@ # -*- coding: utf-8 -*- # Copyright (C) 2020. Huawei Technologies Co., Ltd. All rights reserved. -# This program is free software; you can redistribute it and/or modify -# it under the terms of the MIT License. -# This program is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# MIT License for more details. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. """This is a class for AutoContrast.""" -import numpy as np import random +import numpy as np from vega.common import ClassFactory, ClassType diff --git a/vega/datasets/transforms/AutoContrast.py b/vega/datasets/transforms/AutoContrast.py index c3dee5f..9757498 100644 --- a/vega/datasets/transforms/AutoContrast.py +++ b/vega/datasets/transforms/AutoContrast.py @@ -1,12 +1,18 @@ # -*- coding: utf-8 -*- # Copyright (C) 2020. Huawei Technologies Co., Ltd. All rights reserved. -# This program is free software; you can redistribute it and/or modify -# it under the terms of the MIT License. 
-# This program is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# MIT License for more details. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. """This is a class for AutoContrast.""" from PIL import ImageOps diff --git a/vega/datasets/transforms/BboxTransform.py b/vega/datasets/transforms/BboxTransform.py index 9bd74f7..0b3268e 100644 --- a/vega/datasets/transforms/BboxTransform.py +++ b/vega/datasets/transforms/BboxTransform.py @@ -1,12 +1,18 @@ # -*- coding: utf-8 -*- # Copyright (C) 2020. Huawei Technologies Co., Ltd. All rights reserved. -# This program is free software; you can redistribute it and/or modify -# it under the terms of the MIT License. -# This program is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# MIT License for more details. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. """This is a class for BboxTransform.""" import numpy as np diff --git a/vega/datasets/transforms/Brightness.py b/vega/datasets/transforms/Brightness.py index 79a65fa..b1d77a0 100644 --- a/vega/datasets/transforms/Brightness.py +++ b/vega/datasets/transforms/Brightness.py @@ -1,17 +1,23 @@ # -*- coding: utf-8 -*- # Copyright (C) 2020. Huawei Technologies Co., Ltd. All rights reserved. -# This program is free software; you can redistribute it and/or modify -# it under the terms of the MIT License. -# This program is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# MIT License for more details. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
"""This is a class for Brightness.""" from PIL import ImageEnhance -from .ops import float_parameter from vega.common import ClassFactory, ClassType +from .ops import float_parameter @ClassFactory.register(ClassType.TRANSFORM) diff --git a/vega/datasets/transforms/Color.py b/vega/datasets/transforms/Color.py index 8145103..fcbfbe3 100644 --- a/vega/datasets/transforms/Color.py +++ b/vega/datasets/transforms/Color.py @@ -1,17 +1,23 @@ # -*- coding: utf-8 -*- # Copyright (C) 2020. Huawei Technologies Co., Ltd. All rights reserved. -# This program is free software; you can redistribute it and/or modify -# it under the terms of the MIT License. -# This program is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# MIT License for more details. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. """This is a class for Color.""" from PIL import ImageEnhance -from .ops import float_parameter from vega.common import ClassFactory, ClassType +from .ops import float_parameter @ClassFactory.register(ClassType.TRANSFORM) diff --git a/vega/datasets/transforms/Compose.py b/vega/datasets/transforms/Compose.py index 03bcdd5..97a83ac 100644 --- a/vega/datasets/transforms/Compose.py +++ b/vega/datasets/transforms/Compose.py @@ -1,12 +1,18 @@ # -*- coding: utf-8 -*- # Copyright (C) 2020. Huawei Technologies Co., Ltd. All rights reserved. -# This program is free software; you can redistribute it and/or modify -# it under the terms of the MIT License. -# This program is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# MIT License for more details. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. """This is a class for Compose.""" diff --git a/vega/datasets/transforms/Compose_pair.py b/vega/datasets/transforms/Compose_pair.py index 92f99b7..35bb678 100644 --- a/vega/datasets/transforms/Compose_pair.py +++ b/vega/datasets/transforms/Compose_pair.py @@ -1,12 +1,18 @@ # -*- coding: utf-8 -*- # Copyright (C) 2020. Huawei Technologies Co., Ltd. All rights reserved. -# This program is free software; you can redistribute it and/or modify -# it under the terms of the MIT License. -# This program is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. 
See the -# MIT License for more details. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. """This is a class for Compose_pair.""" diff --git a/vega/datasets/transforms/Contrast.py b/vega/datasets/transforms/Contrast.py index e01c18e..9e4185a 100644 --- a/vega/datasets/transforms/Contrast.py +++ b/vega/datasets/transforms/Contrast.py @@ -1,17 +1,23 @@ # -*- coding: utf-8 -*- # Copyright (C) 2020. Huawei Technologies Co., Ltd. All rights reserved. -# This program is free software; you can redistribute it and/or modify -# it under the terms of the MIT License. -# This program is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# MIT License for more details. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. """This is a class for Contrast.""" from PIL import ImageEnhance -from .ops import float_parameter from vega.common import ClassFactory, ClassType +from .ops import float_parameter @ClassFactory.register(ClassType.TRANSFORM) diff --git a/vega/datasets/transforms/Cutout.py b/vega/datasets/transforms/Cutout.py index 032bc71..0f2df98 100644 --- a/vega/datasets/transforms/Cutout.py +++ b/vega/datasets/transforms/Cutout.py @@ -1,18 +1,24 @@ # -*- coding: utf-8 -*- # Copyright (C) 2020. Huawei Technologies Co., Ltd. All rights reserved. -# This program is free software; you can redistribute it and/or modify -# it under the terms of the MIT License. -# This program is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# MIT License for more details. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
"""This is a class for Cutout.""" import numpy as np import torch -from .ops import int_parameter from vega.common import ClassFactory, ClassType +from .ops import int_parameter @ClassFactory.register(ClassType.TRANSFORM) diff --git a/vega/datasets/transforms/Equalize.py b/vega/datasets/transforms/Equalize.py index 23e7133..60a7eee 100644 --- a/vega/datasets/transforms/Equalize.py +++ b/vega/datasets/transforms/Equalize.py @@ -1,12 +1,18 @@ # -*- coding: utf-8 -*- # Copyright (C) 2020. Huawei Technologies Co., Ltd. All rights reserved. -# This program is free software; you can redistribute it and/or modify -# it under the terms of the MIT License. -# This program is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# MIT License for more details. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. """This is a class for Equalize.""" from PIL import ImageOps diff --git a/vega/datasets/transforms/ImageTransform.py b/vega/datasets/transforms/ImageTransform.py index 798e84e..1c89749 100644 --- a/vega/datasets/transforms/ImageTransform.py +++ b/vega/datasets/transforms/ImageTransform.py @@ -1,12 +1,18 @@ # -*- coding: utf-8 -*- # Copyright (C) 2020. Huawei Technologies Co., Ltd. All rights reserved. -# This program is free software; you can redistribute it and/or modify -# it under the terms of the MIT License. -# This program is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# MIT License for more details. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. """This is a class for ImageTransform.""" import numpy as np diff --git a/vega/datasets/transforms/Invert.py b/vega/datasets/transforms/Invert.py index 6a3a51c..0732d81 100644 --- a/vega/datasets/transforms/Invert.py +++ b/vega/datasets/transforms/Invert.py @@ -1,12 +1,18 @@ # -*- coding: utf-8 -*- # Copyright (C) 2020. Huawei Technologies Co., Ltd. All rights reserved. -# This program is free software; you can redistribute it and/or modify -# it under the terms of the MIT License. -# This program is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# MIT License for more details. 
+# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. """This is a class for Invert.""" from PIL import ImageOps diff --git a/vega/datasets/transforms/MaskTransform.py b/vega/datasets/transforms/MaskTransform.py index 4743007..7009471 100644 --- a/vega/datasets/transforms/MaskTransform.py +++ b/vega/datasets/transforms/MaskTransform.py @@ -1,12 +1,18 @@ # -*- coding: utf-8 -*- # Copyright (C) 2020. Huawei Technologies Co., Ltd. All rights reserved. -# This program is free software; you can redistribute it and/or modify -# it under the terms of the MIT License. -# This program is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# MIT License for more details. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. """This is a class for MaskTransform.""" import numpy as np diff --git a/vega/datasets/transforms/Normalize.py b/vega/datasets/transforms/Normalize.py index a2f98ab..4e1ce16 100644 --- a/vega/datasets/transforms/Normalize.py +++ b/vega/datasets/transforms/Normalize.py @@ -1,12 +1,18 @@ # -*- coding: utf-8 -*- # Copyright (C) 2020. Huawei Technologies Co., Ltd. All rights reserved. -# This program is free software; you can redistribute it and/or modify -# it under the terms of the MIT License. -# This program is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# MIT License for more details. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. """This is a class for Color.""" from PIL import Image diff --git a/vega/datasets/transforms/Normalize_pair.py b/vega/datasets/transforms/Normalize_pair.py index fabfe0a..192103c 100644 --- a/vega/datasets/transforms/Normalize_pair.py +++ b/vega/datasets/transforms/Normalize_pair.py @@ -1,12 +1,18 @@ # -*- coding: utf-8 -*- # Copyright (C) 2020. Huawei Technologies Co., Ltd. All rights reserved. 
-# This program is free software; you can redistribute it and/or modify -# it under the terms of the MIT License. -# This program is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# MIT License for more details. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. """This is a class for Normalize_pair.""" import numpy as np diff --git a/vega/datasets/transforms/Posterize.py b/vega/datasets/transforms/Posterize.py index 65cb13e..84ea173 100644 --- a/vega/datasets/transforms/Posterize.py +++ b/vega/datasets/transforms/Posterize.py @@ -1,17 +1,23 @@ # -*- coding: utf-8 -*- # Copyright (C) 2020. Huawei Technologies Co., Ltd. All rights reserved. -# This program is free software; you can redistribute it and/or modify -# it under the terms of the MIT License. -# This program is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# MIT License for more details. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. """This is a class for Posterize.""" from PIL import ImageOps -from .ops import int_parameter from vega.common import ClassFactory, ClassType +from .ops import int_parameter @ClassFactory.register(ClassType.TRANSFORM) diff --git a/vega/datasets/transforms/RandomColor_pair.py b/vega/datasets/transforms/RandomColor_pair.py index f50b7fa..daf3e28 100644 --- a/vega/datasets/transforms/RandomColor_pair.py +++ b/vega/datasets/transforms/RandomColor_pair.py @@ -1,12 +1,18 @@ # -*- coding: utf-8 -*- # Copyright (C) 2020. Huawei Technologies Co., Ltd. All rights reserved. -# This program is free software; you can redistribute it and/or modify -# it under the terms of the MIT License. -# This program is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# MIT License for more details. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+# See the License for the specific language governing permissions and +# limitations under the License. """This is a class for RandomCrop_pair.""" import random diff --git a/vega/datasets/transforms/RandomCrop.py b/vega/datasets/transforms/RandomCrop.py index 59d5b6e..cd3b36d 100644 --- a/vega/datasets/transforms/RandomCrop.py +++ b/vega/datasets/transforms/RandomCrop.py @@ -1,12 +1,18 @@ # -*- coding: utf-8 -*- # Copyright (C) 2020. Huawei Technologies Co., Ltd. All rights reserved. -# This program is free software; you can redistribute it and/or modify -# it under the terms of the MIT License. -# This program is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# MIT License for more details. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. """This is a class for RandomCrop_pair.""" import random diff --git a/vega/datasets/transforms/RandomCrop_pair.py b/vega/datasets/transforms/RandomCrop_pair.py index 04a1bc5..60156c1 100644 --- a/vega/datasets/transforms/RandomCrop_pair.py +++ b/vega/datasets/transforms/RandomCrop_pair.py @@ -1,12 +1,18 @@ # -*- coding: utf-8 -*- # Copyright (C) 2020. Huawei Technologies Co., Ltd. All rights reserved. -# This program is free software; you can redistribute it and/or modify -# it under the terms of the MIT License. -# This program is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# MIT License for more details. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. """This is a class for RandomCrop_pair.""" import random diff --git a/vega/datasets/transforms/RandomGaussianBlur_pair.py b/vega/datasets/transforms/RandomGaussianBlur_pair.py index 788813f..5705a31 100644 --- a/vega/datasets/transforms/RandomGaussianBlur_pair.py +++ b/vega/datasets/transforms/RandomGaussianBlur_pair.py @@ -1,12 +1,18 @@ # -*- coding: utf-8 -*- # Copyright (C) 2020. Huawei Technologies Co., Ltd. All rights reserved. -# This program is free software; you can redistribute it and/or modify -# it under the terms of the MIT License. -# This program is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# MIT License for more details. 
+# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. """This is a class for RandomGaussianBlur_pair.""" import random diff --git a/vega/datasets/transforms/RandomHorizontalFlip.py b/vega/datasets/transforms/RandomHorizontalFlip.py index d1d5fa8..497f61b 100644 --- a/vega/datasets/transforms/RandomHorizontalFlip.py +++ b/vega/datasets/transforms/RandomHorizontalFlip.py @@ -1,12 +1,18 @@ # -*- coding: utf-8 -*- # Copyright (C) 2020. Huawei Technologies Co., Ltd. All rights reserved. -# This program is free software; you can redistribute it and/or modify -# it under the terms of the MIT License. -# This program is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# MIT License for more details. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. """This is a class for RandomHorizontalFlip_pair.""" import random diff --git a/vega/datasets/transforms/RandomHorizontalFlipWithBoxes.py b/vega/datasets/transforms/RandomHorizontalFlipWithBoxes.py index fe97401..cc9ca1c 100644 --- a/vega/datasets/transforms/RandomHorizontalFlipWithBoxes.py +++ b/vega/datasets/transforms/RandomHorizontalFlipWithBoxes.py @@ -1,12 +1,18 @@ # -*- coding: utf-8 -*- # Copyright (C) 2020. Huawei Technologies Co., Ltd. All rights reserved. -# This program is free software; you can redistribute it and/or modify -# it under the terms of the MIT License. -# This program is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# MIT License for more details. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
"""This is a class for RandomHorizontalFlipWithBoxes.""" import random diff --git a/vega/datasets/transforms/RandomHorizontalFlip_pair.py b/vega/datasets/transforms/RandomHorizontalFlip_pair.py index cd32c34..56ee0e7 100644 --- a/vega/datasets/transforms/RandomHorizontalFlip_pair.py +++ b/vega/datasets/transforms/RandomHorizontalFlip_pair.py @@ -1,12 +1,18 @@ # -*- coding: utf-8 -*- # Copyright (C) 2020. Huawei Technologies Co., Ltd. All rights reserved. -# This program is free software; you can redistribute it and/or modify -# it under the terms of the MIT License. -# This program is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# MIT License for more details. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. """This is a class for RandomHorizontalFlip_pair.""" import random diff --git a/vega/datasets/transforms/RandomMirrow_pair.py b/vega/datasets/transforms/RandomMirrow_pair.py index 790af39..8bc30b1 100644 --- a/vega/datasets/transforms/RandomMirrow_pair.py +++ b/vega/datasets/transforms/RandomMirrow_pair.py @@ -1,12 +1,18 @@ # -*- coding: utf-8 -*- # Copyright (C) 2020. Huawei Technologies Co., Ltd. All rights reserved. -# This program is free software; you can redistribute it and/or modify -# it under the terms of the MIT License. -# This program is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# MIT License for more details. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. """This is a class for RandomMirrow_pair.""" import numpy as np diff --git a/vega/datasets/transforms/RandomRotate90_pair.py b/vega/datasets/transforms/RandomRotate90_pair.py index 06522c9..8d1a3eb 100644 --- a/vega/datasets/transforms/RandomRotate90_pair.py +++ b/vega/datasets/transforms/RandomRotate90_pair.py @@ -1,12 +1,18 @@ # -*- coding: utf-8 -*- # Copyright (C) 2020. Huawei Technologies Co., Ltd. All rights reserved. -# This program is free software; you can redistribute it and/or modify -# it under the terms of the MIT License. -# This program is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# MIT License for more details. 
+# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. """This is a class for RandomRotate90_pair.""" import random diff --git a/vega/datasets/transforms/RandomRotate_pair.py b/vega/datasets/transforms/RandomRotate_pair.py index d4c62c4..be47eda 100644 --- a/vega/datasets/transforms/RandomRotate_pair.py +++ b/vega/datasets/transforms/RandomRotate_pair.py @@ -1,12 +1,18 @@ # -*- coding: utf-8 -*- # Copyright (C) 2020. Huawei Technologies Co., Ltd. All rights reserved. -# This program is free software; you can redistribute it and/or modify -# it under the terms of the MIT License. -# This program is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# MIT License for more details. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. """This is a class for RandomRotate_pair.""" import random diff --git a/vega/datasets/transforms/RandomVerticallFlip_pair.py b/vega/datasets/transforms/RandomVerticallFlip_pair.py index 6b54fa0..b66ffa6 100644 --- a/vega/datasets/transforms/RandomVerticallFlip_pair.py +++ b/vega/datasets/transforms/RandomVerticallFlip_pair.py @@ -1,12 +1,18 @@ # -*- coding: utf-8 -*- # Copyright (C) 2020. Huawei Technologies Co., Ltd. All rights reserved. -# This program is free software; you can redistribute it and/or modify -# it under the terms of the MIT License. -# This program is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# MIT License for more details. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
"""This is a class for RandomVerticallFlip_pair.""" import random diff --git a/vega/datasets/transforms/Rescale_pair.py b/vega/datasets/transforms/Rescale_pair.py index dc9822b..3040902 100644 --- a/vega/datasets/transforms/Rescale_pair.py +++ b/vega/datasets/transforms/Rescale_pair.py @@ -1,12 +1,18 @@ # -*- coding: utf-8 -*- # Copyright (C) 2020. Huawei Technologies Co., Ltd. All rights reserved. -# This program is free software; you can redistribute it and/or modify -# it under the terms of the MIT License. -# This program is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# MIT License for more details. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. """This is a class for Rescale_pair.""" import random diff --git a/vega/datasets/transforms/Resize.py b/vega/datasets/transforms/Resize.py index 067d944..de74755 100644 --- a/vega/datasets/transforms/Resize.py +++ b/vega/datasets/transforms/Resize.py @@ -1,17 +1,23 @@ # -*- coding: utf-8 -*- # Copyright (C) 2020. Huawei Technologies Co., Ltd. All rights reserved. -# This program is free software; you can redistribute it and/or modify -# it under the terms of the MIT License. -# This program is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# MIT License for more details. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
"""This is a class for Color.""" +from collections import Iterable from PIL import Image from vega.common import ClassFactory, ClassType -from collections import Iterable @ClassFactory.register(ClassType.TRANSFORM) @@ -43,16 +49,16 @@ def __call__(self, img): raise TypeError('Got inappropriate size arg: {}'.format(self.size)) if isinstance(self.size, int): - w, h = img.size - if (w <= h and w == self.size) or (h <= w and h == self.size): + width, height = img.size + if (height <= width and height == self.size) or (width <= height and width == self.size): return img - if w < h: - ow = self.size - oh = int(self.size * h / w) + if width > height: + oh = self.size + ow = int(self.size * width / height) return img.resize((ow, oh), self.interpolation) else: - oh = self.size - ow = int(self.size * w / h) + ow = self.size + oh = int(self.size * height / width) return img.resize((ow, oh), self.interpolation) else: return img.resize(self.size[::-1], self.interpolation) diff --git a/vega/datasets/transforms/Rotate.py b/vega/datasets/transforms/Rotate.py index 60f9002..cb8ef7a 100644 --- a/vega/datasets/transforms/Rotate.py +++ b/vega/datasets/transforms/Rotate.py @@ -1,17 +1,23 @@ # -*- coding: utf-8 -*- # Copyright (C) 2020. Huawei Technologies Co., Ltd. All rights reserved. -# This program is free software; you can redistribute it and/or modify -# it under the terms of the MIT License. -# This program is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# MIT License for more details. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. """This is a class for Rotate.""" import random -from .ops import int_parameter from vega.common import ClassFactory, ClassType +from .ops import int_parameter @ClassFactory.register(ClassType.TRANSFORM) diff --git a/vega/datasets/transforms/SegMapTransform.py b/vega/datasets/transforms/SegMapTransform.py index 63724ee..af12b4c 100644 --- a/vega/datasets/transforms/SegMapTransform.py +++ b/vega/datasets/transforms/SegMapTransform.py @@ -1,12 +1,18 @@ # -*- coding: utf-8 -*- # Copyright (C) 2020. Huawei Technologies Co., Ltd. All rights reserved. -# This program is free software; you can redistribute it and/or modify -# it under the terms of the MIT License. -# This program is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# MIT License for more details. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+# See the License for the specific language governing permissions and +# limitations under the License. """This is a class for SegMapTransform.""" import mmcv diff --git a/vega/datasets/transforms/Sharpness.py b/vega/datasets/transforms/Sharpness.py index 55356d2..7803e06 100644 --- a/vega/datasets/transforms/Sharpness.py +++ b/vega/datasets/transforms/Sharpness.py @@ -1,17 +1,23 @@ # -*- coding: utf-8 -*- # Copyright (C) 2020. Huawei Technologies Co., Ltd. All rights reserved. -# This program is free software; you can redistribute it and/or modify -# it under the terms of the MIT License. -# This program is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# MIT License for more details. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. """This is a class for Sharpness.""" from PIL import ImageEnhance -from .ops import float_parameter from vega.common import ClassFactory, ClassType +from .ops import float_parameter @ClassFactory.register(ClassType.TRANSFORM) diff --git a/vega/datasets/transforms/Shear_X.py b/vega/datasets/transforms/Shear_X.py index e00de90..af27734 100644 --- a/vega/datasets/transforms/Shear_X.py +++ b/vega/datasets/transforms/Shear_X.py @@ -1,18 +1,24 @@ # -*- coding: utf-8 -*- # Copyright (C) 2020. Huawei Technologies Co., Ltd. All rights reserved. -# This program is free software; you can redistribute it and/or modify -# it under the terms of the MIT License. -# This program is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# MIT License for more details. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. """This is a class for Shear_X.""" import random from PIL import Image -from .ops import float_parameter from vega.common import ClassFactory, ClassType +from .ops import float_parameter @ClassFactory.register(ClassType.TRANSFORM) diff --git a/vega/datasets/transforms/Shear_Y.py b/vega/datasets/transforms/Shear_Y.py index 193a6e4..48bbaa7 100644 --- a/vega/datasets/transforms/Shear_Y.py +++ b/vega/datasets/transforms/Shear_Y.py @@ -1,18 +1,24 @@ # -*- coding: utf-8 -*- # Copyright (C) 2020. Huawei Technologies Co., Ltd. All rights reserved. -# This program is free software; you can redistribute it and/or modify -# it under the terms of the MIT License. 
-# This program is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# MIT License for more details. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. """This is a class for Shear_Y.""" import random from PIL import Image -from .ops import float_parameter from vega.common import ClassFactory, ClassType +from .ops import float_parameter @ClassFactory.register(ClassType.TRANSFORM) diff --git a/vega/datasets/transforms/Solarize.py b/vega/datasets/transforms/Solarize.py index 35f2acd..372ff92 100644 --- a/vega/datasets/transforms/Solarize.py +++ b/vega/datasets/transforms/Solarize.py @@ -1,17 +1,23 @@ # -*- coding: utf-8 -*- # Copyright (C) 2020. Huawei Technologies Co., Ltd. All rights reserved. -# This program is free software; you can redistribute it and/or modify -# it under the terms of the MIT License. -# This program is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# MIT License for more details. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. """This is a class for Solarize.""" from PIL import ImageOps -from .ops import int_parameter from vega.common import ClassFactory, ClassType +from .ops import int_parameter @ClassFactory.register(ClassType.TRANSFORM) diff --git a/vega/datasets/transforms/ToTensor.py b/vega/datasets/transforms/ToTensor.py index ab79f33..60223b6 100644 --- a/vega/datasets/transforms/ToTensor.py +++ b/vega/datasets/transforms/ToTensor.py @@ -1,12 +1,18 @@ # -*- coding: utf-8 -*- # Copyright (C) 2020. Huawei Technologies Co., Ltd. All rights reserved. -# This program is free software; you can redistribute it and/or modify -# it under the terms of the MIT License. -# This program is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# MIT License for more details. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. """This is a class for RandomCrop_pair.""" from PIL import Image diff --git a/vega/datasets/transforms/Translate_X.py b/vega/datasets/transforms/Translate_X.py index cf41c3f..ec5a80b 100644 --- a/vega/datasets/transforms/Translate_X.py +++ b/vega/datasets/transforms/Translate_X.py @@ -1,18 +1,24 @@ # -*- coding: utf-8 -*- # Copyright (C) 2020. Huawei Technologies Co., Ltd. All rights reserved. -# This program is free software; you can redistribute it and/or modify -# it under the terms of the MIT License. -# This program is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# MIT License for more details. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. """This is a class for Translate_X.""" import random from PIL import Image -from .ops import int_parameter from vega.common import ClassFactory, ClassType +from .ops import int_parameter @ClassFactory.register(ClassType.TRANSFORM) diff --git a/vega/datasets/transforms/Translate_Y.py b/vega/datasets/transforms/Translate_Y.py index 8ce1f37..cd5ae75 100644 --- a/vega/datasets/transforms/Translate_Y.py +++ b/vega/datasets/transforms/Translate_Y.py @@ -1,18 +1,24 @@ # -*- coding: utf-8 -*- # Copyright (C) 2020. Huawei Technologies Co., Ltd. All rights reserved. -# This program is free software; you can redistribute it and/or modify -# it under the terms of the MIT License. -# This program is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# MIT License for more details. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
"""This is a class for Translate_Y.""" import random from PIL import Image -from .ops import int_parameter from vega.common import ClassFactory, ClassType +from .ops import int_parameter @ClassFactory.register(ClassType.TRANSFORM) diff --git a/vega/datasets/transforms/__init__.py b/vega/datasets/transforms/__init__.py index 4f4e51d..cd2049f 100644 --- a/vega/datasets/transforms/__init__.py +++ b/vega/datasets/transforms/__init__.py @@ -1,12 +1,18 @@ # -*- coding:utf-8 -*- # Copyright (C) 2020. Huawei Technologies Co., Ltd. All rights reserved. -# This program is free software; you can redistribute it and/or modify -# it under the terms of the MIT License. -# This program is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# MIT License for more details. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. """Lazy import transforms.""" diff --git a/vega/datasets/transforms/ops.py b/vega/datasets/transforms/ops.py index 847aaa9..2f03014 100644 --- a/vega/datasets/transforms/ops.py +++ b/vega/datasets/transforms/ops.py @@ -1,12 +1,18 @@ # -*- coding: utf-8 -*- # Copyright (C) 2020. Huawei Technologies Co., Ltd. All rights reserved. -# This program is free software; you can redistribute it and/or modify -# it under the terms of the MIT License. -# This program is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# MIT License for more details. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. """This is the function of scale.""" PARAMETER_MAX = 10 diff --git a/vega/datasets/transforms/pytorch/Numpy2Tensor.py b/vega/datasets/transforms/pytorch/Numpy2Tensor.py index 8f3049d..19c7c60 100644 --- a/vega/datasets/transforms/pytorch/Numpy2Tensor.py +++ b/vega/datasets/transforms/pytorch/Numpy2Tensor.py @@ -1,12 +1,18 @@ # -*- coding: utf-8 -*- # Copyright (C) 2020. Huawei Technologies Co., Ltd. All rights reserved. -# This program is free software; you can redistribute it and/or modify -# it under the terms of the MIT License. -# This program is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# MIT License for more details. 
+# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. """This is a class for Numpy2Tensor.""" import torch diff --git a/vega/datasets/transforms/pytorch/PBATransformer.py b/vega/datasets/transforms/pytorch/PBATransformer.py index fbc8099..9e7d7e9 100644 --- a/vega/datasets/transforms/pytorch/PBATransformer.py +++ b/vega/datasets/transforms/pytorch/PBATransformer.py @@ -1,17 +1,23 @@ # -*- coding: utf-8 -*- # Copyright (C) 2020. Huawei Technologies Co., Ltd. All rights reserved. -# This program is free software; you can redistribute it and/or modify -# it under the terms of the MIT License. -# This program is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# MIT License for more details. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. """This is a class for PBATransformer.""" import numpy as np -from ..Cutout import Cutout from vega.common import ClassFactory, ClassType +from ..Cutout import Cutout @ClassFactory.register(ClassType.TRANSFORM) diff --git a/vega/datasets/transforms/pytorch/ToPILImage_pair.py b/vega/datasets/transforms/pytorch/ToPILImage_pair.py index 0738103..709c92b 100644 --- a/vega/datasets/transforms/pytorch/ToPILImage_pair.py +++ b/vega/datasets/transforms/pytorch/ToPILImage_pair.py @@ -1,12 +1,18 @@ # -*- coding: utf-8 -*- # Copyright (C) 2020. Huawei Technologies Co., Ltd. All rights reserved. -# This program is free software; you can redistribute it and/or modify -# it under the terms of the MIT License. -# This program is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# MIT License for more details. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
"""This is a class for ToPILImage_pair.""" from torchvision.transforms import functional as F diff --git a/vega/datasets/transforms/pytorch/ToTensor_pair.py b/vega/datasets/transforms/pytorch/ToTensor_pair.py index 197c8f5..ebcf085 100644 --- a/vega/datasets/transforms/pytorch/ToTensor_pair.py +++ b/vega/datasets/transforms/pytorch/ToTensor_pair.py @@ -1,12 +1,18 @@ # -*- coding: utf-8 -*- # Copyright (C) 2020. Huawei Technologies Co., Ltd. All rights reserved. -# This program is free software; you can redistribute it and/or modify -# it under the terms of the MIT License. -# This program is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# MIT License for more details. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. """This is a class for ToTensor_pair.""" import torch diff --git a/vega/evaluator/__init__.py b/vega/evaluator/__init__.py index 2361179..af26ba3 100644 --- a/vega/evaluator/__init__.py +++ b/vega/evaluator/__init__.py @@ -1,20 +1,26 @@ # -*- coding:utf-8 -*- # Copyright (C) 2020. Huawei Technologies Co., Ltd. All rights reserved. -# This program is free software; you can redistribute it and/or modify -# it under the terms of the MIT License. -# This program is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# MIT License for more details. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. """Import and register evaluator automatically.""" from vega.common.class_factory import ClassFactory - ClassFactory.lazy_register("vega.evaluator", { "device_evaluator": ["DeviceEvaluator"], "host_evaluator": ["HostEvaluator"], "evaluator": ["Evaluator"], + "custom": ["CustomEvaluator"] }) diff --git a/vega/evaluator/conf.py b/vega/evaluator/conf.py index 46ea456..157fd34 100644 --- a/vega/evaluator/conf.py +++ b/vega/evaluator/conf.py @@ -1,12 +1,18 @@ # -*- coding:utf-8 -*- # Copyright (C) 2020. Huawei Technologies Co., Ltd. All rights reserved. -# This program is free software; you can redistribute it and/or modify -# it under the terms of the MIT License. -# This program is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# MIT License for more details. 
+# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. """Defined Conf for Pipeline.""" from vega.common import ClassType @@ -50,6 +56,8 @@ class DeviceEvaluatorConfig(ConfigSerializable): is_fusion = False reshape_batch_size = 1 save_intermediate_file = False + custom = None + repeat_times = 10 class EvaluatorConfig(ConfigSerializable): diff --git a/vega/evaluator/custom.py b/vega/evaluator/custom.py new file mode 100644 index 0000000..b3bbb7b --- /dev/null +++ b/vega/evaluator/custom.py @@ -0,0 +1,45 @@ +# -*- coding: utf-8 -*- + +# Copyright (C) 2020. Huawei Technologies Co., Ltd. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""Custom Evaluator.""" + +import os +import numpy as np +from vega import ClassFactory, ClassType + + +@ClassFactory.register(ClassType.DEVICE_EVALUATOR) +class CustomEvaluator(): + """Define custom evaluator.""" + + def __init__(self, device_evaluator): + pass + + def get_data(self): + """Get the evaluate data.""" + return np.random.random([1, 12, 320, 320]).astype(np.float32) + + def export_model(self, init_model): + """Export the model to onnx/air and etc.""" + from mindspore.train.serialization import export + from mindspore import Tensor + from mindspore.common.api import _cell_graph_executor + _cell_graph_executor.set_jit_config(jit_config={"jit_level": "o0"}) + fake_input = np.random.random([1, 12, 320, 320]).astype(np.float32) + save_name = os.path.join("./", "ms2air.air") + export(init_model, Tensor(fake_input), Tensor(640), file_name=save_name, file_format='AIR') + model = save_name + return model diff --git a/vega/evaluator/device_evaluator.py b/vega/evaluator/device_evaluator.py index 22e37e6..a66c7bf 100644 --- a/vega/evaluator/device_evaluator.py +++ b/vega/evaluator/device_evaluator.py @@ -1,28 +1,34 @@ # -*- coding: utf-8 -*- # Copyright (C) 2020. Huawei Technologies Co., Ltd. All rights reserved. -# This program is free software; you can redistribute it and/or modify -# it under the terms of the MIT License. -# This program is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# MIT License for more details. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
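CustomEvaluator above is the reference implementation of the hook that the new DeviceEvaluatorConfig.custom field switches on: when `custom` names a class registered under ClassType.DEVICE_EVALUATOR, DeviceEvaluator instantiates it with itself, takes the evaluation input from its get_data() rather than from the validation loader, and forwards the instance to the evaluate service (which, judging by CustomEvaluator.export_model, may also use it to convert the model, e.g. MindSpore to AIR). A user-side variant might look like the sketch below; the class name and input shape are made-up examples.

import numpy as np
from vega.common import ClassFactory, ClassType


@ClassFactory.register(ClassType.DEVICE_EVALUATOR)
class MyCustomEvaluator:
    """Minimal user hook mirroring the CustomEvaluator interface."""

    def __init__(self, device_evaluator):
        self.device_evaluator = device_evaluator  # model/config are reachable here

    def get_data(self):
        # One sample with the shape the exported model expects on the device.
        return np.random.random([1, 3, 224, 224]).astype(np.float32)

It would then be selected through the device evaluator section of the pipeline configuration via the new `custom` field (with `repeat_times` presumably controlling how many device runs the latency measurement is repeated over), though the exact YAML layout is not shown in this patch.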
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. """HostEvaluator used to do evaluate process on gpu.""" -import os import datetime import logging -import numpy as np +import os import traceback +import numpy as np import vega from vega.common import ClassFactory, ClassType from vega.common.wrappers import train_process_wrapper from vega.report import ReportClient from vega.trainer.utils import WorkerTypes -from .tools.evaluate_davinci_bolt import evaluate from .conf import DeviceEvaluatorConfig from .evaluator import Evaluator +from .tools.evaluate_davinci_bolt import evaluate @ClassFactory.register(ClassType.DEVICE_EVALUATOR) @@ -44,7 +50,6 @@ def __init__(self, worker_info=None, model=None, saved_folder=None, saved_step_n """Init DeviceEvaluator.""" super(Evaluator, self).__init__() self.config = DeviceEvaluatorConfig() - # self.backend = self.config.backend self.hardware = self.config.hardware self.remote_host = self.config.remote_host self.intermediate_format = self.config.intermediate_format @@ -63,221 +68,282 @@ def __init__(self, worker_info=None, model=None, saved_folder=None, saved_step_n self.weights_file = weights_file self.saved_folder = saved_folder self.saved_step_name = saved_step_name + self.custom = self.config.custom + self.repeat_times = self.config.repeat_times - def valid(self): # noqa: C901 + def _get_data(self): + def _get_data_default(): + if vega.is_torch_backend(): + for batch in self.valid_loader: + data = batch[0] + break + else: + for batch in self.valid_loader.create_dict_iterator(): + data = batch["image"] + data = data.asnumpy() + break + return data + + if self.custom is None: + data = _get_data_default() + reshape_batch_size = self.config.reshape_batch_size + if reshape_batch_size and isinstance(reshape_batch_size, int): + data = data[0:reshape_batch_size] + else: + custom_cls = ClassFactory.get_cls(ClassType.DEVICE_EVALUATOR, self.custom)(self) + self.custom = custom_cls + data = self.custom.get_data() + + return data + + def valid(self): """Validate the latency in Davinci or bolt.""" test_data = os.path.join(self.get_local_worker_path(self.step_name, self.worker_id), "input.bin") latency_sum = 0 data_num = 0 global_step = 0 - error_threshold = int(len(self.valid_loader) * 0.05) - error_count = 0 - repeat_times = 1 now_time = datetime.datetime.now().strftime('%Y%m%d%H%M%S%f') job_id = self.step_name + "_" + str(self.worker_id) + "_" + now_time logging.info("The job id of evaluate service is {}.".format(job_id)) if vega.is_torch_backend(): - import torch - from vega.metrics.pytorch import Metrics - metrics = Metrics(self.config.metric) - for step, batch in enumerate(self.valid_loader): - if isinstance(batch, list) or isinstance(batch, tuple): - data = batch[0] - target = batch[1] - else: - raise ValueError("The dataset format must be tuple or list," - "but get {}.".format(type(batch))) - if not self.calculate_metric: - repeat_times = 10 - reshape_batch_size = self.config.reshape_batch_size - if reshape_batch_size and isinstance(reshape_batch_size, int): - data = data[0:reshape_batch_size] - target = target[0:reshape_batch_size] - - if not self.calculate_metric and global_step >= 1: - break - if 
torch.is_tensor(data): - data = data.numpy() - data.tofile(test_data) - reuse_model = False if global_step == 0 else True - results = evaluate(backend="pytorch", hardware=self.hardware, remote_host=self.remote_host, - model=self.model, weight=None, test_data=test_data, input_shape=data.shape, - reuse_model=reuse_model, job_id=job_id, repeat_times=repeat_times, - precision=self.precision, intermediate_format=self.intermediate_format, - opset_version=self.opset_version, - save_intermediate_file=self.config.save_intermediate_file) - if self.calculate_metric and results.get("status") != "sucess" and error_count <= error_threshold: - error_count += 1 - break - latency = np.float(results.get("latency")) - data_num += 1 - latency_sum += latency - - if global_step == 0 and self.calculate_metric: - self.model.eval() - real_output = self.model(torch.Tensor(data)) - real_output = real_output.detach().numpy() - - if isinstance(real_output, tuple): - output_shape = real_output[0].shape - else: - output_shape = real_output.shape - if self.calculate_metric: - out_data = np.array(results.get("out_data")).astype(np.float32) - output = out_data.reshape(output_shape) - output = torch.Tensor(output) - metrics(output, target) - pfms = metrics.results - else: - pfms = {} + pfms, latency_sum, data_num = self._torch_valid(test_data, job_id) + elif vega.is_tf_backend(): + pfms, latency_sum, data_num = self._tf_valid(test_data, latency_sum, data_num, global_step, job_id) + elif vega.is_ms_backend(): + pfms, latency_sum, data_num = self._ms_valid(test_data, job_id) + latency_avg = latency_sum / data_num + logging.info("The latency in {} is {} ms.".format(self.hardware, latency_avg)) - global_step += 1 - if global_step % self.config.report_freq == 0: - logging.info("step [{}/{}], latency [{}], valid metric [{}]".format( - step + 1, len(self.valid_loader), latency, pfms)) + if self.config.evaluate_latency: + pfms["latency"] = latency_avg + logging.info("valid performance: {}".format(pfms)) + return pfms - elif vega.is_tf_backend(): - import tensorflow as tf - from vega.metrics.tensorflow.metrics import Metrics - tf.reset_default_graph() - valid_data = self.valid_loader.input_fn() - metrics = Metrics(self.config.metric) - iterator = valid_data.make_one_shot_iterator() - one_element = iterator.get_next() - total_metric = {} - avg_metric = {} - weight_file = self.get_local_worker_path(self.step_name, self.worker_id) - for step in range(len(self.valid_loader)): - with tf.Session() as sess: - batch = sess.run(one_element) + def _torch_valid(self, test_data, job_id): + import torch + if self.calculate_metric: + return self._torch_valid_metric(test_data, job_id) + + data = self._get_data() + if torch.is_tensor(data): + data = data.numpy() + data.tofile(test_data) + results = evaluate(backend="pytorch", hardware=self.hardware, remote_host=self.remote_host, + model=self.model, weight=None, test_data=test_data, input_shape=data.shape, + reuse_model=False, job_id=job_id, precision=self.precision, + cal_metric=self.calculate_metric, + intermediate_format=self.intermediate_format, + opset_version=self.opset_version, repeat_times=self.repeat_times, + save_intermediate_file=self.config.save_intermediate_file, custom=self.custom) + + latency = np.float(results.get("latency")) + data_num = 1 + pfms = {} + return pfms, latency, data_num + + def _torch_valid_metric(self, test_data, job_id): + import torch + from vega.metrics.pytorch import Metrics + metrics = Metrics(self.config.metric) + latency_sum = 0 + error_count = 0 + 
error_threshold = int(len(self.valid_loader) * 0.05) + for step, batch in enumerate(self.valid_loader): + if isinstance(batch, list) or isinstance(batch, tuple): data = batch[0] target = batch[1] - if not self.calculate_metric: - repeat_times = 10 - reshape_batch_size = self.config.reshape_batch_size - if reshape_batch_size and isinstance(reshape_batch_size, int): - data = data[0:reshape_batch_size] - target = target[0:reshape_batch_size] - - if not self.calculate_metric and global_step >= 1: - break - data.tofile(test_data) - - if global_step == 0 and self.calculate_metric: - input_tf = tf.placeholder(tf.float32, shape=data.shape, name='input_tf') - self.model.training = False - output = self.model(input_tf) - if isinstance(output, tuple): - output_shape = output[0].shape - else: - output_shape = output.shape - - reuse_model = False if global_step == 0 else True - results = evaluate(backend="tensorflow", hardware=self.hardware, remote_host=self.remote_host, - model=self.model, weight=weight_file, test_data=test_data, input_shape=data.shape, - reuse_model=reuse_model, job_id=job_id, quantize=self.quantize, - repeat_times=repeat_times, precision=self.precision, - save_intermediate_file=self.config.save_intermediate_file) - if self.calculate_metric and results.get("status") != "sucess" and error_count <= error_threshold: - error_count += 1 - break - latency = np.float(results.get("latency")) - data_num += 1 - latency_sum += latency - - if self.calculate_metric: - out_data = np.array(results.get("out_data")).astype(np.float32) - output = out_data.reshape(output_shape) - target_tf = tf.placeholder(target.dtype, shape=target.shape, name='target_tf') - output_tf = tf.placeholder(output.dtype, shape=output.shape, name='output_tf') - metrics_dict = metrics(output_tf, target_tf) - with tf.Session() as sess: - sess.run(tf.local_variables_initializer()) - for name, metric in metrics_dict.items(): - tf_metric, tf_metric_update = metric - sess.run(tf_metric_update, feed_dict={output_tf: output, target_tf: target}) - eval_value = sess.run(tf_metric) - if global_step == 0: - total_metric[name] = eval_value - else: - total_metric[name] += eval_value - avg_metric[name] = total_metric[name] / (global_step + 1) - metrics.update(avg_metric) - pfms = metrics.results + else: + raise ValueError("The dataset format must be tuple or list," + "but get {}.".format(type(batch))) + if torch.is_tensor(data): + data = data.numpy() + data.tofile(test_data) + reuse_model = False if step == 0 else True + results = evaluate(backend="pytorch", hardware=self.hardware, remote_host=self.remote_host, + model=self.model, weight=None, test_data=test_data, input_shape=data.shape, + reuse_model=reuse_model, job_id=job_id, + precision=self.precision, cal_metric=self.calculate_metric, + intermediate_format=self.intermediate_format, + opset_version=self.opset_version, repeat_times=self.repeat_times, + save_intermediate_file=self.config.save_intermediate_file) + if results.get("status") != "sucess" and error_count <= error_threshold: + error_count += 1 + break + latency = np.float(results.get("latency")) + latency_sum += latency + + if step == 0: + self.model.eval() + real_output = self.model(torch.Tensor(data)) + real_output = real_output.detach().numpy() + + if isinstance(real_output, tuple): + output_shape = real_output[0].shape else: - pfms = {} + output_shape = real_output.shape + out_data = np.array(results.get("out_data")).astype(np.float32) + output = out_data.reshape(output_shape) + output = torch.Tensor(output) + 
metrics(output, target) + pfms = metrics.results - global_step += 1 + if step % self.config.report_freq == 0: + logging.info("step [{}/{}], latency [{}], valid metric [{}]".format( + step + 1, len(self.valid_loader), latency, pfms)) - if global_step % self.config.report_freq == 0: - logging.info("step [{}/{}], latency [{}], valid metric [{}]".format( - step + 1, len(self.valid_loader), latency, pfms)) + return pfms, latency_sum, step - elif vega.is_ms_backend(): - import mindspore - from vega.metrics.mindspore import Metrics - metrics = Metrics(self.config.metric) - for step, batch in enumerate(self.valid_loader.create_dict_iterator()): - data = batch["image"] - target = batch["label"] - if not self.calculate_metric: - repeat_times = 10 - reshape_batch_size = self.config.reshape_batch_size - if reshape_batch_size and isinstance(reshape_batch_size, int): - data = data[0:reshape_batch_size] - target = target[0:reshape_batch_size] - - if not self.calculate_metric and global_step >= 1: - break - data = data.asnumpy() - data.tofile(test_data) - reuse_model = False if global_step == 0 else True - results = evaluate(backend="mindspore", hardware=self.hardware, remote_host=self.remote_host, - model=self.model, weight=None, test_data=test_data, input_shape=data.shape, - reuse_model=reuse_model, job_id=job_id, repeat_times=repeat_times, - precision=self.precision, save_intermediate_file=self.config.save_intermediate_file) - latency = np.float(results.get("latency")) - latency_sum += latency - data_num += 1 - - if global_step == 0 and self.calculate_metric: - real_output = self.model(mindspore.Tensor(data)) - real_output = real_output.asnumpy() - if isinstance(real_output, tuple): - output_shape = real_output[0].shape - else: - output_shape = real_output.shape - if self.calculate_metric: - out_data = np.array(results.get("out_data")).astype(np.float32) - output = out_data.reshape(output_shape) - output = mindspore.Tensor(output) - metrics(output, target) - pfms = metrics.results + def _tf_valid(self, test_data, latency_sum, data_num, global_step, job_id): + import tensorflow as tf + from vega.metrics.tensorflow.metrics import Metrics + error_count = 0 + error_threshold = int(len(self.valid_loader) * 0.05) + tf.reset_default_graph() + valid_data = self.valid_loader.input_fn() + metrics = Metrics(self.config.metric) + iterator = valid_data.make_one_shot_iterator() + one_element = iterator.get_next() + total_metric = {} + avg_metric = {} + weight_file = self.get_local_worker_path(self.step_name, self.worker_id) + for step in range(len(self.valid_loader)): + with tf.Session() as sess: + batch = sess.run(one_element) + data = batch[0] + target = batch[1] + if not self.calculate_metric: + reshape_batch_size = self.config.reshape_batch_size + if reshape_batch_size and isinstance(reshape_batch_size, int): + data = data[0:reshape_batch_size] + target = target[0:reshape_batch_size] + + if not self.calculate_metric and global_step >= 1: + break + data.tofile(test_data) + + if global_step == 0 and self.calculate_metric: + input_tf = tf.placeholder(tf.float32, shape=data.shape, name='input_tf') + self.model.training = False + output = self.model(input_tf) + if isinstance(output, tuple): + output_shape = output[0].shape else: - pfms = {} + output_shape = output.shape - global_step += 1 - if global_step % self.config.report_freq == 0: - logging.info("step [{}/{}], latency [{}], valid metric [{}]".format( - step + 1, len(self.valid_loader), latency, pfms)) + reuse_model = False if global_step == 0 else True + 
results = evaluate(backend="tensorflow", hardware=self.hardware, remote_host=self.remote_host, + model=self.model, weight=weight_file, test_data=test_data, input_shape=data.shape, + reuse_model=reuse_model, job_id=job_id, quantize=self.quantize, + repeat_times=self.repeat_times, precision=self.precision, + cal_metric=self.calculate_metric, + save_intermediate_file=self.config.save_intermediate_file) + if self.calculate_metric and results.get("status") != "sucess" and error_count <= error_threshold: + error_count += 1 + break + latency = np.float(results.get("latency")) + data_num += 1 + latency_sum += latency - latency_avg = latency_sum / data_num - logging.info("The latency in {} is {} ms.".format(self.hardware, latency_avg)) + if self.calculate_metric: + pfms = self._calc_tf_metric( + results, output_shape, target, metrics, global_step, total_metric, avg_metric) + else: + pfms = {} - if self.config.evaluate_latency: - pfms["latency"] = latency_avg - logging.info("valid performance: {}".format(pfms)) - return pfms + global_step += 1 + + if global_step % self.config.report_freq == 0: + logging.info("step [{}/{}], latency [{}], valid metric [{}]".format( + step + 1, len(self.valid_loader), latency, pfms)) + return pfms, latency_sum, data_num + + def _calc_tf_metric(self, results, output_shape, target, metrics, global_step, total_metric, avg_metric): + import tensorflow as tf + out_data = np.array(results.get("out_data")).astype(np.float32) + output = out_data.reshape(output_shape) + target_tf = tf.placeholder(target.dtype, shape=target.shape, name='target_tf') + output_tf = tf.placeholder(output.dtype, shape=output.shape, name='output_tf') + metrics_dict = metrics(output_tf, target_tf) + with tf.Session() as sess: + sess.run(tf.local_variables_initializer()) + for name, metric in metrics_dict.items(): + tf_metric, tf_metric_update = metric + sess.run(tf_metric_update, feed_dict={output_tf: output, target_tf: target}) + eval_value = sess.run(tf_metric) + if global_step == 0: + total_metric[name] = eval_value + else: + total_metric[name] += eval_value + avg_metric[name] = total_metric[name] / (global_step + 1) + metrics.update(avg_metric) + return metrics.results + + def _ms_valid(self, test_data, job_id): + if self.calculate_metric: + return self._ms_valid_metric(test_data, job_id) + + data = self._get_data() + data.tofile(test_data) + results = evaluate( + backend="mindspore", hardware=self.hardware, remote_host=self.remote_host, + model=self.model, weight=None, test_data=test_data, input_shape=data.shape, + reuse_model=False, job_id=job_id, precision=self.precision, cal_metric=self.calculate_metric, + repeat_times=self.repeat_times, + save_intermediate_file=self.config.save_intermediate_file, custom=self.custom) + latency = np.float(results.get("latency")) + pfms = {} + data_num = 1 + return pfms, latency, data_num + + def _ms_valid_metric(self, test_data, job_id): + import mindspore + from vega.metrics.mindspore import Metrics + metrics = Metrics(self.config.metric) + latency_sum = 0 + for step, batch in enumerate(self.valid_loader.create_dict_iterator()): + data = batch["image"] + target = batch["label"] + data = data.asnumpy() + data.tofile(test_data) + reuse_model = False if step == 0 else True + results = evaluate( + backend="mindspore", hardware=self.hardware, remote_host=self.remote_host, + model=self.model, weight=None, test_data=test_data, input_shape=data.shape, + reuse_model=reuse_model, job_id=job_id, precision=self.precision, cal_metric=self.calculate_metric, + 
repeat_times=self.repeat_times, + save_intermediate_file=self.config.save_intermediate_file) + latency = np.float(results.get("latency")) + latency_sum += latency + + if step == 0: + real_output = self.model(mindspore.Tensor(data)) + real_output = real_output.asnumpy() + if isinstance(real_output, tuple): + output_shape = real_output[0].shape + else: + output_shape = real_output.shape + + out_data = np.array(results.get("out_data")).astype(np.float32) + output = out_data.reshape(output_shape) + output = mindspore.Tensor(output) + metrics(output, target) + pfms = metrics.results + + if step % self.config.report_freq == 0: + logging.info("step [{}/{}], latency [{}], valid metric [{}]".format( + step + 1, len(self.valid_loader), latency, pfms)) + return pfms, latency_sum, step @train_process_wrapper def train_process(self): """Validate process for the model validate worker.""" try: self.load_model() - self.valid_loader = self._init_dataloader(mode='test') + if self.custom is None: + self.valid_loader = self._init_dataloader(mode='test') performance = self.valid() ReportClient().update(self.step_name, self.worker_id, performance=performance) logging.info(f"finished device evaluation, id: {self.worker_id}, performance: {performance}") - except Exception: - logging.error(traceback.format_exc()) - logging.error("Failed to evalute on device.") + except Exception as e: + logging.debug(traceback.format_exc()) + logging.error(f"Failed to evaluate on device, message: {e}.") diff --git a/vega/evaluator/evaluator.py b/vega/evaluator/evaluator.py index 125eed1..d1f7496 100644 --- a/vega/evaluator/evaluator.py +++ b/vega/evaluator/evaluator.py @@ -1,28 +1,35 @@ # -*- coding: utf-8 -*- # Copyright (C) 2020. Huawei Technologies Co., Ltd. All rights reserved. -# This program is free software; you can redistribute it and/or modify -# it under the terms of the MIT License. -# This program is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# MIT License for more details. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. """Evaluate used to do evaluate process.""" + import copy import logging import os -import vega import glob +import vega from vega.common import ClassFactory, ClassType from vega.trainer.distributed_worker import DistributedWorker from vega.trainer.utils import WorkerTypes from vega.common import FileOps, Config from vega.datasets import Adapter -from .conf import EvaluatorConfig from vega.model_zoo import ModelZoo from vega.networks.model_config import ModelConfig from vega.core.pipeline.conf import PipeStepConfig +from .conf import EvaluatorConfig logger = logging.getLogger(__name__) diff --git a/vega/evaluator/host_evaluator.py b/vega/evaluator/host_evaluator.py index a2b24e2..f1921f7 100644 --- a/vega/evaluator/host_evaluator.py +++ b/vega/evaluator/host_evaluator.py @@ -1,12 +1,18 @@ # -*- coding: utf-8 -*- # Copyright (C) 2020. Huawei Technologies Co., Ltd.
All rights reserved. -# This program is free software; you can redistribute it and/or modify -# it under the terms of the MIT License. -# This program is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# MIT License for more details. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. """HostEvaluator used to do evaluate process on gpu.""" @@ -181,9 +187,9 @@ def train_process(self): performance = self.valid(self.valid_loader) ReportClient().update(self.step_name, self.worker_id, performance=performance) logging.info(f"finished host evaluation, id: {self.worker_id}, performance: {performance}") - except Exception: - logging.error(traceback.format_exc()) - logging.error("Failed to evalute on host.") + except Exception as e: + logging.debug(traceback.format_exc()) + logging.error(f"Failed to evaluate on host, message: {e}") def _init_session_config(self): import tensorflow as tf @@ -193,6 +199,8 @@ def _init_session_config(self): return sess_config elif vega.is_npu_device(): from tensorflow.core.protobuf.rewriter_config_pb2 import RewriterConfig + # Initialize npu bridge + from npu_bridge import npu_init sess_config = tf.ConfigProto() sess_config.graph_options.rewrite_options.remapping = RewriterConfig.OFF custom_op = sess_config.graph_options.rewrite_options.custom_optimizers.add() diff --git a/vega/evaluator/tools/evaluate_davinci_bolt.py b/vega/evaluator/tools/evaluate_davinci_bolt.py index dac6e4a..77efe2b 100644 --- a/vega/evaluator/tools/evaluate_davinci_bolt.py +++ b/vega/evaluator/tools/evaluate_davinci_bolt.py @@ -1,25 +1,28 @@ # -*- coding: utf-8 -*- # Copyright (C) 2020. Huawei Technologies Co., Ltd. All rights reserved. -# This program is free software; you can redistribute it and/or modify -# it under the terms of the MIT License. -# This program is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# MIT License for more details. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
"""The EvaluateService of client.""" -import os import logging -import subprocess -import pickle +import os import numpy as np from .rest import post -# flake8: noqa: C901 def evaluate(backend, hardware, remote_host, model, weight, test_data, input_shape=None, reuse_model=False, job_id=None, - quantize=False, repeat_times=1, precision='FP32', **kwargs): + quantize=False, repeat_times=10, precision='FP32', cal_metric=False, **kwargs): """Evaluate interface of the EvaluateService. :param backend: the backend can be one of "tensorflow", "caffe" and "pytorch" @@ -37,14 +40,7 @@ def evaluate(backend, hardware, remote_host, model, weight, test_data, input_sha :return: the latency in Davinci or Bolt :rtype: float """ - if backend not in ["tensorflow", "caffe", "pytorch", "mindspore"]: - raise ValueError("The backend only support tensorflow, caffe, pytorch and mindspore.") - - if hardware not in ["Davinci", "Bolt", "Kirin990_npu"]: - raise ValueError("The hardware only support Davinci and Bolt.") - - if input_shape is None: - raise ValueError("The input shape must be provided.") + _check_backend_hardware_shape(backend, hardware, input_shape) if not reuse_model: base_save_dir = os.path.dirname(test_data) @@ -62,7 +58,7 @@ def evaluate(backend, hardware, remote_host, model, weight, test_data, input_sha upload_data = {"data_file": data_file} evaluate_config = {"backend": backend, "hardware": hardware, "remote_host": remote_host, "reuse_model": reuse_model, - "job_id": job_id, "repeat_times": repeat_times, "precision": precision} + "job_id": job_id, "repeat_times": repeat_times, "precision": precision, "cal_metric": cal_metric} if backend == 'tensorflow': shape_list = [str(s) for s in input_shape] shape_cfg = {"input_shape": "Placeholder:" + ",".join(shape_list)} @@ -72,8 +68,32 @@ def evaluate(backend, hardware, remote_host, model, weight, test_data, input_sha out_node_cfg = {"out_nodes": out_node_name} evaluate_config.update(out_node_cfg) + evaluate_result = _post_request(remote_host, upload_data, test_data, evaluate_config) + + if not kwargs.get("save_intermediate_file", False): + if os.path.exists(model): + os.remove(model) + if weight and os.path.isfile(weight) and os.path.exists(weight): + os.remove(weight) + if os.path.exists(test_data): + os.remove(test_data) + + return evaluate_result + + +def _check_backend_hardware_shape(backend, hardware, input_shape): + if backend not in ["tensorflow", "caffe", "pytorch", "mindspore"]: + raise ValueError("The backend only support tensorflow, caffe, pytorch and mindspore.") + + if hardware not in ["Davinci", "Bolt", "Kirin990_npu"]: + raise ValueError("The hardware only support Davinci and Bolt.") + + if input_shape is None: + raise ValueError("The input shape must be provided.") + + +def _post_request(remote_host, upload_data, test_data, evaluate_config): evaluate_result = post(host=remote_host, files=upload_data, data=evaluate_config) - # evaluate_result = requests.get(remote_host, proxies={"http": None}).json() if evaluate_result.get("status") != "sucess": logging.warning( "Evaluate failed and will try again, the status is {}, the timestamp is {}, \ @@ -101,15 +121,6 @@ def evaluate(backend, hardware, remote_host, model, weight, test_data, input_sha else: logging.info("Evaluate sucess! 
The latency is {}.".format(evaluate_result["latency"])) - if not kwargs.get("save_intermediate_file", False): - # clean intermediate file - if os.path.exists(model): - os.remove(model) - if weight and os.path.isfile(weight) and os.path.exists(weight): - os.remove(weight) - if os.path.exists(test_data): - os.remove(test_data) - return evaluate_result @@ -130,35 +141,13 @@ def preprocessing_model(backend, hardware, model, weight, input_shape, base_save :type base_save_dir: str """ if backend == "pytorch": - if hardware == "Bolt": - opset_version = kwargs["opset_version"] - from .pytorch2onnx import pytorch2onnx - model = pytorch2onnx(model, input_shape, base_save_dir, opset_version) - elif kwargs["intermediate_format"] == "caffe": - model_file = os.path.join(base_save_dir, "torch_model.pkl") - shape_file = os.path.join(base_save_dir, "input_shape.pkl") - with open(model_file, "wb") as f: - pickle.dump(model, f) - with open(shape_file, "wb") as f: - pickle.dump(input_shape, f) - env = os.environ.copy() - abs_path = os.path.abspath(__file__) - cur_dir = os.path.dirname(abs_path) - shell_file = os.path.join(cur_dir, "pytorch2caffe.sh") - command_line = ["bash", shell_file, cur_dir, model_file, shape_file] - try: - subprocess.check_output(command_line, env=env) - except subprocess.CalledProcessError as exc: - logging.error("convert torch model to caffe model failed.\ - the return code is: {}.".format(exc.returncode)) - model = os.path.join(base_save_dir, "torch2caffe.prototxt") - weight = os.path.join(base_save_dir, "torch2caffe.caffemodel") - backend = "caffe" + if kwargs.get("custom", None) is not None: + model = kwargs.get("custom").export_model(model) else: from .pytorch2onnx import pytorch2onnx opset_version = kwargs["opset_version"] model = pytorch2onnx(model, input_shape, base_save_dir, opset_version) - backend = "onnx" + backend = "onnx" elif backend == "tensorflow": pb_model_file = os.path.join(base_save_dir, "tf_model.pb") if os.path.exists(pb_model_file): @@ -167,12 +156,15 @@ def preprocessing_model(backend, hardware, model, weight, input_shape, base_save freeze_graph(model, weight, pb_model_file, input_shape, quantize, test_data) model = pb_model_file elif backend == "mindspore": - from mindspore.train.serialization import export - from mindspore import Tensor - fake_input = np.random.random(input_shape).astype(np.float32) - save_name = os.path.join(base_save_dir, "ms2air.air") - export(model, Tensor(fake_input), file_name=save_name, file_format='AIR') - model = save_name + if kwargs.get("custom", None) is not None: + model = kwargs.get("custom").export_model(model) + else: + from mindspore.train.serialization import export + from mindspore import Tensor + fake_input = np.random.random(input_shape).astype(np.float32) + save_name = os.path.join(base_save_dir, "ms2air.air") + export(model, Tensor(fake_input), file_name=save_name, file_format='AIR') + model = save_name return model, weight, backend @@ -197,7 +189,6 @@ def freeze_graph(model, weight_file, output_graph_file, input_shape, quantize, t output_name = [output.name.split(":")[0]] with tf.Session() as sess: sess.run(tf.global_variables_initializer()) - # if weight_file is None, only latency can be evaluated if weight_file is not None: saver = tf.train.Saver() last_weight_file = tf.train.latest_checkpoint(weight_file) diff --git a/vega/evaluator/tools/pytorch2caffe.py b/vega/evaluator/tools/pytorch2caffe.py deleted file mode 100644 index 18c825f..0000000 --- a/vega/evaluator/tools/pytorch2caffe.py +++ /dev/null @@ -1,56 +0,0 @@ 
-# -*- coding: utf-8 -*- - -# Copyright (C) 2020. Huawei Technologies Co., Ltd. All rights reserved. -# This program is free software; you can redistribute it and/or modify -# it under the terms of the MIT License. -# This program is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# MIT License for more details. - -"""The tools to convert the pytorch model to onnx model.""" -import sys -import torch -from torch.autograd import Variable -import logging -import pickle -import os - -abs_path = os.path.abspath(__file__) -third_party_path = os.path.dirname(os.path.dirname(os.path.dirname(os.path.dirname(abs_path)))) -PytorchToCaffe_path = os.path.join(third_party_path, "third_party/PytorchToCaffe-master/") -sys.path.append(PytorchToCaffe_path) - - -def pytorch2caffe(model, input_shape, save_dir): - """Convert the pytorch model to onnx model. - - :param model: pytorch model class - :type model: class - :param input_shape: the shape of input - :type input_shape: list - :param onnx_save_path: the path and filename to save the onnx model file - :type onnx_save_path: str - """ - import pytorch_to_caffe # noqa - name = 'torch2caffe' - model = model.cpu() - model.eval() - input = Variable(torch.ones(input_shape)) - pytorch_to_caffe.trans_net(model, input, name) - prototxt_file = os.path.join(save_dir, "torch2caffe.prototxt") - caffemodel_file = os.path.join(save_dir, "torch2caffe.caffemodel") - pytorch_to_caffe.save_prototxt(prototxt_file) - pytorch_to_caffe.save_caffemodel(caffemodel_file) - logging.info("pytorch2caffe finished.") - - -if __name__ == "__main__": - model_file = sys.argv[1] - shape_file = sys.argv[2] - save_dir = os.path.dirname(model_file) - with open(model_file, "rb") as f: - model = pickle.load(f) - with open(shape_file, "rb") as f: - input_shape = pickle.load(f) - pytorch2caffe(model, input_shape, save_dir) diff --git a/vega/evaluator/tools/pytorch2caffe.sh b/vega/evaluator/tools/pytorch2caffe.sh deleted file mode 100644 index a2316cb..0000000 --- a/vega/evaluator/tools/pytorch2caffe.sh +++ /dev/null @@ -1 +0,0 @@ -python3 $1/pytorch2caffe.py $2 $3 \ No newline at end of file diff --git a/vega/evaluator/tools/pytorch2onnx.py b/vega/evaluator/tools/pytorch2onnx.py index 184d956..1f0b766 100644 --- a/vega/evaluator/tools/pytorch2onnx.py +++ b/vega/evaluator/tools/pytorch2onnx.py @@ -1,19 +1,22 @@ # -*- coding: utf-8 -*- # Copyright (C) 2020. Huawei Technologies Co., Ltd. All rights reserved. -# This program is free software; you can redistribute it and/or modify -# it under the terms of the MIT License. -# This program is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# MIT License for more details. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
"""The tools to convert the pytorch model to onnx model.""" from torch.autograd import Variable import torch -import subprocess -import logging -from vega.common.general import General def pytorch2onnx(model, input_shape, base_save_dir, opset_version=9): @@ -26,16 +29,9 @@ def pytorch2onnx(model, input_shape, base_save_dir, opset_version=9): :param onnx_save_path: the path and filename to save the onnx model file :type onnx_save_path: str """ - # model.load_state_dict(torch.load(weight)) - # Export the trained model to ONNX dump_input = Variable(torch.randn(input_shape)) + if hasattr(model, "get_ori_model"): + model = model.get_ori_model() torch.onnx.export(model, dump_input, "{}/torch_model.onnx".format(base_save_dir), opset_version=opset_version) - # try: - # subprocess.call( - # f"{General.python_command} -m onnxsim {base_save_dir}/torch_model.onnx " - # f"{base_save_dir}/torch_model_sim.onnx", shell=True) - # except Exception as e: - # logging.error("{}".format(str(e))) - # onnx_model = f"{base_save_dir}/torch_model_sim.onnx" onnx_model = f"{base_save_dir}/torch_model.onnx" return onnx_model diff --git a/vega/evaluator/tools/quantize_model.py b/vega/evaluator/tools/quantize_model.py index f5b0cb9..0fc699c 100644 --- a/vega/evaluator/tools/quantize_model.py +++ b/vega/evaluator/tools/quantize_model.py @@ -1,15 +1,22 @@ # -*- coding: utf-8 -*- # Copyright (C) 2020. Huawei Technologies Co., Ltd. All rights reserved. -# This program is free software; you can redistribute it and/or modify -# it under the terms of the MIT License. -# This program is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# MIT License for more details. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. """AMCT quantize functions.""" import os +import shutil import logging import numpy as np import tensorflow as tf @@ -48,7 +55,7 @@ def quantize_model(output_graph_file, test_data, input_holder, output): outputs=[output_name[:-2]], record_file=record_path, save_path=save_path) - os.system('cp {}_quantized.pb {}'.format(save_path, output_graph_file)) + shutil.copy('{}_quantized.pb'.format(save_path), output_graph_file) logging.info('amct quantinize successfully.') diff --git a/vega/evaluator/tools/rest.py b/vega/evaluator/tools/rest.py index f67c12b..7e2be86 100644 --- a/vega/evaluator/tools/rest.py +++ b/vega/evaluator/tools/rest.py @@ -1,20 +1,30 @@ # -*- coding: utf-8 -*- # Copyright (C) 2020. Huawei Technologies Co., Ltd. All rights reserved. -# This program is free software; you can redistribute it and/or modify -# it under the terms of the MIT License. -# This program is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# MIT License for more details. 
+# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. """Rest post operation.""" import requests +from vega.common.general import General +from vega import security def post(host, files, data): - """Post a rest request.""" - result = requests.post(host, files=files, data=data, proxies={"http": None}) - data = result.json() - return data + """Post a REST request.""" + if not General.security: + result = requests.post(host, files=files, data=data, proxies={"http": None}).json() + else: + result = security.post(host, files, data) + return result diff --git a/vega/evaluator/utils.py b/vega/evaluator/utils.py index 94d8f79..23edfe1 100644 --- a/vega/evaluator/utils.py +++ b/vega/evaluator/utils.py @@ -1,12 +1,18 @@ # -*- coding: utf-8 -*- # Copyright (C) 2020. Huawei Technologies Co., Ltd. All rights reserved. -# This program is free software; you can redistribute it and/or modify -# it under the terms of the MIT License. -# This program is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# MIT License for more details. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. """Fake loss for mindspore.""" from mindspore.nn.cell import Cell diff --git a/vega/metrics/__init__.py b/vega/metrics/__init__.py index 442c275..9214c68 100644 --- a/vega/metrics/__init__.py +++ b/vega/metrics/__init__.py @@ -1,12 +1,18 @@ # -*- coding:utf-8 -*- # Copyright (C) 2020. Huawei Technologies Co., Ltd. All rights reserved. -# This program is free software; you can redistribute it and/or modify -# it under the terms of the MIT License. -# This program is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# MIT License for more details. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
"""Import and register metrics automatically.""" diff --git a/vega/metrics/flops_and_params.py b/vega/metrics/flops_and_params.py index 51e1ee6..f4c9639 100644 --- a/vega/metrics/flops_and_params.py +++ b/vega/metrics/flops_and_params.py @@ -1,19 +1,31 @@ # -*- coding:utf-8 -*- # Copyright (C) 2020. Huawei Technologies Co., Ltd. All rights reserved. -# This program is free software; you can redistribute it and/or modify -# it under the terms of the MIT License. -# This program is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# MIT License for more details. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. """Model counter of FLOPS and parameters.""" - from copy import deepcopy import vega import numpy as np +extension_hooks = {} + + +def register_extension_hooks(hooks): + """Register extension hooks.""" + extension_hooks.update(hooks) + def add_new_hooks(custom_hooks): """Add new register hooks to custom hooks.""" @@ -34,7 +46,8 @@ def add_new_hooks(custom_hooks): ops.AvgPool2d: register_hooks[nn.AvgPool2d], ops.Linear: register_hooks[nn.Linear], } - + if extension_hooks: + add_register_hooks.update(extension_hooks) for k, v in add_register_hooks.items(): if k not in register_hooks and k not in custom_hooks: custom_hooks[k] = v diff --git a/vega/metrics/forward_latency.py b/vega/metrics/forward_latency.py index 29263c7..0b87eee 100644 --- a/vega/metrics/forward_latency.py +++ b/vega/metrics/forward_latency.py @@ -1,21 +1,27 @@ # -*- coding:utf-8 -*- # Copyright (C) 2020. Huawei Technologies Co., Ltd. All rights reserved. -# This program is free software; you can redistribute it and/or modify -# it under the terms of the MIT License. -# This program is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# MIT License for more details. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
"""Calculate model forward latency.""" -import time -import vega -import numpy as np import os -from vega.evaluator.conf import DeviceEvaluatorConfig +import time import datetime import logging +import numpy as np +import vega +from vega.evaluator.conf import DeviceEvaluatorConfig def calc_forward_latency(model, input, sess_config=None, num=10): @@ -96,8 +102,7 @@ def _calc_forward_latency_davinci(model, input, sess_config=None, num=10, evalua :rtype: float """ from vega.evaluator.tools.evaluate_davinci_bolt import evaluate - from vega.common.task_ops import TaskOps - # backend = evaluate_config.get("backend") + from vega.common import TaskOps hardware = evaluate_config.get("hardware") remote_host = evaluate_config.get("remote_host") opset_version = evaluate_config.get("opset_version") diff --git a/vega/metrics/mindspore/__init__.py b/vega/metrics/mindspore/__init__.py index 967d4f8..54582d7 100644 --- a/vega/metrics/mindspore/__init__.py +++ b/vega/metrics/mindspore/__init__.py @@ -1,10 +1,10 @@ -from .metrics import Metrics -from vega.common.class_factory import ClassFactory - - -ClassFactory.lazy_register("vega.metrics.mindspore", { - "segmentation_metric": ["trainer.metric:IoUMetric"], - "classifier_metric": ["trainer.metric:accuracy"], - "sr_metric": ["trainer.metric:PSNR", "trainer.metric:SSIM"], - "detection_metric": ["trainer.metric:CocoMetric", "trainer.metric:coco"], -}) +from vega.common.class_factory import ClassFactory +from .metrics import Metrics + + +ClassFactory.lazy_register("vega.metrics.mindspore", { + "segmentation_metric": ["trainer.metric:IoUMetric"], + "classifier_metric": ["trainer.metric:accuracy"], + "sr_metric": ["trainer.metric:PSNR", "trainer.metric:SSIM"], + "detection_metric": ["trainer.metric:CocoMetric", "trainer.metric:coco"], +}) diff --git a/vega/metrics/mindspore/classifier_metric.py b/vega/metrics/mindspore/classifier_metric.py index e7e7e4c..b5ab692 100644 --- a/vega/metrics/mindspore/classifier_metric.py +++ b/vega/metrics/mindspore/classifier_metric.py @@ -1,12 +1,18 @@ # -*- coding:utf-8 -*- # Copyright (C) 2020. Huawei Technologies Co., Ltd. All rights reserved. -# This program is free software; you can redistribute it and/or modify -# it under the terms of the MIT License. -# This program is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# MIT License for more details. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. """Metric of classifier task.""" from mindspore.nn.metrics import Accuracy diff --git a/vega/metrics/mindspore/detection_metric.py b/vega/metrics/mindspore/detection_metric.py index 98ff737..44f4fa4 100644 --- a/vega/metrics/mindspore/detection_metric.py +++ b/vega/metrics/mindspore/detection_metric.py @@ -1,12 +1,18 @@ # -*- coding:utf-8 -*- # Copyright (C) 2020. Huawei Technologies Co., Ltd. All rights reserved. 
-# This program is free software; you can redistribute it and/or modify -# it under the terms of the MIT License. -# This program is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# MIT License for more details. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. """Metric of detection task by using coco tools.""" import os @@ -102,7 +108,6 @@ def print_scores(self, det_json_file, json_file): for id, item in enumerate(self.category): cocoEval = COCOeval(coco, cocoDt, 'bbox') cocoEval.params.catIds = [id + 1] - # cocoEval.params.iouThrs = [0.5] cocoEval.evaluate() cocoEval.accumulate() cocoEval.summarize() diff --git a/vega/metrics/mindspore/metrics.py b/vega/metrics/mindspore/metrics.py index 0d06f43..f6367e0 100644 --- a/vega/metrics/mindspore/metrics.py +++ b/vega/metrics/mindspore/metrics.py @@ -1,20 +1,26 @@ # -*- coding:utf-8 -*- # Copyright (C) 2020. Huawei Technologies Co., Ltd. All rights reserved. -# This program is free software; you can redistribute it and/or modify -# it under the terms of the MIT License. -# This program is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# MIT License for more details. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. """Metric of classifier task.""" from functools import partial from inspect import isfunction +from copy import deepcopy from vega.common import Config from vega.common import ClassFactory, ClassType from vega.trainer.conf import MetricsConfig -from copy import deepcopy class MetricBase(object): diff --git a/vega/metrics/mindspore/segmentation_metric.py b/vega/metrics/mindspore/segmentation_metric.py index d0a5783..94f4f2b 100644 --- a/vega/metrics/mindspore/segmentation_metric.py +++ b/vega/metrics/mindspore/segmentation_metric.py @@ -1,12 +1,18 @@ # -*- coding:utf-8 -*- # Copyright (C) 2020. Huawei Technologies Co., Ltd. All rights reserved. -# This program is free software; you can redistribute it and/or modify -# it under the terms of the MIT License. -# This program is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# MIT License for more details. 
+# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. """Metric of segmentation task.""" from mindspore.nn.metrics import Metric diff --git a/vega/metrics/mindspore/sr_metric.py b/vega/metrics/mindspore/sr_metric.py index 5c6dee2..b717bf3 100644 --- a/vega/metrics/mindspore/sr_metric.py +++ b/vega/metrics/mindspore/sr_metric.py @@ -1,18 +1,24 @@ # -*- coding:utf-8 -*- # Copyright (C) 2020. Huawei Technologies Co., Ltd. All rights reserved. -# This program is free software; you can redistribute it and/or modify -# it under the terms of the MIT License. -# This program is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# MIT License for more details. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. """Metric of super resolution task.""" +import math +import numpy as np from mindspore.nn.metrics import Metric from vega.common import ClassFactory, ClassType -import numpy as np -import math @ClassFactory.register(ClassType.METRIC) @@ -84,9 +90,7 @@ def compute_metric(self, img_sr, img_hr): diff = (img_sr - img_hr) mse = np.mean(np.power(diff, 2)) sr_metric = -10 * math.log10(mse) - # sr_metric = nn.PSNR()(Tensor(img_sr), Tensor(img_hr)) return sr_metric - # return self._convert_data(sr_metric)[0] @property def objective(self): diff --git a/vega/metrics/pytorch/__init__.py b/vega/metrics/pytorch/__init__.py index b7308f3..adff605 100644 --- a/vega/metrics/pytorch/__init__.py +++ b/vega/metrics/pytorch/__init__.py @@ -1,11 +1,10 @@ -from .metrics import Metrics from vega.common.class_factory import ClassFactory +from .metrics import Metrics ClassFactory.lazy_register("vega.metrics.pytorch", { "lane_metric": ["trainer.metric:LaneMetric"], "regression": ["trainer.metric:MSE", "trainer.metric:mse"], "detection_metric": ["trainer.metric:CocoMetric", "trainer.metric:coco"], - "gan_metric": ["trainer.metric:GANMetric"], "classifier_metric": ["trainer.metric:accuracy", "trainer.metric:Accuracy", "trainer.metric:SklearnMetrics"], "auc_metrics": ["trainer.metric:AUC", "trainer.metric:auc"], "segmentation_metric": ["trainer.metric:IoUMetric"], diff --git a/vega/metrics/pytorch/auc_metrics.py b/vega/metrics/pytorch/auc_metrics.py index e1d73ac..24394c7 100644 --- a/vega/metrics/pytorch/auc_metrics.py +++ b/vega/metrics/pytorch/auc_metrics.py @@ -1,46 +1,48 @@ -# -*- coding:utf-8 -*- - -# Copyright (C) 2020. Huawei Technologies Co., Ltd. All rights reserved. 
-# This program is free software; you can redistribute it and/or modify -# it under the terms of the MIT License. -# This program is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# MIT License for more details. - - -"""Metric of classifier task.""" -from vega.metrics.pytorch.metrics import MetricBase -from vega.common import ClassFactory, ClassType -from sklearn.metrics import roc_auc_score - - -@ClassFactory.register(ClassType.METRIC, alias='auc') -class AUC(MetricBase): - """Calculate roc_auc_score between output and target.""" - - # _metric_name__ = 'auc' - - def __init__(self, **kwargs): - """Init AUC metric.""" - self.pfm = 0. - self.__metric_name__ = "auc" - print("init roc_auc_score metric finish") - - def __call__(self, output, target, *args, **kwargs): - """Call auc metric calculate.""" - output = output.tolist() - target = target.tolist() - # print("output:", len(output)) - res = roc_auc_score(y_score=output, y_true=target) - self.pfm = res - # print("auc metrics:", res) - return res - - def reset(self): - """Reset states for new evaluation after each epoch.""" - self.pfm = 0. - - def summary(self): - """Summary all cached records, here is the last pfm record.""" - return self.pfm +# -*- coding:utf-8 -*- + +# Copyright (C) 2020. Huawei Technologies Co., Ltd. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + + +"""Metric of classifier task.""" +from vega.metrics.pytorch.metrics import MetricBase +from vega.common import ClassFactory, ClassType +from sklearn.metrics import roc_auc_score + + +@ClassFactory.register(ClassType.METRIC, alias='auc') +class AUC(MetricBase): + """Calculate roc_auc_score between output and target.""" + + def __init__(self, **kwargs): + """Init AUC metric.""" + self.pfm = 0. + self.__metric_name__ = "auc" + print("init roc_auc_score metric finish") + + def __call__(self, output, target, *args, **kwargs): + """Call auc metric calculate.""" + output = output.tolist() + target = target.tolist() + res = roc_auc_score(y_score=output, y_true=target) + self.pfm = res + return res + + def reset(self): + """Reset states for new evaluation after each epoch.""" + self.pfm = 0. + + def summary(self): + """Summary all cached records, here is the last pfm record.""" + return self.pfm diff --git a/vega/metrics/pytorch/classifier_metric.py b/vega/metrics/pytorch/classifier_metric.py index 2f00eef..38ba7b0 100644 --- a/vega/metrics/pytorch/classifier_metric.py +++ b/vega/metrics/pytorch/classifier_metric.py @@ -1,12 +1,18 @@ # -*- coding:utf-8 -*- # Copyright (C) 2020. Huawei Technologies Co., Ltd. All rights reserved. -# This program is free software; you can redistribute it and/or modify -# it under the terms of the MIT License. -# This program is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. 
See the -# MIT License for more details. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. """Metric of classifier task.""" from functools import partial @@ -37,7 +43,7 @@ def accuracy(output, target, top_k=(1,)): res = [] for k in top_k: correct_k = correct[:k].reshape(-1).float().sum(0) - res.append(correct_k.mul_(100.0 / batch_size)) + res.append(correct_k / batch_size) return res @@ -63,6 +69,8 @@ def __call__(self, output, target, *args, **kwargs): """ if isinstance(output, tuple): output = output[0] + if isinstance(target, tuple) or isinstance(target, list): + target = target[0] res = accuracy(output, target, self.topk) n = output.size(0) self.data_num += n diff --git a/vega/metrics/pytorch/detection_metric.py b/vega/metrics/pytorch/detection_metric.py index 6a462f6..b7adecf 100644 --- a/vega/metrics/pytorch/detection_metric.py +++ b/vega/metrics/pytorch/detection_metric.py @@ -1,21 +1,26 @@ # -*- coding:utf-8 -*- # Copyright (C) 2020. Huawei Technologies Co., Ltd. All rights reserved. -# This program is free software; you can redistribute it and/or modify -# it under the terms of the MIT License. -# This program is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# MIT License for more details. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. """Metric of detection task by using coco tools.""" import os import json from pycocotools.coco import COCO from pycocotools.cocoeval import COCOeval -from vega.common import ClassFactory, ClassType +from vega.common import ClassFactory, ClassType, TaskOps from vega.metrics.pytorch.metrics import MetricBase -from vega.common.task_ops import TaskOps @ClassFactory.register(ClassType.METRIC, alias='coco') @@ -102,7 +107,6 @@ def print_scores(self, det_json_file, json_file): for id, item in enumerate(self.category): cocoEval = COCOeval(coco, cocoDt, 'bbox') cocoEval.params.catIds = [id + 1] - # cocoEval.params.iouThrs = [0.5] cocoEval.evaluate() cocoEval.accumulate() cocoEval.summarize() diff --git a/vega/metrics/pytorch/gan_metric.py b/vega/metrics/pytorch/gan_metric.py deleted file mode 100644 index 832cec4..0000000 --- a/vega/metrics/pytorch/gan_metric.py +++ /dev/null @@ -1,134 +0,0 @@ -# -*- coding:utf-8 -*- - -# Copyright (C) 2020. Huawei Technologies Co., Ltd. All rights reserved. -# This program is free software; you can redistribute it and/or modify -# it under the terms of the MIT License. 
-# This program is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# MIT License for more details. - -"""Metric of Gan task. - -This is not a official metric for IS score and FID. in order to publish paper, -you can use code from https://github.com/lzhbrian/metrics. - -""" -import torch -import numpy as np -from vega.common import ClassFactory, ClassType -import os -import torch.nn.functional as F -from torch.autograd import Variable -from torch import nn -from torchvision.models.inception import inception_v3 -from scipy.stats import entropy - - -def inception_score(imgs, model_checkpoint, cuda=True, batch_size=100, resize=True, splits=1): - """Compute the inception score of the generated images imgs.""" - N = len(imgs) - # Set up dtype - if cuda: - dtype = torch.cuda.FloatTensor - else: - if torch.cuda.is_available(): - print("WARNING: You have a CUDA device, so you should probably set cuda=True") - dtype = torch.FloatTensor - - # Set up dataloader - dataloader = torch.utils.data.DataLoader(imgs, batch_size=batch_size) - - # Load inception model - inception_model = inception_v3( - pretrained=False, transform_input=False).type(dtype) - if model_checkpoint is None: - model_checkpoint = "/workspace/code_paper/inception_v3_google-1a9a5a14.pth" - if not os.path.isfile(model_checkpoint): - raise Exception(f"Pretrained model is not existed, model={model_checkpoint}") - checkpoint = torch.load(model_checkpoint) - inception_model.load_state_dict(checkpoint) - inception_model.eval() - up = nn.Upsample(size=(299, 299), mode='bilinear').type(dtype) - - def get_pred(x): - if resize: - x = up(x) - x = inception_model(x) - return F.softmax(x).data.cpu().numpy() - - # Get predictions - preds = np.zeros((N, 1000)) - - for i, batch in enumerate(dataloader): - batch = batch.type(dtype) - batchv = Variable(batch) - batch_size_i = batch.size()[0] - - preds[i * batch_size:i * batch_size + batch_size_i] = get_pred(batchv) - - # Now compute the mean kl-div - split_scores = [] - - for k in range(splits): - part = preds[k * (N // splits): (k + 1) * (N // splits), :] - py = np.mean(part, axis=0) - scores = [] - for i in range(part.shape[0]): - pyx = part[i, :] - scores.append(entropy(pyx, py)) - split_scores.append(np.exp(np.mean(scores))) - - return np.mean(split_scores), np.std(split_scores) - - -@ClassFactory.register(ClassType.METRIC) -class GANMetric(object): - """Calculate SR metric between output and target.""" - - def __init__(self, model_checkpoint=None, latent_dim=120): - self.model_checkpoint = model_checkpoint - self.sum = 0. - self.pfm = 0. - self.latent_dim = latent_dim - - def __call__(self, output=None, target=None, model=None, **kwargs): - """Calculate SR metric. - - :param output: output of segmentation network - :param target: ground truth from dataset - :return: confusion matrix sum - """ - if model is not None: - img_list = list() - eval_iter = 50000 // 100 - self.sum = 50000 - for iter_idx in range(eval_iter): - z = torch.cuda.FloatTensor( - np.random.normal(0, 1, (100, self.latent_dim))) - # generate a batch of images - gen_imgs = model(z).mul_(127.5).add_(127.5).clamp_(0.0, 255.0) - img_list.extend(list(gen_imgs)) - mean, std = inception_score(img_list, self.model_checkpoint) - self.pfm = mean - return mean - else: - raise Exception("Must give a model") - - def reset(self): - """Reset states for new evaluation after each epoch.""" - self.sum = 0. - self.pfm = 0. 
- self.data_num = 0 - - def summary(self): - """Summary all cached records, here is the last pfm record.""" - return self.pfm - - @property - def results(self): - """Return metrics results.""" - res = {} - if self.model is None: - res["value"] = self.pfm - return res diff --git a/vega/metrics/pytorch/lane_metric.py b/vega/metrics/pytorch/lane_metric.py index d0d9eaa..52ed128 100644 --- a/vega/metrics/pytorch/lane_metric.py +++ b/vega/metrics/pytorch/lane_metric.py @@ -1,22 +1,28 @@ # -*- coding:utf-8 -*- # Copyright (C) 2020. Huawei Technologies Co., Ltd. All rights reserved. -# This program is free software; you can redistribute it and/or modify -# it under the terms of the MIT License. -# This program is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# MIT License for more details. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. """Metric of lane detection task.""" -from vega.common import ClassFactory, ClassType -from vega.metrics.pytorch.metrics import MetricBase import sys -from scipy.optimize import linear_sum_assignment +from itertools import product import cv2 import numpy as np -from itertools import product +from scipy.optimize import linear_sum_assignment +from vega.common import ClassFactory, ClassType +from vega.metrics.pytorch.metrics import MetricBase def calc_x(f, t): @@ -231,10 +237,6 @@ def evaluate_core(*, gt_lanes, pr_lanes, gt_wh, pr_wh, hyperp): gt_x_ratio = np.true_divide(gt_wh['width'], new_width) pr_y_ratio = np.true_divide(pr_wh['height'], new_height) pr_x_ratio = np.true_divide(pr_wh['width'], new_width) - # resize lanes and interp lanes, - # all the gt and pr are mapping to src img, so the scale ratio is same, - # note that the scale ratio is not a factor but a divisor - # print('gt_lane',gt_lanes) gt_lanes = list(map(lambda lane: resize_lane(lane, gt_x_ratio, gt_y_ratio), gt_lanes)) pr_lanes = list(map(lambda lane: resize_lane(lane, pr_x_ratio, pr_y_ratio), pr_lanes)) @@ -245,7 +247,6 @@ def evaluate_core(*, gt_lanes, pr_lanes, gt_wh, pr_wh, hyperp): for (index_gt, gt_lane), (index_pr, pr_lane) in product(enumerate(sorted_gt_lanes), enumerate(sorted_pr_lanes)): iou_mat[index_gt][index_pr] = calc_iou(gt_lane, pr_lane, hyperp) - # match_idx = Munkres().compute(make_cost_matrix(iou_mat, lambda iou: float(1.0 - iou))) cost_matrix = 1 - np.array(iou_mat) match_index_list = linear_sum_assignment(cost_matrix) diff --git a/vega/metrics/pytorch/metrics.py b/vega/metrics/pytorch/metrics.py index 49ca263..96511f3 100644 --- a/vega/metrics/pytorch/metrics.py +++ b/vega/metrics/pytorch/metrics.py @@ -1,12 +1,18 @@ # -*- coding:utf-8 -*- # Copyright (C) 2020. Huawei Technologies Co., Ltd. All rights reserved. -# This program is free software; you can redistribute it and/or modify -# it under the terms of the MIT License. 
-# This program is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# MIT License for more details. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. """Base class for metrics. All metric class should be implement this base class.""" from functools import partial diff --git a/vega/metrics/pytorch/nlp_metric.py b/vega/metrics/pytorch/nlp_metric.py index b650b26..762cae8 100644 --- a/vega/metrics/pytorch/nlp_metric.py +++ b/vega/metrics/pytorch/nlp_metric.py @@ -1,12 +1,18 @@ # -*- coding:utf-8 -*- # Copyright (C) 2020. Huawei Technologies Co., Ltd. All rights reserved. -# This program is free software; you can redistribute it and/or modify -# it under the terms of the MIT License. -# This program is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# MIT License for more details. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. """Metric of nlp task.""" import sklearn.metrics as me diff --git a/vega/metrics/pytorch/r2score.py b/vega/metrics/pytorch/r2score.py index 7118066..61e0f87 100644 --- a/vega/metrics/pytorch/r2score.py +++ b/vega/metrics/pytorch/r2score.py @@ -1,12 +1,18 @@ # -*- coding:utf-8 -*- # Copyright (C) 2020. Huawei Technologies Co., Ltd. All rights reserved. -# This program is free software; you can redistribute it and/or modify -# it under the terms of the MIT License. -# This program is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# MIT License for more details. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
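# The lane metric earlier in this diff pairs ground-truth and predicted lanes by
# solving an assignment problem with scipy.optimize.linear_sum_assignment on a
# cost matrix of 1 - IoU, as in evaluate_core. A minimal, self-contained sketch
# of that matching step (the IoU values below are made up for illustration):
import numpy as np
from scipy.optimize import linear_sum_assignment

iou_mat = np.array([[0.7, 0.1, 0.0],    # rows: ground-truth lanes
                    [0.2, 0.6, 0.1]])   # cols: predicted lanes
cost_matrix = 1 - iou_mat               # higher IoU -> lower assignment cost
gt_idx, pr_idx = linear_sum_assignment(cost_matrix)
for g, p in zip(gt_idx, pr_idx):
    print(f"gt lane {g} -> prediction {p}, IoU={iou_mat[g, p]:.2f}")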
"""Metric of Regression task.""" import numpy as np diff --git a/vega/metrics/pytorch/recall_eval.py b/vega/metrics/pytorch/recall_eval.py deleted file mode 100644 index 8f54df5..0000000 --- a/vega/metrics/pytorch/recall_eval.py +++ /dev/null @@ -1,163 +0,0 @@ -# -*- coding:utf-8 -*- - -# Copyright (C) 2020. Huawei Technologies Co., Ltd. All rights reserved. -# This program is free software; you can redistribute it and/or modify -# it under the terms of the MIT License. -# This program is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# MIT License for more details. - -"""Metric of recall.""" -import numpy as np - - -def _recalls(all_ious, proposal_nums, thrs): - """Calculate recalls according to IoUs. - - :param all_ious: all calculated ious - :type all_ious: list of numpy array - :param proposal_nums: proposal numbers - :type proposal_nums: numpy array - :param thrs: thresholds - :type thrs: numpy array - :return: recalls of all thresholds - :rtype: 2D numpy array - """ - img_num = all_ious.shape[0] - total_gt_num = sum([ious.shape[0] for ious in all_ious]) - _ious = np.zeros((proposal_nums.size, total_gt_num), dtype=np.float32) - for k, proposal_num in enumerate(proposal_nums): - tmp_ious = np.zeros(0) - for i in range(img_num): - ious = all_ious[i][:, :proposal_num].copy() - gt_ious = np.zeros((ious.shape[0])) - if ious.size == 0: - tmp_ious = np.hstack((tmp_ious, gt_ious)) - continue - for j in range(ious.shape[0]): - gt_max_overlaps = ious.argmax(axis=1) - max_ious = ious[np.arange(0, ious.shape[0]), gt_max_overlaps] - gt_idx = max_ious.argmax() - gt_ious[j] = max_ious[gt_idx] - box_idx = gt_max_overlaps[gt_idx] - ious[gt_idx, :] = -1 - ious[:, box_idx] = -1 - tmp_ious = np.hstack((tmp_ious, gt_ious)) - _ious[k, :] = tmp_ious - _ious = np.fliplr(np.sort(_ious, axis=1)) - recalls = np.zeros((proposal_nums.size, thrs.size)) - for i, thr in enumerate(thrs): - recalls[:, i] = (_ious >= thr).sum(axis=1) / float(total_gt_num) - return recalls - - -def set_recall_param(proposal_nums, iou_thrs): - """Check proposal_nums and iou_thrs and set correct format. - - :param proposal_nums: proposal numbers - :type proposal_nums: int or list or numpy array - :param iou_thrs: IoU thresholds - :type iou_thrs: list or float or None - :return: proposal_nums, IoU thresholds - :rtype: tuple of numpy array - """ - if isinstance(proposal_nums, list): - _proposal_nums = np.array(proposal_nums) - elif isinstance(proposal_nums, int): - _proposal_nums = np.array([proposal_nums]) - else: - _proposal_nums = proposal_nums - if iou_thrs is None: - _iou_thrs = np.array([0.5]) - elif isinstance(iou_thrs, list): - _iou_thrs = np.array(iou_thrs) - elif isinstance(iou_thrs, float): - _iou_thrs = np.array([iou_thrs]) - else: - _iou_thrs = iou_thrs - return _proposal_nums, _iou_thrs - - -def bbox_overlaps(bboxes1, bboxes2, mode='iou'): - """Calculate the ious between two bboxes. 
- - :param bboxes1: bboxes 1 - :type bboxes1: numpy array, shape (n, 4) - :param bboxes: bboxes 2 - :type bboxes2: numpy array, shape (k, 4) - :param mode: type of overlaps - :type model: str, iou or iof, default iou - :return: ious of two bboxes - :rtype: numpy array, shape (n, k) - """ - if mode not in ['iou', 'iof']: - raise TypeError('mode should be iou or iof') - bboxes1 = bboxes1.astype(np.float32) - bboxes2 = bboxes2.astype(np.float32) - rows = bboxes1.shape[0] - cols = bboxes2.shape[0] - ious = np.zeros((rows, cols), dtype=np.float32) - if rows * cols == 0: - return ious - exchange = False - if bboxes1.shape[0] > bboxes2.shape[0]: - bboxes1, bboxes2 = bboxes2, bboxes1 - ious = np.zeros((cols, rows), dtype=np.float32) - exchange = True - area1 = (bboxes1[:, 2] - bboxes1[:, 0] + 1) * \ - (bboxes1[:, 3] - bboxes1[:, 1] + 1) - area2 = (bboxes2[:, 2] - bboxes2[:, 0] + 1) * \ - (bboxes2[:, 3] - bboxes2[:, 1] + 1) - for i in range(bboxes1.shape[0]): - x_start = np.maximum(bboxes1[i, 0], bboxes2[:, 0]) - y_start = np.maximum(bboxes1[i, 1], bboxes2[:, 1]) - x_end = np.minimum(bboxes1[i, 2], bboxes2[:, 2]) - y_end = np.minimum(bboxes1[i, 3], bboxes2[:, 3]) - overlap = np.maximum(x_end - x_start + 1, 0) * np.maximum( - y_end - y_start + 1, 0) - if mode == 'iou': - union = area1[i] + area2 - overlap - else: - union = area1[i] if not exchange else area2 - ious[i, :] = overlap / union - if exchange: - ious = ious.T - return ious - - -def eval_recalls(gts, proposals, proposal_nums=None, iou_thrs=None): - """Calculate recalls. - - :param gts: ground truth bboxes - :type gts: list of numpy array - :param proposals: proposal results - :type proposals: list of numpy arrays - :param proposal_nums: proposal numbers - :type proposal_nums: tuple of int, default to None - :param iou_thrs: IoU thresholds - :type iou_thrs: numpy array of thresholds, default to None - :return: recalls - :rtype: numpy 2D arrays - """ - img_num = len(gts) - if img_num != len(proposals): - raise Exception('img_num must be equal to length of proposals') - proposal_nums, iou_thrs = set_recall_param(proposal_nums, iou_thrs) - all_ious = [] - for i in range(img_num): - if proposals[i].ndim == 2 and proposals[i].shape[1] == 5: - scores = proposals[i][:, 4] - sort_idx = np.argsort(scores)[::-1] - img_proposal = proposals[i][sort_idx, :] - else: - img_proposal = proposals[i] - prop_num = min(img_proposal.shape[0], proposal_nums[-1]) - if gts[i] is None or gts[i].shape[0] == 0: - ious = np.zeros((0, img_proposal.shape[0]), dtype=np.float32) - else: - ious = bbox_overlaps(gts[i], img_proposal[:prop_num, :4]) - all_ious.append(ious) - all_ious = np.array(all_ious) - recalls = _recalls(all_ious, proposal_nums, iou_thrs) - return recalls diff --git a/vega/metrics/pytorch/regression.py b/vega/metrics/pytorch/regression.py index a4e2a63..d6a8f5e 100644 --- a/vega/metrics/pytorch/regression.py +++ b/vega/metrics/pytorch/regression.py @@ -1,12 +1,18 @@ # -*- coding:utf-8 -*- # Copyright (C) 2020. Huawei Technologies Co., Ltd. All rights reserved. -# This program is free software; you can redistribute it and/or modify -# it under the terms of the MIT License. -# This program is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# MIT License for more details. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. """Metric of Regression task.""" from torch.nn import functional as F diff --git a/vega/metrics/pytorch/segmentation_metric.py b/vega/metrics/pytorch/segmentation_metric.py index af519f0..4b01cef 100644 --- a/vega/metrics/pytorch/segmentation_metric.py +++ b/vega/metrics/pytorch/segmentation_metric.py @@ -1,12 +1,18 @@ # -*- coding:utf-8 -*- # Copyright (C) 2020. Huawei Technologies Co., Ltd. All rights reserved. -# This program is free software; you can redistribute it and/or modify -# it under the terms of the MIT License. -# This program is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# MIT License for more details. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. """Metric of segmentation task.""" import numpy as np diff --git a/vega/metrics/pytorch/sr_metric.py b/vega/metrics/pytorch/sr_metric.py index b8e8b1a..a046322 100644 --- a/vega/metrics/pytorch/sr_metric.py +++ b/vega/metrics/pytorch/sr_metric.py @@ -1,18 +1,24 @@ # -*- coding:utf-8 -*- # Copyright (C) 2020. Huawei Technologies Co., Ltd. All rights reserved. -# This program is free software; you can redistribute it and/or modify -# it under the terms of the MIT License. -# This program is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# MIT License for more details. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. """Metric of super solution task.""" +import math import torch import numpy as np import cv2 -import math from vega.metrics.pytorch.metrics import MetricBase from vega.common import ClassFactory, ClassType diff --git a/vega/metrics/runtime_estimate.py b/vega/metrics/runtime_estimate.py index e2d8d25..86c0f32 100644 --- a/vega/metrics/runtime_estimate.py +++ b/vega/metrics/runtime_estimate.py @@ -1,12 +1,18 @@ # -*- coding: utf-8 -*- # Copyright (C) 2020. Huawei Technologies Co., Ltd. All rights reserved. 
-# This program is free software; you can redistribute it and/or modify -# it under the terms of the MIT License. -# This program is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# MIT License for more details. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. """Remaining Runtime Estimator.""" import time @@ -23,7 +29,11 @@ class RuntimeEstimator(object): :type max_steps: list or tuple """ - def __init__(self, types=['epoch', 'train'], max_steps=[0, 0]): + def __init__(self, types=None, max_steps=None): + if types is None: + types = ['epoch', 'train'] + if max_steps is None: + max_steps = [0, 0] self.estimator = Config() if not isinstance(types, list) or not isinstance(max_steps, list): types = [types] @@ -90,7 +100,6 @@ def remaining_time(self, type, step): run_est.current_step = step run_steps = run_est.current_step - run_est.start_step remain_time = interval * (run_est.max_step - run_est.current_step) / run_steps - # return datetime.timedelta(seconds=int(remain_time)) return remain_time / 3600 def using_time(self, type): diff --git a/vega/metrics/tensorflow/__init__.py b/vega/metrics/tensorflow/__init__.py index 31dcf8c..5eb861d 100644 --- a/vega/metrics/tensorflow/__init__.py +++ b/vega/metrics/tensorflow/__init__.py @@ -1,5 +1,5 @@ -from .metrics import Metrics from vega.common.class_factory import ClassFactory +from .metrics import Metrics ClassFactory.lazy_register("vega.metrics.tensorflow", { diff --git a/vega/metrics/tensorflow/classifier_metric.py b/vega/metrics/tensorflow/classifier_metric.py index 2932b99..dcc6480 100644 --- a/vega/metrics/tensorflow/classifier_metric.py +++ b/vega/metrics/tensorflow/classifier_metric.py @@ -1,12 +1,18 @@ # -*- coding:utf-8 -*- # Copyright (C) 2020. Huawei Technologies Co., Ltd. All rights reserved. -# This program is free software; you can redistribute it and/or modify -# it under the terms of the MIT License. -# This program is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# MIT License for more details. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
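# The RuntimeEstimator change above replaces mutable default arguments
# (types=['epoch', 'train'], max_steps=[0, 0]) with None plus in-body defaults.
# A minimal sketch of why (the functions here are hypothetical, not from the
# diff): a list default is created once at definition time and shared by
# every call.
def bad(steps=[]):
    steps.append(1)
    return steps

def good(steps=None):
    if steps is None:
        steps = []
    steps.append(1)
    return steps

print(bad())    # [1]
print(bad())    # [1, 1] -- the same default list is reused across calls
print(good())   # [1]
print(good())   # [1]    -- a fresh list is created per call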
"""Metric of classifier task.""" import tensorflow as tf diff --git a/vega/metrics/tensorflow/forecast.py b/vega/metrics/tensorflow/forecast.py index 42466fb..2cbf4bb 100644 --- a/vega/metrics/tensorflow/forecast.py +++ b/vega/metrics/tensorflow/forecast.py @@ -1,12 +1,18 @@ # -*- coding:utf-8 -*- # Copyright (C) 2020. Huawei Technologies Co., Ltd. All rights reserved. -# This program is free software; you can redistribute it and/or modify -# it under the terms of the MIT License. -# This program is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# MIT License for more details. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. """Metric of Regression task.""" from vega.metrics.pytorch.metrics import MetricBase diff --git a/vega/metrics/tensorflow/metrics.py b/vega/metrics/tensorflow/metrics.py index 213abac..41979a3 100644 --- a/vega/metrics/tensorflow/metrics.py +++ b/vega/metrics/tensorflow/metrics.py @@ -1,12 +1,18 @@ # -*- coding:utf-8 -*- # Copyright (C) 2020. Huawei Technologies Co., Ltd. All rights reserved. -# This program is free software; you can redistribute it and/or modify -# it under the terms of the MIT License. -# This program is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# MIT License for more details. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. """Metric of classifier task.""" from functools import partial diff --git a/vega/metrics/tensorflow/r2score.py b/vega/metrics/tensorflow/r2score.py index 4509d30..6af3703 100644 --- a/vega/metrics/tensorflow/r2score.py +++ b/vega/metrics/tensorflow/r2score.py @@ -1,12 +1,18 @@ # -*- coding:utf-8 -*- # Copyright (C) 2020. Huawei Technologies Co., Ltd. All rights reserved. -# This program is free software; you can redistribute it and/or modify -# it under the terms of the MIT License. -# This program is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# MIT License for more details. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. """Metric of Regression task.""" import tensorflow as tf diff --git a/vega/metrics/tensorflow/segmentation_metric.py b/vega/metrics/tensorflow/segmentation_metric.py index d076792..747a041 100644 --- a/vega/metrics/tensorflow/segmentation_metric.py +++ b/vega/metrics/tensorflow/segmentation_metric.py @@ -1,12 +1,18 @@ # -*- coding:utf-8 -*- # Copyright (C) 2020. Huawei Technologies Co., Ltd. All rights reserved. -# This program is free software; you can redistribute it and/or modify -# it under the terms of the MIT License. -# This program is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# MIT License for more details. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. """Metric of segmentation task.""" import tensorflow as tf diff --git a/vega/metrics/tensorflow/sr_metric.py b/vega/metrics/tensorflow/sr_metric.py index 6831a0f..94aedf2 100644 --- a/vega/metrics/tensorflow/sr_metric.py +++ b/vega/metrics/tensorflow/sr_metric.py @@ -1,12 +1,18 @@ # -*- coding:utf-8 -*- # Copyright (C) 2020. Huawei Technologies Co., Ltd. All rights reserved. -# This program is free software; you can redistribute it and/or modify -# it under the terms of the MIT License. -# This program is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# MIT License for more details. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. """Metric of super resolution task.""" import tensorflow as tf diff --git a/vega/model_zoo/__init__.py b/vega/model_zoo/__init__.py index b0615c9..505b4a8 100644 --- a/vega/model_zoo/__init__.py +++ b/vega/model_zoo/__init__.py @@ -1,16 +1,23 @@ # -*- coding:utf-8 -*- # Copyright (C) 2020. Huawei Technologies Co., Ltd. All rights reserved. -# This program is free software; you can redistribute it and/or modify -# it under the terms of the MIT License. 
-# This program is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# MIT License for more details. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. """Import and register torch vision model automatically.""" from .model_zoo import ModelZoo +from .tuner import ModelTuner def register_modelzoo(backend): diff --git a/vega/model_zoo/fusion.py b/vega/model_zoo/fusion.py index 1daa174..5b54ee0 100644 --- a/vega/model_zoo/fusion.py +++ b/vega/model_zoo/fusion.py @@ -1,18 +1,24 @@ # -*- coding:utf-8 -*- # Copyright (C) 2020. Huawei Technologies Co., Ltd. All rights reserved. -# This program is free software; you can redistribute it and/or modify -# it under the terms of the MIT License. -# This program is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# MIT License for more details. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. """Fuse Operator.""" import logging import copy import vega -from vega.modules.operators import Identity +from vega.modules.operators.ops import Identity if vega.is_torch_backend(): import torch diff --git a/vega/model_zoo/model_zoo.py b/vega/model_zoo/model_zoo.py index 3cbb862..e4022b4 100644 --- a/vega/model_zoo/model_zoo.py +++ b/vega/model_zoo/model_zoo.py @@ -1,23 +1,32 @@ # -*- coding: utf-8 -*- # Copyright (C) 2020. Huawei Technologies Co., Ltd. All rights reserved. -# This program is free software; you can redistribute it and/or modify -# it under the terms of the MIT License. -# This program is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# MIT License for more details. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
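# ModelZoo gains a change_module helper just below that swaps a submodule in
# place by walking its dotted name with getattr and setting the final
# attribute. A minimal sketch of the same idea on a small torch model
# (set_module and the toy network here are illustrative, not part of the diff):
import torch.nn as nn

def set_module(model, name, new_module):
    """Replace the submodule addressed by a dotted path such as 'backbone.0'."""
    *parents, attr = name.split('.')
    for p in parents:
        model = getattr(model, p)
    setattr(model, attr, new_module)

net = nn.Sequential()
net.add_module('backbone', nn.Sequential(nn.Conv2d(3, 8, 3), nn.ReLU()))
net.add_module('head', nn.Linear(8, 10))
set_module(net, 'backbone.0', nn.Conv2d(3, 16, 3))  # swap the first conv
print(net)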
"""Model zoo.""" import os +import subprocess import logging import glob -import numpy +import uuid from collections import OrderedDict +import numpy import vega from vega.networks.network_desc import NetworkDesc from vega.common.general import General +from vega.common import FileOps class ModelZoo(object): @@ -54,7 +63,6 @@ def get_model(cls, model_desc=None, pretrained_model_file=None, head=None, is_fu raise ValueError("model desc can't be None when create model.") try: model = NetworkDesc(model_desc).to_model() - # return model except Exception as e: logging.error("Failed to get model, model_desc={}, msg={}".format(model_desc, str(e))) raise e @@ -99,6 +107,27 @@ def to_module(cls, model): return model return ModelZoo.get_model(model_desc) + @classmethod + def refine(cls, model, refine_model): + """Replace model weights and params by a new model.""" + model_names = [name for name, module in model.named_modules()] + for name, module in refine_model.named_modules(): + if name in model_names: + cls.change_module(model, name, module) + return model + + @classmethod + def change_module(cls, model, name, entity): + """Change module.""" + if not entity: + return + tokens = name.split('.') + attr_name = tokens[-1] + parent_names = tokens[:-1] + for s in parent_names: + model = getattr(model, s) + setattr(model, attr_name, entity) + @classmethod def parse_desc_from_pretrained_model(cls, src_model, pb_file=None): """Parse desc from Petrained Model.""" @@ -139,57 +168,65 @@ def _exclude_checkpoint_by_prefix(cls, states, head_prefix): def _load_pretrained_model(cls, model, pretrained_model_file, exclude_weight_prefix=None): pretrained_model_file = cls._get_abs_path(pretrained_model_file) logging.info("load model weights from file, weights file={}".format(pretrained_model_file)) + if not os.path.exists(pretrained_model_file): + pretrained_model_file = FileOps.download_pretrain_model(pretrained_model_file) if vega.is_torch_backend(): - import torch - if not os.path.isfile(pretrained_model_file): - raise Exception(f"Pretrained model is not existed, model={pretrained_model_file}") - if vega.is_npu_device(): - from vega.common.task_ops import TaskOps - import time - device = int(os.environ.get('DEVICE_ID', 0)) - target_model_file = "{}/checkpoint_{}_{}.pth".format( - TaskOps().temp_path, device, round(time.time() * 1000)) - cmd = "/bin/cp -f {} {} && sed -i 's/npu:[0-9]/npu:{}/g' {}".format( - pretrained_model_file, target_model_file, device, target_model_file) - ret = os.system(cmd) - logging.info("modify weight file result: " + str(ret)) - checkpoint = torch.load(target_model_file) - if os.path.exists(target_model_file): - os.remove(target_model_file) - else: - checkpoint = torch.load(pretrained_model_file) - if exclude_weight_prefix: - # TODO: make it more generalize - if vega.is_torch_backend(): - model.load_state_dict(checkpoint, False, exclude_weight_prefix=exclude_weight_prefix) - else: - checkpoint = cls._exclude_checkpoint_by_prefix(checkpoint, exclude_weight_prefix) - model.load_state_dict(checkpoint, False) - else: - model.load_state_dict(checkpoint) + return cls._load_torch_model(model, pretrained_model_file, exclude_weight_prefix) + elif vega.is_tf_backend(): + return cls._load_tf_model(model, pretrained_model_file) + else: + return cls._load_ms_model(model, pretrained_model_file) - # del checkpoint - if vega.is_tf_backend(): - if pretrained_model_file.endswith('.pth'): - checkpoint = convert_checkpoint_from_pytorch(pretrained_model_file, model) - 
model.load_checkpoint_from_numpy(checkpoint) + @classmethod + def _load_torch_model(cls, model, pretrained_model_file, exclude_weight_prefix=None): + import torch + if not os.path.isfile(pretrained_model_file): + raise Exception(f"Pretrained model is not existed, model={pretrained_model_file}") + if vega.is_npu_device(): + device = int(os.environ.get('DEVICE_ID', 0)) + target_model_file = f"{os.path.dirname(pretrained_model_file)}/temp_{device}_{uuid.uuid1().hex[:8]}" + ret_cp = subprocess.call(["/bin/cp", "-f", pretrained_model_file, target_model_file]) + ret_sed = subprocess.call(["/bin/sed", "-i", "-e", f"s/npu:[0-9]/npu:{device}/g", target_model_file]) + logging.info(f"modify weight file result: {ret_cp}|{ret_sed}") + checkpoint = torch.load(target_model_file) + if os.path.exists(target_model_file): + os.remove(target_model_file) + else: + checkpoint = torch.load(pretrained_model_file) + if exclude_weight_prefix: + if vega.is_torch_backend(): + model.load_state_dict(checkpoint, False, exclude_weight_prefix=exclude_weight_prefix) else: - pretrained_model_file = cls._get_tf_model_file(pretrained_model_file) - model.load_checkpoint(pretrained_model_file) - elif vega.is_ms_backend(): - from mindspore.train.serialization import load_checkpoint - if hasattr(model, "pretrained"): - pretrained_weight = model.pretrained(pretrained_model_file) + checkpoint = cls._exclude_checkpoint_by_prefix(checkpoint, exclude_weight_prefix) + model.load_state_dict(checkpoint, False) + else: + model.load_state_dict(checkpoint) + return model + + @classmethod + def _load_tf_model(cls, model, pretrained_model_file): + if pretrained_model_file.endswith('.pth'): + checkpoint = convert_checkpoint_from_pytorch(pretrained_model_file, model) + model.load_checkpoint_from_numpy(checkpoint) + else: + pretrained_model_file = cls._get_tf_model_file(pretrained_model_file) + model.load_checkpoint(pretrained_model_file) + return model + + @classmethod + def _load_ms_model(cls, model, pretrained_model_file): + from mindspore.train.serialization import load_checkpoint + if hasattr(model, "pretrained"): + pretrained_weight = model.pretrained(pretrained_model_file) + else: + if os.path.isfile(pretrained_model_file): + pretrained_weight = pretrained_model_file else: - if os.path.isfile(pretrained_model_file): - pretrained_weight = pretrained_model_file - else: - for file in os.listdir(pretrained_model_file): - if file.endswith(".ckpt"): - pretrained_weight = os.path.join(pretrained_model_file, file) - break - load_checkpoint(pretrained_weight, net=model) - # os.remove(pretrained_weight) + for file in os.listdir(pretrained_model_file): + if file.endswith(".ckpt"): + pretrained_weight = os.path.join(pretrained_model_file, file) + break + load_checkpoint(pretrained_weight, net=model) return model @classmethod @@ -203,7 +240,7 @@ def select_compressed_models(cls, model_zoo_file, standard, num): @classmethod def _get_abs_path(cls, _path): if "{local_base_path}" in _path: - from vega.common.task_ops import TaskOps + from vega.common import TaskOps return os.path.abspath(_path.replace("{local_base_path}", TaskOps().local_base_path)) return _path diff --git a/vega/model_zoo/torch_vision_model.py b/vega/model_zoo/torch_vision_model.py index 785ce88..9f18f31 100644 --- a/vega/model_zoo/torch_vision_model.py +++ b/vega/model_zoo/torch_vision_model.py @@ -1,12 +1,18 @@ # -*- coding: utf-8 -*- # Copyright (C) 2020. Huawei Technologies Co., Ltd. All rights reserved. 
-# This program is free software; you can redistribute it and/or modify -# it under the terms of the MIT License. -# This program is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# MIT License for more details. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. """Import all torchvision networks and models.""" from types import ModuleType diff --git a/vega/model_zoo/tuner.py b/vega/model_zoo/tuner.py new file mode 100644 index 0000000..e351260 --- /dev/null +++ b/vega/model_zoo/tuner.py @@ -0,0 +1,99 @@ +# -*- coding: utf-8 -*- + +# Copyright (C) 2020. Huawei Technologies Co., Ltd. All rights reserved. +# This program is free software; you can redistribute it and/or modify +# it under the terms of the MIT License. +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# MIT License for more details. + +"""Base Trainer.""" + +import logging + +import vega +from vega.common import FileOps, TaskOps +from vega.core.pipeline.conf import PipeStepConfig +from vega.model_zoo import ModelZoo +from vega.report import ReportClient +from vega.common.class_factory import ClassFactory, ClassType + + +class ModelTuner(object): + """Model Tuner that can call the nas algorithm to search a new model.""" + + __worker_id__ = None + __step_name__ = None + __fns__ = None + + @classmethod + def setup(cls, step_name, worker_id): + """Set step name and work id.""" + cls.__step_name__ = step_name + cls.__worker_id__ = worker_id + + @classmethod + def register_fn(cls, fn_name, **kwargs): + """Register function and params.""" + cls.__fns__ = [fn_name, kwargs] + + @classmethod + def get_fn(cls): + """Get function info.""" + return tuple(cls.__fns__) + + @classmethod + def build_model(cls, model): + """Build a new mode by call dag search algorithm.""" + logging.info("Start tune model.") + record = ReportClient().get_record(cls.__step_name__, cls.__worker_id__) + device = next(model.parameters()).device + if not record or not record.desc: + model = cls.build_on_fine_tune(model) + model = cls.build_after_nas(model) + return model.to(device) + + @classmethod + def build_on_fine_tune(cls, model): + """Parse model to desc on the first time.""" + step_name, worker_id = cls.__step_name__, cls.__worker_id__ + dag_cls = ClassFactory.get_cls(ClassType.NETWORK, 'Script2Vega') + dag_model = dag_cls(model=model)() + desc = dag_model.to_desc() + ReportClient().update(step_name, worker_id, desc=desc) + cls._save(dag_model) + logging.info("End to Fine tune model.") + return model + + @classmethod + def build_after_nas(cls, model): + """Build a new model on nas pipe step.""" + record = ReportClient().get_record(cls.__step_name__, cls.__worker_id__) + dag_model = ModelZoo().get_model(record.desc, record.weights_file or PipeStepConfig.model.pretrained_model_file) + 
ModelZoo().refine(model, dag_model) + logging.info("End to tune model.") + return model + + @classmethod + def update(cls, model=None, performance=None): + """Update performance and save weights.""" + ReportClient().update(cls.__step_name__, cls.__worker_id__, performance=performance) + if model: + cls._save(model) + + @classmethod + def _save(cls, model): + # save fine_tune weights. + step_name, worker_id = cls.__step_name__, cls.__worker_id__ + weights_file = FileOps.join_path( + TaskOps().get_local_worker_path(step_name, worker_id), "model_{}.pth".format(worker_id)) + if vega.is_torch_backend(): + import torch + state_dict = model.state_dict() + if isinstance(model, torch.nn.DataParallel): + state_dict = {k.replace('module.', ''): v for k, v in state_dict.items()} + torch.save(state_dict, weights_file) + elif vega.is_ms_backend(): + from mindspore.train.serialization import save_checkpoint + save_checkpoint(model, weights_file) diff --git a/vega/model_zoo/weights_tools.py b/vega/model_zoo/weights_tools.py index 3efb5ff..be0a12f 100644 --- a/vega/model_zoo/weights_tools.py +++ b/vega/model_zoo/weights_tools.py @@ -1,12 +1,18 @@ # -*- coding: utf-8 -*- # Copyright (C) 2020. Huawei Technologies Co., Ltd. All rights reserved. -# This program is free software; you can redistribute it and/or modify -# it under the terms of the MIT License. -# This program is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# MIT License for more details. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
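# ModelTuner._save above strips the 'module.' prefix that torch.nn.DataParallel
# prepends to every parameter name before the state dict is written to disk.
# A minimal sketch of that renaming step on a plain dict (the tensor values are
# replaced by placeholders):
state_dict = {'module.conv.weight': 0, 'module.fc.bias': 1}
clean = {k.replace('module.', ''): v for k, v in state_dict.items()}
print(clean)  # {'conv.weight': 0, 'fc.bias': 1}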
"""Model zoo.""" import re @@ -46,8 +52,10 @@ def convert_faster_backbone_weights(model, state_dict): state_name = state_name.replace('norm', 'bn') state_name = state_name.replace('head', 'fc') new_state_dict[name] = state_dict.pop(state_name) - assert len(state_dict) == 0 - return new_state_dict + if len(state_dict) == 0: + return new_state_dict + else: + raise ValueError('Failed to convert weigh of faster_backbone.') def convert_resnet_general_weights(model, state_dict): @@ -87,8 +95,10 @@ def convert_resnet_general_weights(model, state_dict): state_name = re.sub(r'batch', '1', state_name) state_name = re.sub(r'batch', 'bn', state_name) new_state_dict[name] = state_dict.pop(state_name) - assert len(state_dict) == 0 - return new_state_dict + if len(state_dict) == 0: + return new_state_dict + else: + raise ValueError('Failed to convert weigh of resnet_general.') def convert_torch_resnet_weights_to_serialClassificationNet(model, state_dict, strict=True): @@ -97,7 +107,6 @@ def convert_torch_resnet_weights_to_serialClassificationNet(model, state_dict, s new_state_dict = OrderedDict() for name in names: state_name = name - # state_name = name.replace('backbone.', '') state_name = state_name.replace('backbone.', '') if name.startswith('head.linear'): state_name = state_name.replace('head.linear', 'fc') @@ -112,7 +121,8 @@ def convert_torch_resnet_weights_to_serialClassificationNet(model, state_dict, s state_name = state_name.replace('layers.{}'.format(layer_no), 'layer{}'.format(layer_no + 1)) new_state_dict[name] = state_dict.pop(state_name) if strict: - assert len(state_dict) == 0 + if len(state_dict) != 0: + raise ValueError('Failed to convert resnet weights to serialClassificationNet') return new_state_dict diff --git a/vega/modules/__init__.py b/vega/modules/__init__.py index f87f1c9..cb69be7 100644 --- a/vega/modules/__init__.py +++ b/vega/modules/__init__.py @@ -1,12 +1,18 @@ # -*- coding:utf-8 -*- # Copyright (C) 2020. Huawei Technologies Co., Ltd. All rights reserved. -# This program is free software; you can redistribute it and/or modify -# it under the terms of the MIT License. -# This program is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# MIT License for more details. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. """Import and register modules automatically.""" diff --git a/vega/modules/arch/architecture.py b/vega/modules/arch/architecture.py index bed3f7e..a66b002 100644 --- a/vega/modules/arch/architecture.py +++ b/vega/modules/arch/architecture.py @@ -1,12 +1,18 @@ # -*- coding: utf-8 -*- # Copyright (C) 2020. Huawei Technologies Co., Ltd. All rights reserved. -# This program is free software; you can redistribute it and/or modify -# it under the terms of the MIT License. 
-# This program is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# MIT License for more details. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. """Architecture.""" import os import logging @@ -17,6 +23,39 @@ from vega.core.pipeline.conf import PipeStepConfig +def change_model_rebuild(model, changed_name_list, mask_weight_list, pretrained_model_file): + """Change model and rebuild.""" + from mindspore.train.serialization import load_checkpoint + if len(changed_name_list) == len(mask_weight_list): + model_desc = model.desc + for changed_name, mask in zip(changed_name_list, mask_weight_list): + name = changed_name.split('.') + if len(name) <= 6: + if len(name) == 6: + model_desc[name[0]][name[1]][name[2]][name[3]][name[4]][name[5]] = sum(mask) + if len(name) == 5: + model_desc[name[0]][name[1]][name[2]][name[3]][name[4]] = sum(mask) + if len(name) == 4: + model_desc[name[0]][name[1]][name[2]][name[3]] = sum(mask) + if len(name) == 3: + model_desc[name[0]][name[1]][name[2]] = sum(mask) + if len(name) == 2: + model_desc[name[0]][name[1]] = sum(mask) + else: + raise ValueError('Name must be shorter than 6.') + network = NetworkDesc(model_desc) + model = network.to_model() + model_desc.pop('_arch_params') if '_arch_params' in model_desc else model_desc + model.desc = model_desc + if pretrained_model_file and hasattr(model, "pretrained"): + pretrained_weight = model.pretrained(pretrained_model_file) + load_checkpoint(pretrained_weight, net=model) + os.remove(pretrained_weight) + return model + else: + raise ValueError('Name and weight do not match.') + + def transform_architecture(model, pretrained_model_file=None): """Transform architecture.""" if not hasattr(model, "_arch_params") or not model._arch_params or \ @@ -33,34 +72,7 @@ def transform_architecture(model, pretrained_model_file=None): if not ClassFactory.is_exists(model._arch_params_type, module.model_name): continue changed_name_list, mask_weight_list = decode_fn_ms(module, changed_name_list, mask_weight_list) - assert len(changed_name_list) == len(mask_weight_list) - # change model and rebuild - model_desc = model.desc - # root_name = [name for name in list(model_desc.keys()) if name not in ('type', '_arch_params')] - for changed_name, mask in zip(changed_name_list, mask_weight_list): - name = changed_name.split('.') - # name[0] = root_name[int(name[0])] - assert len(name) <= 6 - if len(name) == 6: - model_desc[name[0]][name[1]][name[2]][name[3]][name[4]][name[5]] = sum(mask) - if len(name) == 5: - model_desc[name[0]][name[1]][name[2]][name[3]][name[4]] = sum(mask) - if len(name) == 4: - model_desc[name[0]][name[1]][name[2]][name[3]] = sum(mask) - if len(name) == 3: - model_desc[name[0]][name[1]][name[2]] = sum(mask) - if len(name) == 2: - model_desc[name[0]][name[1]] = sum(mask) - network = NetworkDesc(model_desc) - model = network.to_model() - model_desc.pop('_arch_params') if '_arch_params' in model_desc 
else model_desc - model.desc = model_desc - # change weight - if pretrained_model_file and hasattr(model, "pretrained"): - pretrained_weight = model.pretrained(pretrained_model_file) - load_checkpoint(pretrained_weight, net=model) - os.remove(pretrained_weight) - + model = change_model_rebuild(model, changed_name_list, mask_weight_list, pretrained_model_file) else: for name, module in model.named_modules(): if not ClassFactory.is_exists(model._arch_params_type, module.model_name): @@ -68,7 +80,6 @@ def transform_architecture(model, pretrained_model_file=None): arch_cls = ClassFactory.get_cls(model._arch_params_type, module.model_name) decode_fn(module, arch_cls) module.register_forward_pre_hook(arch_cls.fit_weights) - # module.register_forward_hook(module.clear_module_arch_params) return model diff --git a/vega/modules/arch/combiner.py b/vega/modules/arch/combiner.py index ef038d3..039c7dd 100644 --- a/vega/modules/arch/combiner.py +++ b/vega/modules/arch/combiner.py @@ -1,18 +1,24 @@ # -*- coding: utf-8 -*- # Copyright (C) 2020. Huawei Technologies Co., Ltd. All rights reserved. -# This program is free software; you can redistribute it and/or modify -# it under the terms of the MIT License. -# This program is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# MIT License for more details. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. """ConnectionsArchParamsCombiner.""" from collections import deque from vega.modules.operators import ops from vega.modules.connections import Add -from vega.modules.connections import Module +from vega.modules.module import Module def is_depth_wise_conv(module): diff --git a/vega/modules/arch/double_channels_arch.py b/vega/modules/arch/double_channels_arch.py index ee29e5c..8983c2a 100644 --- a/vega/modules/arch/double_channels_arch.py +++ b/vega/modules/arch/double_channels_arch.py @@ -1,12 +1,18 @@ # -*- coding: utf-8 -*- # Copyright (C) 2020. Huawei Technologies Co., Ltd. All rights reserved. -# This program is free software; you can redistribute it and/or modify -# it under the terms of the MIT License. -# This program is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# MIT License for more details. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
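# change_model_rebuild above writes sum(mask) back into the nested model
# description, addressing the entry by the dotted module name split into keys.
# A minimal sketch of that update on a toy description (the dict and the
# set_by_path helper are illustrative, not a real Vega model desc):
def set_by_path(desc, dotted_name, value):
    """Assign value at a dotted key path such as 'backbone.conv1.out_channels'."""
    keys = dotted_name.split('.')
    node = desc
    for key in keys[:-1]:
        node = node[key]
    node[keys[-1]] = value

desc = {'backbone': {'conv1': {'out_channels': 64}}}
mask = [1, 0, 1, 1]  # channel mask produced by the search
set_by_path(desc, 'backbone.conv1.out_channels', sum(mask))
print(desc)  # {'backbone': {'conv1': {'out_channels': 3}}}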
"""Ops ArchSpace.""" from vega import is_torch_backend from vega.common.class_factory import ClassFactory @@ -44,7 +50,6 @@ def fit_weights(module, x): padding = [0, out_channels_diff] else: groups = module.groups - # depthwise conv if groups == module.in_channels and module.out_channels < groups: module.out_channels = groups in_channels_diff = int(inputs.shape[1]) - int(weight.shape[in_channels_axis] * module.groups) @@ -52,7 +57,6 @@ def fit_weights(module, x): if in_channels_diff == 0 and out_channels_diff == 0: continue padding = [0, 0, 0, 0, 0, 0, 0, 0] - # fit input channel if groups == 1: if in_channels_diff != 0: padding[5] = in_channels_diff @@ -136,7 +140,6 @@ def fit_weights(module, x): else: fit_weights_shapes.append(out_channels) fit_weights_shape = min(fit_weights_shapes) - # fit_weights_shape = max(fit_weights_shapes) for child in module.children(): if isinstance(child, ops.MaxPool2d): fit_weights_shape = inputs.shape[1] diff --git a/vega/modules/arch/prune_arch.py b/vega/modules/arch/prune_arch.py index eac22ff..a8896c9 100644 --- a/vega/modules/arch/prune_arch.py +++ b/vega/modules/arch/prune_arch.py @@ -1,15 +1,22 @@ # -*- coding: utf-8 -*- # Copyright (C) 2020. Huawei Technologies Co., Ltd. All rights reserved. -# This program is free software; you can redistribute it and/or modify -# it under the terms of the MIT License. -# This program is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# MIT License for more details. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. """Prune ArchSpace.""" +import logging import vega from vega import is_torch_backend, is_tf_backend from vega.modules.operators import ops @@ -23,7 +30,7 @@ def _to_cpu(data): if torch.is_tensor(data): return data.cpu() except Exception: - pass + logging.debug('Falied to convert data to cpu.') if isinstance(data, dict): return {k: _to_cpu(v) for k, v in data.items()} @@ -99,8 +106,6 @@ def fit_weights(module, x): arch_params = module.module_arch_params if not arch_params: return None - # for name, parameter in module.named_parameters(): - # parameter.requires_grad_(False) idx_in = [idx for idx, value in enumerate(arch_params.in_features) if value == 1] weights = module.get_weights() for name, weight in weights.items(): diff --git a/vega/modules/backbones/serialnet.py b/vega/modules/backbones/serialnet.py index 07f69ef..6cdb1b7 100644 --- a/vega/modules/backbones/serialnet.py +++ b/vega/modules/backbones/serialnet.py @@ -1,12 +1,18 @@ # -*- coding:utf-8 -*- # Copyright (C) 2020. Huawei Technologies Co., Ltd. All rights reserved. -# This program is free software; you can redistribute it and/or modify -# it under the terms of the MIT License. -# This program is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# MIT License for more details. 
+# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. """This is Network for SerialNet.""" from vega.common import ClassFactory, ClassType from vega.modules.operators import ops @@ -27,7 +33,7 @@ def __init__(self, code='111-2111-211111-211', num_classes=1000, block='Bottlene self.head = LinearClassificationHead(self.out_channels, num_classes) def load_state_dict(self, state_dict=None, strict=None): - """Load and freeze backbone.""" + """Load and freeze backbone state.""" state_dict = {k.replace('backbone.body', 'backbone'): v for k, v in state_dict.items()} return super().load_state_dict(state_dict, strict or False) diff --git a/vega/modules/blocks/__init__.py b/vega/modules/blocks/__init__.py index 0569983..a6137f0 100644 --- a/vega/modules/blocks/__init__.py +++ b/vega/modules/blocks/__init__.py @@ -2,3 +2,4 @@ build_norm_layer, build_conv_layer from .head import LinearClassificationHead, AuxiliaryHead from .micro_decoder import MicroDecoder, MergeCell, MicroDecoder_Upsample, Seghead +from .ghost import GhostModule diff --git a/vega/modules/blocks/blocks.py b/vega/modules/blocks/blocks.py index b941856..0d5e43f 100644 --- a/vega/modules/blocks/blocks.py +++ b/vega/modules/blocks/blocks.py @@ -1,12 +1,18 @@ # -*- coding:utf-8 -*- # Copyright (C) 2020. Huawei Technologies Co., Ltd. All rights reserved. -# This program is free software; you can redistribute it and/or modify -# it under the terms of the MIT License. -# This program is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# MIT License for more details. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. """This is SearchSpace for blocks.""" from vega.common import ClassType, ClassFactory @@ -19,7 +25,7 @@ class ShortCut(Module): """Create Shortcut SearchSpace.""" - def __init__(self, inchannel, outchannel, expansion, stride=1, norm_layer={"norm_type": 'BN'}): + def __init__(self, inchannel, outchannel, expansion, stride=1, norm_layer=None): """Create ShortCut layer. :param inchannel: input channel. 
@@ -32,6 +38,8 @@ def __init__(self, inchannel, outchannel, expansion, stride=1, norm_layer={"norm :type stride: int """ super(ShortCut, self).__init__() + if norm_layer is None: + norm_layer = {"norm_type": 'BN'} if stride != 1 or inchannel != outchannel * expansion: self.conv1 = ops.Conv2d(in_channels=inchannel, out_channels=outchannel * expansion, kernel_size=1, stride=stride, bias=False) @@ -44,7 +52,7 @@ def __init__(self, inchannel, outchannel, expansion, stride=1, norm_layer={"norm class BottleConv(Module): """Create BottleConv Searchspace.""" - def __init__(self, inchannel, outchannel, expansion, groups, base_width, stride=1, norm_layer={"norm_type": 'BN'}, + def __init__(self, inchannel, outchannel, expansion, groups, base_width, stride=1, norm_layer=None, Conv2d='Conv2d'): """Create BottleConv layer. @@ -58,6 +66,8 @@ def __init__(self, inchannel, outchannel, expansion, groups, base_width, stride= :type stride: int """ super(BottleConv, self).__init__() + if norm_layer is None: + norm_layer = {"norm_type": 'BN'} outchannel = int(outchannel * (base_width / 64.)) * groups self.conv1 = build_conv_layer(in_channels=inchannel, out_channels=outchannel, kernel_size=1, stride=1, bias=False, Conv2d=Conv2d) @@ -76,7 +86,7 @@ def __init__(self, inchannel, outchannel, expansion, groups, base_width, stride= class BasicConv(Module): """Create BasicConv Searchspace.""" - def __init__(self, inchannel, outchannel, groups=1, base_width=64, stride=1, norm_layer={"norm_type": 'BN'}, + def __init__(self, inchannel, outchannel, groups=1, base_width=64, stride=1, norm_layer=None, Conv2d='Conv2d'): """Create BasicConv layer. @@ -88,6 +98,8 @@ def __init__(self, inchannel, outchannel, groups=1, base_width=64, stride=1, nor :type stride: int """ super(BasicConv, self).__init__() + if norm_layer is None: + norm_layer = {"norm_type": 'BN'} self.conv = build_conv_layer(in_channels=inchannel, out_channels=outchannel, kernel_size=3, stride=stride, padding=1, groups=groups, bias=False, Conv2d=Conv2d) self.batch = build_norm_layer(features=outchannel, **norm_layer) @@ -138,7 +150,7 @@ class BasicBlock(Module): expansion = 1 - def __init__(self, inchannel, outchannel, groups=1, base_width=64, stride=1, norm_layer={"norm_type": 'BN'}, + def __init__(self, inchannel, outchannel, groups=1, base_width=64, stride=1, norm_layer=None, Conv2d='Conv2d'): """Create BasicBlock layers. @@ -150,6 +162,8 @@ def __init__(self, inchannel, outchannel, groups=1, base_width=64, stride=1, nor :type stride: int """ super(BasicBlock, self).__init__() + if norm_layer is None: + norm_layer = {"norm_type": 'BN'} base_conv = BasicConv(inchannel=inchannel, outchannel=outchannel, stride=stride, groups=groups, base_width=base_width, norm_layer=norm_layer, Conv2d=Conv2d) shortcut = ShortCut(inchannel=inchannel, outchannel=outchannel, expansion=self.expansion, @@ -164,7 +178,7 @@ class BottleneckBlock(Module): expansion = 4 - def __init__(self, inchannel, outchannel, groups=1, base_width=64, stride=1, norm_layer={"norm_type": 'BN'}, + def __init__(self, inchannel, outchannel, groups=1, base_width=64, stride=1, norm_layer=None, Conv2d='Conv2d'): """Create BottleneckBlock layers. 
@@ -176,6 +190,8 @@ def __init__(self, inchannel, outchannel, groups=1, base_width=64, stride=1, nor :type stride: int """ super(BottleneckBlock, self).__init__() + if norm_layer is None: + norm_layer = {"norm_type": 'BN'} bottle_conv = BottleConv(inchannel=inchannel, outchannel=outchannel, expansion=self.expansion, stride=stride, groups=groups, base_width=base_width, norm_layer=norm_layer, Conv2d=Conv2d) @@ -239,9 +255,11 @@ def build_norm_layer(features, norm_type='BN', **kwargs): if norm_type == 'BN': return ops.BatchNorm2d(features, **kwargs) elif norm_type == 'GN': - assert 'num_groups' in kwargs.keys(), 'num_groups is required for group normalization' - num_groups = kwargs.pop('num_groups') - return ops.GroupNorm(num_groups, features, **kwargs) + if 'num_groups' in kwargs.keys(): + num_groups = kwargs.pop('num_groups') + return ops.GroupNorm(num_groups, features, **kwargs) + else: + raise ValueError('Num_groups is required for group normalization') elif norm_type == 'Sync': return ops.SyncBatchNorm(features, **kwargs) else: diff --git a/vega/modules/blocks/ghost.py b/vega/modules/blocks/ghost.py new file mode 100644 index 0000000..021695f --- /dev/null +++ b/vega/modules/blocks/ghost.py @@ -0,0 +1,141 @@ +# -*- coding:utf-8 -*- + +# Copyright (C) 2020. Huawei Technologies Co., Ltd. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
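The blocks.py hunks above replace mutable dict defaults such as `norm_layer={"norm_type": 'BN'}` with `None` plus an in-body guard. A dict written in the signature is created once when the function is defined and shared by every call, so in-place changes can leak between instances; the guard gives each call its own dict. A minimal sketch of the pattern on a hypothetical `make_layer` helper (the name is illustrative, not from the diff):

```python
def make_layer(features, norm_layer=None):
    # Creating the dict inside the body gives every call a fresh, independent
    # default instead of one dict shared across all calls.
    if norm_layer is None:
        norm_layer = {"norm_type": "BN"}
    return features, norm_layer
```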
+"""This is Ghost Module for blocks.""" +from vega.common import ClassType, ClassFactory +from vega.modules.module import Module +from vega.modules.operators import ops + + +class Bottleneck(Module): + """Bottleneck class.""" + + expansion = 4 + + def __init__(self, inplanes, planes, stride=1, downsample=None): + super(Bottleneck, self).__init__() + self.conv1 = ops.Conv2d(inplanes, planes, kernel_size=1, bias=False) + self.bn1 = ops.BatchNorm2d(planes) + self.conv2 = ops.Conv2d(planes, planes, kernel_size=3, stride=stride, + padding=1, bias=False) + self.bn2 = ops.BatchNorm2d(planes) + self.conv3 = ops.Conv2d(planes, planes * 4, kernel_size=1, bias=False) + self.bn3 = ops.BatchNorm2d(planes * 4) + self.relu = ops.Relu(inplace=True) + self.downsample = downsample + self.stride = stride + + def forward(self, x): + """Forward x.""" + residual = x + + out = self.conv1(x) + out = self.bn1(out) + out = self.relu(out) + + out = self.conv2(out) + out = self.bn2(out) + out = self.relu(out) + + out = self.conv3(out) + out = self.bn3(out) + + if self.downsample is not None: + residual = self.downsample(x) + + out += residual + out = self.relu(out) + + return out + + +@ClassFactory.register(ClassType.NETWORK) +class GhostModule(Module): + """Ghost Module.""" + + def __init__(self, inplanes, planes, blocks, stride=1, cheap_ratio=0.5): + super(GhostModule, self).__init__() + from torch.nn import Sequential + block = Bottleneck + self.inplanes = inplanes + self.planes = planes + self.stride = stride + downsample = None + if stride != 1 or inplanes != planes * block.expansion: + downsample = Sequential( + ops.Conv2d(inplanes, planes * block.expansion, kernel_size=1, stride=stride, bias=False), + ops.BatchNorm2d(planes * block.expansion), + ) + + self.blocks = blocks + self.base = block(inplanes, planes, stride, downsample) + self.end = block(planes * block.expansion, planes, 1) + + if blocks > 2: + self.c_base_half = planes * block.expansion // 2 + inplanes = planes * block.expansion // 2 + cheap_planes = int(planes * cheap_ratio) + self.cheap_planes = cheap_planes + raw_planes = planes - cheap_planes + self.merge = Sequential( + ops.AdaptiveAvgPool2d(1), + ops.Conv2d(raw_planes * block.expansion * blocks, cheap_planes * block.expansion, kernel_size=1, + stride=1, bias=False), + ops.BatchNorm2d(cheap_planes * block.expansion), + ) + self.cheap = Sequential( + ops.Conv2d(planes * block.expansion, cheap_planes * block.expansion, + kernel_size=1, stride=1, padding=0, bias=False), + ops.BatchNorm2d(cheap_planes * block.expansion), + ) + self.cheap_relu = ops.Relu(inplace=True) + + layers = [] + + inplanes = raw_planes * block.expansion + layers.append( + ops.Conv2d(planes * block.expansion, inplanes, kernel_size=1, stride=1, padding=0, bias=False)) # + + for i in range(1, blocks - 1): + layers.append(block(inplanes, raw_planes)) + self.layers = Sequential(*layers) + + def forward(self, input): + """Forward x.""" + x0 = self.base(input) + + if self.blocks > 2: + m_list = [x0] + x = x0 + for n, l in enumerate(self.layers): + x = l(x) + if n != 0: + m_list.append(x) + m = ops.concat(m_list, 1) + + m = self.merge(m) + if self.cheap_planes > 0: + c = x0 + c = self.cheap_relu(self.cheap(c) + m) + x = ops.concat((x, c), 1) + x = self.end(x) + else: + x = self.end(x0) + return x + + def to_desc(self, recursion=True): + """Convert to desc.""" + return {"type": "GhostModule", "inplanes": self.inplanes, "planes": self.planes, "blocks": self.blocks, + "stride": self.stride} diff --git a/vega/modules/blocks/head.py 
b/vega/modules/blocks/head.py index 9669995..fc2cd4c 100644 --- a/vega/modules/blocks/head.py +++ b/vega/modules/blocks/head.py @@ -1,12 +1,18 @@ # -*- coding:utf-8 -*- # Copyright (C) 2020. Huawei Technologies Co., Ltd. All rights reserved. -# This program is free software; you can redistribute it and/or modify -# it under the terms of the MIT License. -# This program is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# MIT License for more details. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. """This is SearchSpace for head.""" from vega.modules.module import Module diff --git a/vega/modules/blocks/micro_decoder.py b/vega/modules/blocks/micro_decoder.py index 3022098..6490549 100644 --- a/vega/modules/blocks/micro_decoder.py +++ b/vega/modules/blocks/micro_decoder.py @@ -1,12 +1,18 @@ # -*- coding:utf-8 -*- # Copyright (C) 2020. Huawei Technologies Co., Ltd. All rights reserved. -# This program is free software; you can redistribute it and/or modify -# it under the terms of the MIT License. -# This program is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# MIT License for more details. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. """This is SearchSpace for blocks.""" import sys diff --git a/vega/modules/cells/__init__.py b/vega/modules/cells/__init__.py index f3ee504..be159a5 100644 --- a/vega/modules/cells/__init__.py +++ b/vega/modules/cells/__init__.py @@ -1,2 +1,2 @@ -from .basic import * -from .dag_cell import * +from .basic import VariantCell, BasicCell +from .dag_cell import DagGraphCell, ConvBnRelu, Conv3x3BnRelu, Conv1x1BnRelu, MaxPool3x3, Input, Output diff --git a/vega/modules/cells/basic.py b/vega/modules/cells/basic.py index c4a3255..e8bfcdf 100644 --- a/vega/modules/cells/basic.py +++ b/vega/modules/cells/basic.py @@ -1,12 +1,18 @@ # -*- coding:utf-8 -*- # Copyright (C) 2020. Huawei Technologies Co., Ltd. All rights reserved. -# This program is free software; you can redistribute it and/or modify -# it under the terms of the MIT License. -# This program is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# MIT License for more details. 
+# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. """This is SearchSpace for network.""" from vega.common import ClassType, ClassFactory diff --git a/vega/modules/cells/dag_cell.py b/vega/modules/cells/dag_cell.py index 1ceaa32..02a1933 100644 --- a/vega/modules/cells/dag_cell.py +++ b/vega/modules/cells/dag_cell.py @@ -1,15 +1,21 @@ # -*- coding:utf-8 -*- # Copyright (C) 2020. Huawei Technologies Co., Ltd. All rights reserved. -# This program is free software; you can redistribute it and/or modify -# it under the terms of the MIT License. -# This program is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# MIT License for more details. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
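The new `vega/modules/blocks/ghost.py` above defines `GhostModule`, which stacks `Bottleneck` blocks and, when `blocks > 2`, splits the intermediate features into a raw path and a cheap path that are merged before the final block. A minimal usage sketch, assuming the PyTorch backend is selected first; the `vega.set_backend` call, the input sizes, and the printed shape are illustrative assumptions, not part of the diff:

```python
import torch
import vega

vega.set_backend('pytorch')  # assumed: backend must be chosen before building modules
from vega.modules.blocks import GhostModule  # re-exported by blocks/__init__.py in this diff

# 64 input channels; output has planes * Bottleneck.expansion = 256 channels
block = GhostModule(inplanes=64, planes=64, blocks=3, stride=1, cheap_ratio=0.5)
x = torch.randn(2, 64, 32, 32)
y = block(x)
print(y.shape)  # expected torch.Size([2, 256, 32, 32]) with stride=1
```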
"""This is DAG Cell for network.""" from vega.modules.module import Module -from dag import DAG +from vega.common.dag import DAG import numpy as np from vega.modules.operators import ops from vega.modules.connections import Sequential @@ -37,7 +43,7 @@ def _add_nodes(self): def _create_dag(self): dag = DAG() for name, modules in self.named_children(): - dag.add_node_if_not_exists(int(name)) + dag.add_node(int(name)) frontier = [0] num_vertices = np.shape(self.adj_matrix)[0] while frontier: @@ -57,7 +63,7 @@ def forward(self, x, *args, **kwargs): return out def _forward_module(self, x, parent, dag): - parent_nodes = dag.predecessors(parent) + parent_nodes = dag.pre_nodes(parent) if len(parent_nodes) <= 1: next_input = self._modules.get(str(parent))(x) elif self.out_tensors.get(parent) and len(self.out_tensors.get(parent)) == len(parent_nodes) - 1: @@ -69,7 +75,7 @@ def _forward_module(self, x, parent, dag): self.out_tensors[parent] = [] self.out_tensors[parent].append(x) return None - children = dag.downstream(parent) + children = dag.next_nodes(parent) for child in children: out = self._forward_module(next_input, child, dag) if out is not None: diff --git a/vega/modules/connections/__init__.py b/vega/modules/connections/__init__.py index 79a588f..61cf1ce 100644 --- a/vega/modules/connections/__init__.py +++ b/vega/modules/connections/__init__.py @@ -1 +1,2 @@ -from .connections import * +from .connections import ConnectionsDecorator, Add, Sequential, ModuleList, OutlistSequential, \ + OutDictSequential, MultiOutput, Concat, ProcessList, Repeat, Cells diff --git a/vega/modules/connections/connections.py b/vega/modules/connections/connections.py index 0d16f64..71ef9f6 100644 --- a/vega/modules/connections/connections.py +++ b/vega/modules/connections/connections.py @@ -1,12 +1,18 @@ # -*- coding:utf-8 -*- # Copyright (C) 2020. Huawei Technologies Co., Ltd. All rights reserved. -# This program is free software; you can redistribute it and/or modify -# it under the terms of the MIT License. -# This program is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# MIT License for more details. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
"""This is SearchSpace for connection.""" from copy import deepcopy @@ -22,7 +28,6 @@ class ConnectionsDecorator(Module): def __init__(self, *models, **kwargs): super(ConnectionsDecorator, self).__init__(*models, **kwargs) - # for key, model in kwargs.items(): if kwargs: for key, model in kwargs.items(): self.__add_module(key, model) @@ -274,9 +279,11 @@ def call(self, inputs): inputs = list(inputs) for model, idx in zip(self.children(), self.out_list): if isinstance(idx, list): - assert len(idx) == 2 - output = model(inputs[idx[0]], inputs[idx[1]]) - inputs.append(output) + if len(idx) == 2: + output = model(inputs[idx[0]], inputs[idx[1]]) + inputs.append(output) + else: + raise ValueError('Idx must be 2.') else: inputs.append(model(inputs[idx])) output = inputs @@ -306,11 +313,11 @@ def __init__(self, num_reps, items, ref): v_idx = idx if len(values) > idx else -1 params[key] = values[v_idx] params = update_dict_with_flatten_keys(ref_copy, params) - name, module = create_module(params) + name, module = _create_module(params) self.add_module('{}{}'.format(name, idx), module) -def create_module(model): +def _create_module(model): """Create search space from model or desc.""" if isinstance(model, Module): return model.__class__.__name__, model @@ -335,7 +342,6 @@ def __init__(self, desc, C_curr, C, auxiliary=False, auxiliary_layer=0): self.auxiliary = auxiliary if auxiliary: self.auxiliary_layer = auxiliary_layer - # output params normal_info = desc.get('normal') if normal_info: self.k = len(normal_info.genotype) diff --git a/vega/modules/distillation/dis.py b/vega/modules/distillation/dis.py index a259aa0..e5c5957 100644 --- a/vega/modules/distillation/dis.py +++ b/vega/modules/distillation/dis.py @@ -1,12 +1,18 @@ # -*- coding:utf-8 -*- # Copyright (C) 2020. Huawei Technologies Co., Ltd. All rights reserved. -# This program is free software; you can redistribute it and/or modify -# it under the terms of the MIT License. -# This program is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# MIT License for more details. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. """Distillation.""" from vega.modules.module import Module from vega.common.class_factory import ClassType, ClassFactory @@ -53,28 +59,29 @@ def call(self, input_ids, token_type_ids, attention_mask, **kwargs): if not self.training: return self.head(pooled_output) att_loss, rep_loss = 0., 0. 
- # no grade save memory - # with ops.no_grad(): _, teacher_output = self.teacher(input_ids, token_type_ids, attention_mask) teacher_atts, teacher_reps = teacher_output[1:], teacher_output - teacher_reps = [teacher_rep.detach() for teacher_rep in teacher_reps] # speedup 1.5x + teacher_reps = [teacher_rep.detach() for teacher_rep in teacher_reps] teacher_atts = [teacher_att.detach() for teacher_att in teacher_atts] teacher_layer_num = len(teacher_atts) student_layer_num = len(student_atts) - assert teacher_layer_num % student_layer_num == 0 - layers_per_block = int(teacher_layer_num / student_layer_num) - new_teacher_atts = [teacher_atts[i * layers_per_block + layers_per_block - 1] for i in range(student_layer_num)] + if teacher_layer_num % student_layer_num == 0: + layers_per_block = int(teacher_layer_num / student_layer_num) + new_teacher_atts = [teacher_atts[i * layers_per_block + layers_per_block - 1] for i in + range(student_layer_num)] - for student_att, teacher_att in zip(student_atts, new_teacher_atts): - student_att = ops.where(student_att <= -1e2, ops.zeros_like(student_att).cuda(), student_att) - teacher_att = ops.where(teacher_att <= -1e2, ops.zeros_like(teacher_att).cuda(), teacher_att) - att_loss += self.loss_mse(student_att, teacher_att) + for student_att, teacher_att in zip(student_atts, new_teacher_atts): + student_att = ops.where(student_att <= -1e2, ops.zeros_like(student_att).cuda(), student_att) + teacher_att = ops.where(teacher_att <= -1e2, ops.zeros_like(teacher_att).cuda(), teacher_att) + att_loss += self.loss_mse(student_att, teacher_att) - new_teacher_reps = [teacher_reps[i * layers_per_block] for i in range(student_layer_num + 1)] - new_student_reps = student_reps + new_teacher_reps = [teacher_reps[i * layers_per_block] for i in range(student_layer_num + 1)] + new_student_reps = student_reps - for student_rep, teacher_rep in zip(new_student_reps, new_teacher_reps): - rep_loss += self.loss_mse(student_rep, teacher_rep) + for student_rep, teacher_rep in zip(new_student_reps, new_teacher_reps): + rep_loss += self.loss_mse(student_rep, teacher_rep) - loss = att_loss + rep_loss - return loss + loss = att_loss + rep_loss + return loss + else: + raise ValueError('Num of layer is Wrong.') diff --git a/vega/modules/get_module_class.py b/vega/modules/get_module_class.py index fa19b89..bd015d6 100644 --- a/vega/modules/get_module_class.py +++ b/vega/modules/get_module_class.py @@ -1,12 +1,18 @@ # -*- coding: utf-8 -*- # Copyright (C) 2020. Huawei Technologies Co., Ltd. All rights reserved. -# This program is free software; you can redistribute it and/or modify -# it under the terms of the MIT License. -# This program is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# MIT License for more details. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
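In the distillation hunk above, the teacher-to-student layer mapping applies only when the teacher depth is an exact multiple of the student depth: attention maps are taken from the last teacher layer of each block of `layers_per_block` layers, and hidden states from each block boundary (one extra entry; in BERT-style models the representations typically include the embedding output). A small worked sketch of the index selection alone, with illustrative layer counts:

```python
teacher_layer_num, student_layer_num = 12, 4                  # illustrative sizes only
layers_per_block = teacher_layer_num // student_layer_num     # -> 3

# attention maps: last teacher layer within each block
att_idx = [i * layers_per_block + layers_per_block - 1 for i in range(student_layer_num)]
# hidden states: block boundaries, plus one extra entry
rep_idx = [i * layers_per_block for i in range(student_layer_num + 1)]

print(att_idx)  # [2, 5, 8, 11]
print(rep_idx)  # [0, 3, 6, 9, 12]
```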
"""Utils function of SearchSpace.""" from vega.common import ClassFactory, ClassType diff --git a/vega/modules/getters/__init__.py b/vega/modules/getters/__init__.py index fec28d2..b0291dd 100644 --- a/vega/modules/getters/__init__.py +++ b/vega/modules/getters/__init__.py @@ -1 +1 @@ -from .graph_getter import * +from .graph_getter import GraphGetter diff --git a/vega/modules/getters/graph_getter.py b/vega/modules/getters/graph_getter.py index 0529415..21e97b9 100644 --- a/vega/modules/getters/graph_getter.py +++ b/vega/modules/getters/graph_getter.py @@ -1,12 +1,18 @@ # -*- coding: utf-8 -*- # Copyright (C) 2020. Huawei Technologies Co., Ltd. All rights reserved. -# This program is free software; you can redistribute it and/or modify -# it under the terms of the MIT License. -# This program is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# MIT License for more details. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. """Getter for Graph.""" from collections import OrderedDict diff --git a/vega/modules/graph_utils.py b/vega/modules/graph_utils.py index e7e3534..cf8ee8b 100644 --- a/vega/modules/graph_utils.py +++ b/vega/modules/graph_utils.py @@ -1,18 +1,24 @@ # -*- coding:utf-8 -*- # Copyright (C) 2020. Huawei Technologies Co., Ltd. All rights reserved. -# This program is free software; you can redistribute it and/or modify -# it under the terms of the MIT License. -# This program is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# MIT License for more details. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
"""Graph utils to Modules.""" import logging -from dag import DAG from collections import OrderedDict import re +from vega.common.dag import DAG from .nodes import Node from .nodes import Sequential, Add @@ -71,7 +77,7 @@ def ops2dag(merged_ops): for name, node in merged_ops.items(): inps = node['inputs'] pre_node_name = 'root' - dag.add_node_if_not_exists(name) + dag.add_node(name) dot.node(name=name, label=name) if inps is not None: for inp in inps: @@ -91,7 +97,7 @@ class Dag2Module(object): """Parse dag to module desc.""" def __init__(self, dag, ops): - self.g = dag.graph + self.g = dag.nodes self.ops = ops self.e = self._convert_edge_list() self.muti_edges = [k for k, v in self.g.items() if len(v) > 1] diff --git a/vega/modules/loss/ProbOhemCrossEntropy2d.py b/vega/modules/loss/ProbOhemCrossEntropy2d.py index 977d172..17a8d44 100644 --- a/vega/modules/loss/ProbOhemCrossEntropy2d.py +++ b/vega/modules/loss/ProbOhemCrossEntropy2d.py @@ -1,12 +1,18 @@ # -*- coding:utf-8 -*- # Copyright (C) 2020. Huawei Technologies Co., Ltd. All rights reserved. -# This program is free software; you can redistribute it and/or modify -# it under the terms of the MIT License. -# This program is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# MIT License for more details. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. """ProbOhemCrossEntropy2d loss function.""" import torch diff --git a/vega/modules/loss/__init__.py b/vega/modules/loss/__init__.py index bf6c045..0b142ed 100644 --- a/vega/modules/loss/__init__.py +++ b/vega/modules/loss/__init__.py @@ -1,13 +1,8 @@ -from .loss import Loss from vega.common.class_factory import ClassFactory +from .loss import Loss ClassFactory.lazy_register("vega.modules.loss", { "multiloss": ["trainer.loss:MultiLoss", "trainer.loss:SingleLoss"], - "focal_loss": ["trainer.loss:FocalLoss"], - "f1_loss": ["trainer.loss:F1Loss"], - "forecast_loss": ["trainer.loss:ForecastLoss"], - "mean_loss": ["trainer.loss:MeanLoss"], "ProbOhemCrossEntropy2d": ["trainer.loss:ProbOhemCrossEntropy2d"], - "gan_loss": ["trainer.loss:GANLoss"], "ms_custom_loss": ["trainer.loss:CustomSoftmaxCrossEntropyWithLogits"], }) diff --git a/vega/modules/loss/f1_loss.py b/vega/modules/loss/f1_loss.py deleted file mode 100644 index 23af509..0000000 --- a/vega/modules/loss/f1_loss.py +++ /dev/null @@ -1,45 +0,0 @@ -# -*- coding: utf-8 -*- - -# Copyright (C) 2020. Huawei Technologies Co., Ltd. All rights reserved. -# This program is free software; you can redistribute it and/or modify -# it under the terms of the MIT License. -# This program is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# MIT License for more details. 
- -"""FocalLoss for unbalanced data.""" -from vega.modules.operators import ops -from vega.modules.module import Module -from vega.common import ClassType, ClassFactory - - -@ClassFactory.register(ClassType.LOSS) -class F1Loss(Module): - """F1 Loss for unbalanced data.""" - - def __init__(self, epsilon=1e-7): - super(F1Loss, self).__init__() - self.epsilon = epsilon - - def call(self, inputs, targets): - """Compute loss. - - :param inputs: predict data. - :param targets: true data. - :return: - """ - y_true = ops.to(ops.one_hot(targets, 2, ), 'float32') - y_pred = ops.softmax(inputs, dim=1) - - tp = ops.reduce_sum(y_true * y_pred, dtype='float32') - # tn = ops.reduce_sum(((1 - y_true) * (1 - y_pred)), dtype='float32') - fp = ops.reduce_sum(((1 - y_true) * y_pred), dtype='float32') - fn = ops.reduce_sum((y_true * (1 - y_pred)), dtype='float32') - - precision = tp / (tp + fp + self.epsilon) - recall = tp / (tp + fn + self.epsilon) - - f1 = 2 * (precision * recall) / (precision + recall + self.epsilon) - f1 = ops.clamp(f1, min=self.epsilon, max=1 - self.epsilon) - return 1 - f1.mean() diff --git a/vega/modules/loss/focal_loss.py b/vega/modules/loss/focal_loss.py deleted file mode 100644 index b1601e6..0000000 --- a/vega/modules/loss/focal_loss.py +++ /dev/null @@ -1,59 +0,0 @@ -# -*- coding: utf-8 -*- - -# Copyright (C) 2020. Huawei Technologies Co., Ltd. All rights reserved. -# This program is free software; you can redistribute it and/or modify -# it under the terms of the MIT License. -# This program is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# MIT License for more details. - -"""FocalLoss for unbalanced data.""" -from vega.modules.operators import ops -from vega.modules.module import Module -from vega.common import ClassType, ClassFactory - - -@ClassFactory.register(ClassType.LOSS) -class FocalLoss(Module): - """Focal Loss for unbalanced data. - - :param alpha(1D Tensor, Variable): the scalar factor for this criterion - :param gamma(float, double): gamma > 0; reduces the relative loss for well-classified examples (p > .5), - putting more focus on hard, misclassified examples - :param size_average(bool): By default, the losses are averaged over observations for each minibatch. - However, if the field size_average is set to False, the losses are instead summed for each minibatch. - """ - - def __init__(self, class_num=2, alpha=None, gamma=2, size_average=True): - super(FocalLoss, self).__init__() - if alpha is None: - self.alpha = ops.ones(class_num, 1) - self.gamma = gamma - self.class_num = class_num - self.size_average = size_average - - def call(self, inputs, targets): - """Compute loss. - - :param inputs: predict data. - :param targets: true data. - :return: - """ - N = inputs.size(0) - C = inputs.size(1) - P = ops.softmax(inputs) - class_mask = inputs.data.new(N, C).fill_(0) - ids = targets.view(-1, 1) - class_mask.scatter_(1, ids.data, 1.) 
- if inputs.is_cuda and not self.alpha.is_cuda: - self.alpha = self.alpha.cuda() - alpha = self.alpha[ids.data.view(-1)] - probs = (P * class_mask).sum(1).view(-1, 1) - log_p = probs.log() - batch_loss = -alpha * (ops.pow((1 - probs), self.gamma)) * log_p - if self.size_average: - loss = batch_loss.mean() - else: - loss = batch_loss.sum() - return loss diff --git a/vega/modules/loss/forecast_loss.py b/vega/modules/loss/forecast_loss.py deleted file mode 100644 index 9ee9599..0000000 --- a/vega/modules/loss/forecast_loss.py +++ /dev/null @@ -1,35 +0,0 @@ -# -*- coding: utf-8 -*- - -# Copyright (C) 2020. Huawei Technologies Co., Ltd. All rights reserved. -# This program is free software; you can redistribute it and/or modify -# it under the terms of the MIT License. -# This program is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# MIT License for more details. - -"""FocalLoss for unbalanced data.""" -from vega.modules.module import Module -from vega.common import ClassType, ClassFactory - - -@ClassFactory.register(ClassType.LOSS) -class ForecastLoss(Module): - """Forecast Loss for St data.""" - - def __init__(self, epsilon=1e-4): - super(ForecastLoss, self).__init__() - self.epsilon = epsilon - - def call(self, y_pred, y_true): - """Compute loss. - - :param inputs: predict data. - :param targets: true data. - :return: - """ - import tensorflow as tf - y_true = tf.cast(y_true[2], tf.float32) - mae_loss = tf.reduce_sum(tf.losses.absolute_difference(y_true, y_pred)) - mse_loss = tf.nn.l2_loss(y_pred - y_true) - return mae_loss + self.epsilon * mse_loss diff --git a/vega/modules/loss/gan_loss.py b/vega/modules/loss/gan_loss.py deleted file mode 100644 index 02a4e8a..0000000 --- a/vega/modules/loss/gan_loss.py +++ /dev/null @@ -1,65 +0,0 @@ -# -*- coding: utf-8 -*- - -# Copyright (C) 2020. Huawei Technologies Co., Ltd. All rights reserved. -# This program is free software; you can redistribute it and/or modify -# it under the terms of the MIT License. -# This program is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# MIT License for more details. - -"""FocalLoss for unbalanced data.""" - -from vega.modules.module import Module -from vega.common import ClassType, ClassFactory -import torch -from torch import nn - - -@ClassFactory.register(ClassType.LOSS) -class DisLoss(Module): - """DisLoss.""" - - def __init__(self): - super(DisLoss, self).__init__() - - def call(self, inputs, targets): - """Compute loss. - - :param inputs: predict data. - :param targets: true data. - :return: - """ - real_validity, fake_validity = inputs - d_loss = torch.mean(nn.ReLU(inplace=True)(1.0 - real_validity)) + \ - torch.mean(nn.ReLU(inplace=True)(1 + fake_validity)) - return d_loss - - -@ClassFactory.register(ClassType.LOSS) -class GenLoss(Module): - """GenLoss.""" - - def __init__(self): - super(GenLoss, self).__init__() - - def call(self, inputs, targets): - """Compute loss. - - :param inputs: predict data. - :param targets: true data. 
- :return: - """ - fake_validity = inputs - g_loss = -torch.mean(fake_validity) - return g_loss - - -@ClassFactory.register(ClassType.LOSS) -class GANLoss(Module): - """F1 Loss for unbalanced data.""" - - def __init__(self): - super(GANLoss, self).__init__() - self.dis_loss = DisLoss() - self.gen_loss = GenLoss() diff --git a/vega/modules/loss/loss.py b/vega/modules/loss/loss.py index 53791bd..952f9c0 100644 --- a/vega/modules/loss/loss.py +++ b/vega/modules/loss/loss.py @@ -1,12 +1,18 @@ # -*- coding: utf-8 -*- # Copyright (C) 2020. Huawei Technologies Co., Ltd. All rights reserved. -# This program is free software; you can redistribute it and/or modify -# it under the terms of the MIT License. -# This program is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# MIT License for more details. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. """Manage Loss class.""" import logging from inspect import isclass @@ -61,7 +67,7 @@ def __call__(self): ClassFactory.register_from_package(timm_loss, ClassType.LOSS) except Exception: - pass + logging.debug('Falied to get timm loss.') elif vega.is_tf_backend(): import tensorflow.compat.v1.losses as tf_loss diff --git a/vega/modules/loss/mean_loss.py b/vega/modules/loss/mean_loss.py deleted file mode 100644 index 4c70d86..0000000 --- a/vega/modules/loss/mean_loss.py +++ /dev/null @@ -1,25 +0,0 @@ -# -*- coding: utf-8 -*- - -# Copyright (C) 2020. Huawei Technologies Co., Ltd. All rights reserved. -# This program is free software; you can redistribute it and/or modify -# it under the terms of the MIT License. -# This program is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# MIT License for more details. - -"""MeanLoss for data.""" -from vega.modules.module import Module -from vega.common import ClassType, ClassFactory - - -@ClassFactory.register(ClassType.LOSS) -class MeanLoss(Module): - """MeanLoss Loss for data.""" - - def __init__(self): - super(MeanLoss, self).__init__() - - def call(self, inputs, targets): - """Compute loss, mean() to average on multi-gpu.""" - return inputs.mean() diff --git a/vega/modules/loss/ms_custom_loss.py b/vega/modules/loss/ms_custom_loss.py index 65fdb8f..e1dc773 100644 --- a/vega/modules/loss/ms_custom_loss.py +++ b/vega/modules/loss/ms_custom_loss.py @@ -1,12 +1,18 @@ # -*- coding: utf-8 -*- # Copyright (C) 2020. Huawei Technologies Co., Ltd. All rights reserved. -# This program is free software; you can redistribute it and/or modify -# it under the terms of the MIT License. -# This program is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# MIT License for more details. 
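The loss package changes above delete `focal_loss`, `f1_loss`, `forecast_loss`, `gan_loss` and `mean_loss` and drop them from the `lazy_register` table in `loss/__init__.py`. A project that still needs one of them can keep a local copy and register it with the same `ClassFactory` decorator the deleted files used; a minimal sketch mirroring the removed `MeanLoss` (the class name `MyMeanLoss` is hypothetical):

```python
from vega.common import ClassType, ClassFactory
from vega.modules.module import Module


@ClassFactory.register(ClassType.LOSS)
class MyMeanLoss(Module):
    """Local stand-in for the removed MeanLoss."""

    def call(self, inputs, targets):
        # mean() averages the per-sample (and per-GPU) losses, as in the deleted file
        return inputs.mean()
```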
+# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. """CustomSoftmaxCrossEntropyWithLogits.""" diff --git a/vega/modules/loss/multiloss.py b/vega/modules/loss/multiloss.py index 6c67988..a9e62d3 100644 --- a/vega/modules/loss/multiloss.py +++ b/vega/modules/loss/multiloss.py @@ -1,12 +1,18 @@ # -*- coding: utf-8 -*- # Copyright (C) 2020. Huawei Technologies Co., Ltd. All rights reserved. -# This program is free software; you can redistribute it and/or modify -# it under the terms of the MIT License. -# This program is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# MIT License for more details. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. """jdd_loss for task.""" diff --git a/vega/modules/module.py b/vega/modules/module.py index c95d91b..72f4878 100644 --- a/vega/modules/module.py +++ b/vega/modules/module.py @@ -1,12 +1,18 @@ # -*- coding: utf-8 -*- # Copyright (C) 2020. Huawei Technologies Co., Ltd. All rights reserved. -# This program is free software; you can redistribute it and/or modify -# it under the terms of the MIT License. -# This program is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# MIT License for more details. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. """Module Define.""" diff --git a/vega/modules/necks/parallel_fpn.py b/vega/modules/necks/parallel_fpn.py index ebf6bfd..8f363d8 100644 --- a/vega/modules/necks/parallel_fpn.py +++ b/vega/modules/necks/parallel_fpn.py @@ -1,12 +1,18 @@ # -*- coding:utf-8 -*- # Copyright (C) 2020. Huawei Technologies Co., Ltd. All rights reserved. -# This program is free software; you can redistribute it and/or modify -# it under the terms of the MIT License. 
-# This program is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# MIT License for more details. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. """This is Network for SerialNet.""" from vega.common import ClassFactory, ClassType from vega.modules.operators import ops @@ -18,13 +24,15 @@ class ParallelFPN(Module): """Parallel FPN.""" - def __init__(self, in_channels=[64, 128, 256, 512], out_channels=256, code=None, + def __init__(self, in_channels=None, out_channels=256, code=None, weight_file=None, weights_prefix='head.backbone.1'): """Init FPN. :param desc: config dict """ super(ParallelFPN, self).__init__() + if in_channels is None: + in_channels = [64, 128, 256, 512] self.code = code self.inner_blocks = ModuleList() self.layer_blocks = ModuleList() diff --git a/vega/modules/nodes.py b/vega/modules/nodes.py index bd605f3..09b432c 100644 --- a/vega/modules/nodes.py +++ b/vega/modules/nodes.py @@ -1,12 +1,18 @@ # -*- coding:utf-8 -*- # Copyright (C) 2020. Huawei Technologies Co., Ltd. All rights reserved. -# This program is free software; you can redistribute it and/or modify -# it under the terms of the MIT License. -# This program is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# MIT License for more details. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. """Nodes for Modules.""" diff --git a/vega/modules/operators/__init__.py b/vega/modules/operators/__init__.py index 55d8e4b..f582bd0 100644 --- a/vega/modules/operators/__init__.py +++ b/vega/modules/operators/__init__.py @@ -1,6 +1,7 @@ -from .conv import * -from .cell import * -from .mix_ops import * -from .prune import * -from .ops import * -from .prune_filter import * +from .conv import conv3x3, conv1X1, conv5x5, conv7x7, conv_bn_relu6, conv_bn_relu, ConvBnRelu, \ + SeparatedConv, DilConv, GAPConv1x1, FactorizedReduce, ReLUConvBN, Seq, GhostConv2d +from .cell import Cell, NormalCell, ReduceCell, ContextualCell_v1, AggregateCell +from .mix_ops import MixedOp +from .prune import PruneConv2D, PruneBatchNorm, PruneLinear, PruneResnet, PruneMobileNet +from .prune_filter import PruneConv2DFilter, PruneBatchNormFilter, PruneLinearFilter +from . 
import ops diff --git a/vega/modules/operators/cell.py b/vega/modules/operators/cell.py index d039425..f452076 100644 --- a/vega/modules/operators/cell.py +++ b/vega/modules/operators/cell.py @@ -1,12 +1,18 @@ # -*- coding: utf-8 -*- # Copyright (C) 2020. Huawei Technologies Co., Ltd. All rights reserved. -# This program is free software; you can redistribute it and/or modify -# it under the terms of the MIT License. -# This program is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# MIT License for more details. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. """Import all torch operators.""" import vega @@ -137,7 +143,6 @@ def call(self, s0, s1, weights=None, drop_path_prob=0, selected_idxs=None): states_list = () for i in self._concat: states_list += (states[i],) - # states_list = tuple([states[i] for i in self._concat]) return ops.concat(states_list) @@ -197,7 +202,6 @@ def __init__(self, op_names, config, inp, repeats=1, concat=False): # turn-off scaling in batch norm self.ops.append(OPS[op_name](inp, 1, True, repeats)) self._pos.append(pos) - # self._collect_inds.append(ind * 3 + ind2 - 1) # Do not collect intermediate self._pools.append('{}({})'.format( op_name, self._pools[pos])) # summation @@ -218,8 +222,10 @@ def call(self, x): feats = [x] for pos, op in zip(self._pos, self.ops): if isinstance(pos, list): - assert len(pos) == 2, "Two ops must be provided" - feats.append(op(feats[pos[0]], feats[pos[1]])) + if len(pos) == 2: + feats.append(op(feats[pos[0]], feats[pos[1]])) + else: + raise ValueError("Two ops must be provided") else: feats.append(op(feats[pos])) out = 0 diff --git a/vega/modules/operators/conv.py b/vega/modules/operators/conv.py index 113d9c8..b59b8f3 100644 --- a/vega/modules/operators/conv.py +++ b/vega/modules/operators/conv.py @@ -1,12 +1,18 @@ # -*- coding: utf-8 -*- # Copyright (C) 2020. Huawei Technologies Co., Ltd. All rights reserved. -# This program is free software; you can redistribute it and/or modify -# it under the terms of the MIT License. -# This program is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# MIT License for more details. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
"""Import all torch operators.""" import math @@ -161,7 +167,8 @@ def __init__(self, C_in, C_out, affine=True): :param affine: whether to use affine in BN """ super(FactorizedReduce, self).__init__() - assert C_out % 2 == 0 + if C_out % 2 != 0: + raise ValueError('Outchannel must be divided by 2.') self.relu = ops.Relu(inplace=False) self.conv_1 = ops.Conv2d(C_in, C_out // 2, 1, stride=2, padding=0, bias=False) self.conv_2 = ops.Conv2d(C_in, C_out // 2, 1, stride=2, padding=0, bias=False) diff --git a/vega/modules/operators/functions/adaptive_weight_ms.py b/vega/modules/operators/functions/adaptive_weight_ms.py index df645bc..57fc3e2 100644 --- a/vega/modules/operators/functions/adaptive_weight_ms.py +++ b/vega/modules/operators/functions/adaptive_weight_ms.py @@ -1,19 +1,25 @@ # -*- coding: utf-8 -*- # Copyright (C) 2020. Huawei Technologies Co., Ltd. All rights reserved. -# This program is free software; you can redistribute it and/or modify -# it under the terms of the MIT License. -# This program is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# MIT License for more details. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. """Adaptive weight.""" import os import logging +import uuid from mindspore.train.serialization import save_checkpoint, load_checkpoint from mindspore import Tensor import numpy as np -import uuid def adaptive_weight(ckpt_file, ms_model): @@ -42,7 +48,6 @@ def adaptive_weight(ckpt_file, ms_model): param_dict['name'] = net_para_name param_dict['data'] = init_weight if net_para_shape == init_para_shape else new_weight new_ms_params_list.append(param_dict) - # parameter_dict[net_para_name].data = new_weight save_path = os.path.dirname(ckpt_file) save_file_name = os.path.join(save_path, "adaptive_" + uuid.uuid1().hex[:8] + ".ckpt") save_checkpoint(new_ms_params_list, save_file_name) diff --git a/vega/modules/operators/functions/mindspore_fn.py b/vega/modules/operators/functions/mindspore_fn.py index 4a5c67b..8ef5eb7 100644 --- a/vega/modules/operators/functions/mindspore_fn.py +++ b/vega/modules/operators/functions/mindspore_fn.py @@ -1,27 +1,32 @@ # -*- coding: utf-8 -*- # Copyright (C) 2020. Huawei Technologies Co., Ltd. All rights reserved. -# This program is free software; you can redistribute it and/or modify -# it under the terms of the MIT License. -# This program is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# MIT License for more details. -"""Custom functions of pytorch.""" -import math -from functools import reduce -import mindspore.nn as nn -import mindspore -import numpy as np +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""Custom functions of mindspore.""" + import os import uuid -from mindspore.ops import operations as P +import numpy as np +import mindspore.nn as nn +import mindspore import mindspore.ops as ops +from mindspore.ops import operations as P from mindspore import Parameter, Tensor -from mindspore.common import initializer as init -from mindspore.common.initializer import initializer -from .serializable import OperatorSerializable +from mindspore.common.initializer import initializer, HeNormal from vega.common.class_factory import ClassType, ClassFactory +from .serializable import OperatorSerializable class Module(nn.Cell): @@ -50,7 +55,6 @@ def __setattr__(self, name, value): """Overide __setattr__.""" if isinstance(value, nn.Cell): super().__setattr__(name, value) - # value.update_parameters_name(name + uuid.uuid1().hex[:8] + '.') self.children_ms = list(self._cells.values()) else: super().__setattr__(name, value) @@ -67,12 +71,10 @@ def named_modules(self): _names_modules.extend(child_modules) return _names_modules - # def _apply_names(self, parent_name=''): """Apply names spaces.""" for scope_name, module in self.name_cells().items(): scope_name = '{}.{}'.format(parent_name, scope_name) if parent_name else scope_name - # module.update_parameters_name(scope_name + '.') module.name = scope_name + '/' + module.__class__.__name__ if hasattr(module, "_apply_names"): module._apply_names(scope_name) @@ -108,7 +110,6 @@ def construct(self, *inputs): def set_parameters(self, name, value): """Set Parameters.""" - # self.insert_param_to_cell(name, value) setattr(self, name, value) return 0 @@ -194,7 +195,6 @@ def __init__(self, size=None): if size is not None and not isinstance(size, tuple): self.size = tuple(size) self.shape = P.Shape() - # self.squeeze = P.Squeeze((1, 2)) def construct(self, inputs): """Call squeeze function.""" @@ -202,7 +202,6 @@ def construct(self, inputs): return self.reshape(inputs, (self.shape(inputs)[0], -1)) else: return self.reshape(inputs, self.size) - # return self.squeeze(inputs) @ClassFactory.register(ClassType.NETWORK) @@ -221,64 +220,11 @@ def construct(self, input): return self.linear(input) -class KaimingNormal(init.Initializer): - """Call KaimingNormal.""" - - def __init__(self, a=0, mode='fan_in', nonlinearity='relu'): - super(KaimingNormal, self).__init__() - self.mode = mode - self.gain = math.sqrt(2.0) - - def _calculate_in_and_out(self, arr): - dim = len(arr.shape) - if dim < 2: - raise ValueError("If initialize data with xavier uniform, the dimension of data must greater than 1.") - - n_in = arr.shape[1] - n_out = arr.shape[0] - - if dim > 2: - counter = reduce(lambda x, y: x * y, arr.shape[2:]) - n_in *= counter - n_out *= counter - return n_in, n_out - - def _select_fan(self, array, mode): - mode = mode.lower() - valid_modes = ['fan_in', 'fan_out'] - if mode not in valid_modes: - raise ValueError("Mode {} not supported, please use one of {}".format(mode, valid_modes)) - - fan_in, fan_out = self._calculate_in_and_out(array) - return fan_in if mode == 'fan_in' else fan_out - - def _assignment(self, arr, num): - """Assign the value of `num` to 
`arr`.""" - if arr.shape == (): - arr = arr.reshape((1)) - arr[:] = num - arr = arr.reshape(()) - else: - if isinstance(num, np.ndarray): - arr[:] = num[:] - else: - arr[:] = num - return arr - - def _initialize(self, arr): - fan = self._select_fan(arr, self.mode) - std = self.gain / math.sqrt(fan) - np.random.seed(0) - data = np.random.normal(0, std, arr.shape) - - self._assignment(arr, data) - - @ClassFactory.register(ClassType.NETWORK) class DepthwiseConv2d(OperatorSerializable, nn.Cell): """Call DepthwiseConv2d.""" - def __init__(self, in_channels, kernel_size, stride, pad_mode, pad, channel_multiplier=1, has_bias=False, + def __init__(self, in_channels, kernel_size, stride, pad_mode, padding, channel_multiplier=1, has_bias=False, dilation=1): super(DepthwiseConv2d, self).__init__() self.has_bias = has_bias @@ -288,7 +234,7 @@ def __init__(self, in_channels, kernel_size, stride, pad_mode, pad, channel_mult self.kernel_size = (kernel_size, kernel_size) self.depthwise_conv = P.DepthwiseConv2dNative(channel_multiplier=channel_multiplier, kernel_size=self.kernel_size, - stride=stride, pad_mode=pad_mode, pad=pad, + stride=stride, pad_mode=pad_mode, pad=padding, dilation=dilation) self.bias_add = P.BiasAdd() weight_shape = [channel_multiplier, in_channels, *self.kernel_size] @@ -332,17 +278,10 @@ def __init__(self, in_channels, out_channels, kernel_size=3, stride=1, padding=N self.conv2d = nn.Conv2d(in_channels, out_channels, kernel_size=kernel_size, stride=stride, padding=padding, has_bias=bias, group=groups, dilation=dilation, pad_mode=pad_mode) self.conv2d.update_parameters_name("conv2d_" + uuid.uuid1().hex[:8] + ".") - - # elif in_channels == out_channels and in_channels == groups: - # self.conv2d = DepthwiseConv2d(in_channels, kernel_size=kernel_size, stride=stride, pad_mode=pad_mode, - # pad=padding, has_bias=bias, dilation=dilation) - # self.conv2d.update_parameters_name("conv2d_" + uuid.uuid1().hex[:8] + ".") else: - # TODO delete self.conv2d = nn.Conv2d(in_channels, out_channels, kernel_size=kernel_size, stride=stride, padding=padding, has_bias=bias, group=1, dilation=dilation, pad_mode=pad_mode) self.conv2d.update_parameters_name("conv2d_" + uuid.uuid1().hex[:8] + ".") - # raise ValueError("For group not equal to 1, the in_channels, out_chanels and group should be equal.") def construct(self, input): """Call conv2d function.""" @@ -351,13 +290,6 @@ def construct(self, input): def initial(self, kernel_mode='he', bias_mode='zero', kernel_scale=1., bias_scale=1.): """Initialize weight and bias.""" return - # if kernel_mode == 'he': - # self.conv2d.weight = init.initializer( # self.conv2d.weight.default_input for mindspore 0.5~0.7 - # KaimingNormal(a=0, mode='fan_in', nonlinearity='relu'), - # self.conv2d.weight.shape, self.conv2d.weight.dtype).to_tensor() - # if bias_mode == "zero": - # self.conv2d.bias = init.initializer( - # 'zeros', self.conv2d.bias.shape, self.conv2d.bias.dtype).to_tensor() @ClassFactory.register(ClassType.NETWORK) @@ -402,8 +334,6 @@ def __init__(self, kernel_size, stride, padding=0, pad_mode="valid"): self.padding = padding if padding > 0: self.pad_op = P.Pad(((0, 0), (0, 0), (padding, padding), (padding, padding))) - # if padding != 0: - # pad_mode = "same" self.max_pool2d = nn.MaxPool2d(kernel_size=kernel_size, stride=stride, pad_mode=pad_mode) self.max_pool2d.update_parameters_name("maxpool2d_" + uuid.uuid1().hex[:8] + ".") @@ -423,8 +353,6 @@ def __init__(self, kernel_size, stride, padding=0, count_include_pad=True, pad_m self.padding = padding if padding > 
0: self.pad_op = P.Pad(((0, 0), (0, 0), (padding, padding), (padding, padding))) - # if padding != 0: - # pad_mode = "same" self.avg_pool2d = nn.AvgPool2d(kernel_size=kernel_size, stride=stride, pad_mode=pad_mode) self.avg_pool2d.update_parameters_name("avgpool2d_" + uuid.uuid1().hex[:8] + ".") @@ -517,7 +445,7 @@ def __init__(self, prob=0.5): super(Dropout, self).__init__() if prob == 0: prob = 1e-12 - self.dropout = nn.Dropout(1 - prob) # keep prob + self.dropout = nn.Dropout(1 - prob) def construct(self, x, **kwargs): """Do an inference on Dropout.""" @@ -535,7 +463,6 @@ def __init__(self, stride): """ super(Zero, self).__init__() self.zeroslike = P.ZerosLike() - # self.zeros = P.Zeros() self.stride = stride self.shape = P.Shape() @@ -545,10 +472,6 @@ def construct(self, x): :param x: input tensor :return: output tensor """ - # in_shape = self.shape(x) - # out_shape = (in_shape[0], in_shape[1], in_shape[2] // self.stride, in_shape[3] // self.stride) - # return Tensor(np.zeros(out_shape, np.float32)) - # return self.zeros(out_shape,mindspore.float32) return self.zeroslike(x[:, :, ::self.stride, ::self.stride]) @@ -579,7 +502,6 @@ def __init__(self, size=None, dim=0): self.dim = dim self.size = size self.shape = P.Shape() - # self.split = P.Split(axis=dim, output_num=size) def construct(self, inputs): """Call Split function.""" @@ -629,8 +551,8 @@ def construct(self, inputs): """Call Stack function.""" expands = [] for input in inputs: - expand = self.expand_dim(input, self.dim) - expands.append(expand) + expand_input = self.expand_dim(input, self.dim) + expands.append(expand_input) return self.concat(tuple(expands)) @@ -646,7 +568,6 @@ def __init__(self, dim1=0, dim2=1): def construct(self, inputs): """Call Transpose function.""" - # new_dim = [i for i in range(len(self.shape(inputs)))] new_dim = () for i in range(len(self.shape(inputs))): if i == self.dim1: @@ -656,8 +577,6 @@ def construct(self, inputs): else: index = i new_dim = new_dim + (index,) - # new_dim[self.dim1], new_dim[self.dim2] = new_dim[self.dim2], new_dim[self.dim1] - # return self.transpose(inputs, tuple(new_dim)) return self.transpose(inputs, new_dim) @@ -760,12 +679,6 @@ class Embedding(nn.Embedding, OperatorSerializable): def concat(inputs, dim=1): """Call concat according to backends.""" return P.Concat(dim)(inputs) - # if isinstance(inputs, tuple): - # return P.Concat(axis=dim)(inputs) - # elif isinstance(inputs, list): - # return P.Concat(axis=dim)(tuple(inputs)) - # else: - # raise TypeError("The type of input must be tuple or list, but get {}.".format(type(inputs))) def mul(a, b): @@ -775,10 +688,7 @@ def mul(a, b): def random_normal(*size): """Apply random values from a normal distribution.""" - # return P.StandardNormal()(size) return Tensor(np.random.randn(*size).astype(np.float32)) - # return P.Normal()(size, 0,1) - # return Parameter(Tensor(np.random.randn(*size)), name="random_" + uuid.uuid1().hex[:8]) def softmax(input, dim=-1): @@ -808,7 +718,6 @@ def interpolate(input, size, mode='bilinear', align_corners=False): def add_n(input): """Apply sum function.""" - # return sum(input) return P.AddN()(input) @@ -827,20 +736,11 @@ def drop_path(x, prob): :return: output feature map after dropout :rtype: torch tensor """ - # if prob <= 0.: - # return x - # keep = 1. 
- prob - # - # bernoulli_random = P.random.uniform([int(x.get_shape()[0]), 1, 1, 1]) - # mask = P.cast(bernoulli_random < keep, ms.float32) - # x = P.div(x, keep) - # x = P.multiply(x, mask) return x def pad(inputs, position): """Apply pad function.""" - # TODO the position of torch is a tuple and the order is reversed, but the mindspore is N*2 tuple and is in order pad_op = P.Pad(position) return pad_op(inputs) @@ -994,29 +894,13 @@ def matmul(x1, x2): class LayerNorm(OperatorSerializable, nn.Cell): """Layer Norm module.""" - def __init__(self, in_channels=None, eps=1e-12, return_2d=False): + def __init__(self, in_channels=None, eps=1e-12): super(LayerNorm, self).__init__() - self.return_2d = return_2d self.layer_norm = nn.LayerNorm((in_channels,)) - self.cast = P.Cast() - self.get_dtype = P.DType() - self.reshape = P.Reshape() - self.get_shape = P.Shape() def construct(self, input_tensor): """Layer norm.""" - shape = self.get_shape(input_tensor) - batch_size = shape[0] - max_len = shape[1] - embed_dim = shape[2] - - output = self.reshape(input_tensor, (-1, embed_dim)) - # output = self.cast(output, mstype.float32) - output = self.layer_norm(output) - output = self.cast(output, self.get_dtype(input_tensor)) - if not self.return_2d: - output = self.reshape(output, (batch_size, max_len, embed_dim)) - return output + return self.layer_norm(input_tensor) @ClassFactory.register(ClassType.NETWORK) diff --git a/vega/modules/operators/functions/pytorch_fn.py b/vega/modules/operators/functions/pytorch_fn.py index bd974a1..e79c578 100644 --- a/vega/modules/operators/functions/pytorch_fn.py +++ b/vega/modules/operators/functions/pytorch_fn.py @@ -1,12 +1,18 @@ # -*- coding: utf-8 -*- # Copyright (C) 2020. Huawei Technologies Co., Ltd. All rights reserved. -# This program is free software; you can redistribute it and/or modify -# it under the terms of the MIT License. -# This program is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# MIT License for more details. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
"""Custom functions of pytorch.""" from collections import OrderedDict @@ -19,8 +25,8 @@ from torch.nn.quantized import Conv2d as QuantConv2d from torch.nn import Parameter as Torch_Parameter import vega -from .serializable import OperatorSerializable from vega.common.class_factory import ClassType, ClassFactory +from .serializable import OperatorSerializable class Module(nn.Module): @@ -573,7 +579,6 @@ def __init__(self, dim=0): def forward(self, inputs): """Call forward function.""" - # return torch.squeeze(inputs, self.dim) return inputs.squeeze(self.dim) @@ -759,7 +764,6 @@ def mean_all(inputs): def pad(inputs, position): """Apply pad function.""" - # return F.pad(inputs, position) dtype = inputs.dtype return F.pad(inputs.cpu().float(), position).to(vega.get_devices()).to(dtype) @@ -1037,9 +1041,9 @@ def conv_ws_2d(input, """ c_in = weight.size(0) weight_flat = weight.view(c_in, -1) - mean = weight_flat.mean(dim=1, keepdim=True).view(c_in, 1, 1, 1) + mean_weight = weight_flat.mean(dim=1, keepdim=True).view(c_in, 1, 1, 1) std = weight_flat.std(dim=1, keepdim=True).view(c_in, 1, 1, 1) - weight = (weight - mean) / (std + eps) + weight = (weight - mean_weight) / (std + eps) return F.conv2d(input, weight, bias, stride, padding, dilation, groups) diff --git a/vega/modules/operators/functions/pytorch_to_ms.py b/vega/modules/operators/functions/pytorch_to_ms.py index c28cfff..611f2cd 100644 --- a/vega/modules/operators/functions/pytorch_to_ms.py +++ b/vega/modules/operators/functions/pytorch_to_ms.py @@ -1,37 +1,47 @@ # -*- coding: utf-8 -*- # Copyright (C) 2020. Huawei Technologies Co., Ltd. All rights reserved. -# This program is free software; you can redistribute it and/or modify -# it under the terms of the MIT License. -# This program is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# MIT License for more details. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ """Convert pytorch weight to mindspore checkpoint.""" + import os -import torch +import uuid import logging +import torch from mindspore.train.serialization import save_checkpoint from mindspore import Tensor -import uuid def pytorch2mindspore(pth_file): """Convert pytorch weight to mindspore checkpoint.""" torch_para_dict = torch.load(pth_file) - torch_weight_list = [] + (torch_paras_name_list, torch_weight_list, ms_para_name_list) = _convert_weight_name(torch_para_dict) + ms_params_list = _convert_weight_format(torch_paras_name_list, torch_weight_list, ms_para_name_list) + save_path = os.path.dirname(pth_file) + save_file_name = os.path.join(save_path, "torch2ms_" + uuid.uuid1().hex[:8] + ".ckpt") + save_checkpoint(ms_params_list, save_file_name) + return save_file_name + + +def _convert_weight_name(torch_para_dict): torch_paras_name_list = [] - ms_params_list = [] + torch_weight_list = [] ms_para_name_list = [] - for index, name in enumerate(torch_para_dict): torch_paras_name_list.append(name) torch_weight = torch_para_dict[name] - - # if name == "fc.weight": - # ms_name = "fc.linear.weight" - # elif name == "fc.bias": - # ms_name = "fc.linear.bias" if name.endswith("weight"): name = name[:name.rfind("weight")] ms_name = "backbone." + name + "conv2d.weight" @@ -39,37 +49,32 @@ def pytorch2mindspore(pth_file): name = name[:name.rfind('bias')] ms_name = "backbone." + name + 'batch_norm.beta' elif name.endswith('.running_mean'): - # fix batch_norm name old_name_gamma = ms_para_name_list[index - 2] new_name_gamma = old_name_gamma[:old_name_gamma.rfind('conv2d.weight')] + "batch_norm.gamma" ms_para_name_list[index - 2] = new_name_gamma - name = name[:name.rfind('.running_mean')] ms_name = "backbone." + name + '.batch_norm.moving_mean' - elif name.endswith('.running_var'): name = name[:name.rfind('.running_var')] ms_name = "backbone." 
+ name + '.batch_norm.moving_variance' - elif name.endswith(".num_batches_tracked"): ms_name = name - torch_weight_list.append(torch_weight) ms_para_name_list.append(ms_name) + return torch_paras_name_list, torch_weight_list, ms_para_name_list + +def _convert_weight_format(torch_paras_name_list, torch_weight_list, ms_para_name_list): + ms_params_list = [] for index, name in enumerate(ms_para_name_list): - logging.debug('========================py_name: {}'.format(torch_paras_name_list[index])) - logging.debug('========================ms_name: {}'.format(name)) + logging.debug('==> py_name: {}'.format(torch_paras_name_list[index])) + logging.debug('==> ms_name: {}'.format(name)) param_dict = {} param_dict['name'] = name parameter = torch_weight_list[index] param_dict['data'] = Tensor(parameter.detach().numpy()) ms_params_list.append(param_dict) - - save_path = os.path.dirname(pth_file) - save_file_name = os.path.join(save_path, "torch2ms_" + uuid.uuid1().hex[:8] + ".ckpt") - save_checkpoint(ms_params_list, save_file_name) - return save_file_name + return ms_params_list def pytorch2mindspore_extend(pth_file, model): @@ -82,17 +87,31 @@ def pytorch2mindspore_extend(pth_file, model): init_weights_list.append(init_para_dict[name]) vega_names_list = [] - vega_weights_list = [] - valid_names_list = [] for name in model.parameters_dict(): if not name.endswith("num_batches_tracked"): vega_names_list.append(name) + valid_names_list, vega_weights_list = _get_name_weight(vega_names_list, init_names_list, init_weights_list) + ms_params_list = [] + for index, name in enumerate(valid_names_list): + param_dict = {} + param_dict['name'] = name + parameter = vega_weights_list[index] + param_dict['data'] = Tensor(parameter.detach().numpy()) + ms_params_list.append(param_dict) + save_path = os.path.dirname(pth_file) + save_file_name = os.path.join(save_path, "torch2ms_" + uuid.uuid1().hex[:8] + ".ckpt") + save_checkpoint(ms_params_list, save_file_name) + return save_file_name + + +def _get_name_weight(vega_names_list, init_names_list, init_weights_list): + """Get name and weight from torch.""" + vega_weights_list = [] + valid_names_list = [] for index, name in enumerate(vega_names_list): init_name = init_names_list[index] - # if index < 1: - # continue if name.endswith("weight") and ("conv" or "downsample" in name or "down_sample" in name) and init_name.endswith( "weight") and ("conv" in init_name or "downsample" in init_name or "down_sample" in init_name): valid_names_list.append(name) @@ -114,16 +133,4 @@ def pytorch2mindspore_extend(pth_file, model): vega_weights_list.append(init_weights_list[index]) else: continue - - ms_params_list = [] - - for index, name in enumerate(valid_names_list): - param_dict = {} - param_dict['name'] = name - parameter = vega_weights_list[index] - param_dict['data'] = Tensor(parameter.detach().numpy()) - ms_params_list.append(param_dict) - save_path = os.path.dirname(pth_file) - save_file_name = os.path.join(save_path, "torch2ms_" + uuid.uuid1().hex[:8] + ".ckpt") - save_checkpoint(ms_params_list, save_file_name) - return save_file_name + return valid_names_list, vega_weights_list diff --git a/vega/modules/operators/functions/pytorch_to_tf.py b/vega/modules/operators/functions/pytorch_to_tf.py index 108bd89..32c7c24 100644 --- a/vega/modules/operators/functions/pytorch_to_tf.py +++ b/vega/modules/operators/functions/pytorch_to_tf.py @@ -1,17 +1,23 @@ # -*- coding: utf-8 -*- # Copyright (C) 2020. Huawei Technologies Co., Ltd. All rights reserved. 
-# This program is free software; you can redistribute it and/or modify -# it under the terms of the MIT License. -# This program is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# MIT License for more details. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. """Convert pytorch weight to tf checkpoint.""" import logging import re -import numpy from collections import OrderedDict +import numpy def get_assignment_map(checkpoint_path, pop_global_step=True): diff --git a/vega/modules/operators/functions/serializable.py b/vega/modules/operators/functions/serializable.py index c98826d..b4b9956 100644 --- a/vega/modules/operators/functions/serializable.py +++ b/vega/modules/operators/functions/serializable.py @@ -1,12 +1,18 @@ # -*- coding:utf-8 -*- # Copyright (C) 2020. Huawei Technologies Co., Ltd. All rights reserved. -# This program is free software; you can redistribute it and/or modify -# it under the terms of the MIT License. -# This program is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# MIT License for more details. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. """Convert class to desc.""" from copy import deepcopy @@ -52,21 +58,21 @@ def from_module(cls, module): raise NotImplementedError @property - def md5(self): - """MD5 value of network description.""" - return self.get_md5(self.to_desc(False)) + def sha256(self): + """SHA256 value of network description.""" + return self.get_sha256(self.to_desc(False)) @classmethod - def get_md5(cls, desc): - """Get desc's short md5 code. + def get_sha256(cls, desc): + """Get desc's short sha256 code. :param desc: network description. :type desc: str. - :return: short MD5 code. + :return: short sha256 code. :rtype: str. """ - code = hashlib.md5(json.dumps(desc, sort_keys=True).encode('utf-8')).hexdigest() + code = hashlib.sha256(json.dumps(desc, sort_keys=True).encode('utf-8')).hexdigest() return code[:8] @property diff --git a/vega/modules/operators/functions/tensorflow_fn.py b/vega/modules/operators/functions/tensorflow_fn.py index 9cb2948..e5276d8 100644 --- a/vega/modules/operators/functions/tensorflow_fn.py +++ b/vega/modules/operators/functions/tensorflow_fn.py @@ -1,18 +1,24 @@ # -*- coding: utf-8 -*- # Copyright (C) 2020. Huawei Technologies Co., Ltd. All rights reserved. 
-# This program is free software; you can redistribute it and/or modify -# it under the terms of the MIT License. -# This program is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# MIT License for more details. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. """Custom functions of tensorflow.""" import logging import math -import numpy as np from collections import OrderedDict +import numpy as np import tensorflow.compat.v1 as tf from tensorflow.python.ops import state_ops from vega.common.config import Config @@ -81,7 +87,6 @@ def load_checkpoint(self, weight_file): if not weight_file: return logging.info("Load checkpoint form file ({}).".format(weight_file)) - # model_file = tf.train.latest_checkpoint(weight_file) reader = tf.train.NewCheckpointReader(weight_file) variables = reader.get_variable_to_shape_map() states = {v: reader.get_tensor(v) for v in variables} @@ -230,7 +235,6 @@ def __init__(self): def call(self, inputs, **kwargs): """Call QuantizeConv2d function.""" - # todo return inputs @@ -541,7 +545,6 @@ def call(self, input, **kwargs): if self._is_load_pretrained: self.training = True out = bn(inputs=input, training=self.training) - # update moving average if self._trainable: for item in bn.updates: tf.add_to_collections(tf.GraphKeys.UPDATE_OPS, item) @@ -753,7 +756,7 @@ def __init__(self, rgb_range, rgb_mean, rgb_std=(1.0, 1.0, 1.0), sign=-1): def call(self, inputs, *args, **kwargs): """Call MeanShift.""" std = tf.convert_to_tensor(self.rgb_std, dtype=tf.float32) - self.weight = tf.convert_to_tensor(np.eye(3).astype(np.float32)) # tf.eye(3) + self.weight = tf.convert_to_tensor(np.eye(3).astype(np.float32)) self.weight = tf.div(self.weight, std) self.bias = self.sign * self.rgb_range * tf.convert_to_tensor(self.rgb_mean, dtype=tf.float32) self.bias = tf.div(self.bias, std) @@ -838,7 +841,6 @@ def gumbel_softmax_sample(input, temperature, eps=1e-20): def gumbel_softmax(input, dim=-1, tau=1, hard=True, eps=1e-20): """Apply a gumbel-softmax function.""" - # keep_dims = True if dim == -1 else False y = gumbel_softmax_sample(input, tau, eps) if hard: y_hard = tf.cast(tf.equal(y, tf.reduce_max(y, 1, keep_dims=True)), y.dtype) diff --git a/vega/modules/operators/mix_ops.py b/vega/modules/operators/mix_ops.py index 1e87ecb..d815664 100644 --- a/vega/modules/operators/mix_ops.py +++ b/vega/modules/operators/mix_ops.py @@ -1,12 +1,18 @@ # -*- coding: utf-8 -*- # Copyright (C) 2020. Huawei Technologies Co., Ltd. All rights reserved. -# This program is free software; you can redistribute it and/or modify -# it under the terms of the MIT License. -# This program is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# MIT License for more details. 
+# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. """Import all torch operators.""" from vega.common import ClassType, ClassFactory diff --git a/vega/modules/operators/ops.py b/vega/modules/operators/ops.py index 7be2ac4..20f4fcf 100644 --- a/vega/modules/operators/ops.py +++ b/vega/modules/operators/ops.py @@ -1,12 +1,18 @@ # -*- coding: utf-8 -*- # Copyright (C) 2020. Huawei Technologies Co., Ltd. All rights reserved. -# This program is free software; you can redistribute it and/or modify -# it under the terms of the MIT License. -# This program is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# MIT License for more details. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. """unified operators.""" from functools import partial diff --git a/vega/modules/operators/prune.py b/vega/modules/operators/prune.py index 0f84586..2ac4caf 100644 --- a/vega/modules/operators/prune.py +++ b/vega/modules/operators/prune.py @@ -1,12 +1,18 @@ # -*- coding: utf-8 -*- # Copyright (C) 2020. Huawei Technologies Co., Ltd. All rights reserved. -# This program is free software; you can redistribute it and/or modify -# it under the terms of the MIT License. -# This program is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# MIT License for more details. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
"""Prune operators.""" import numpy as np @@ -33,7 +39,7 @@ def get_shape(layer): return getattr(layer, para_name).default_input.shape -def is_ops_instance(layer, name): +def _is_ops_instance(layer, name): """Get weight shape.""" if vega.is_tf_backend(): return layer.name.find(name) > 0 @@ -41,7 +47,7 @@ def is_ops_instance(layer, name): return layer.__class__.__name__ == name -def get_named_modules(layer): +def _get_named_modules(layer): """Get named modules.""" if vega.is_tf_backend(): return [(op.name, op) for op in layer] @@ -51,7 +57,7 @@ def get_named_modules(layer): return layer._children_scope_recursive() -def parse_module_name(name, module): +def _parse_module_name(name, module): """Parse the module name of mindspore.""" if vega.is_ms_backend(): while (list(module.cells()) != []): @@ -205,8 +211,8 @@ def __init__(self, layer): def apply(self, chn_node_mask, chn_mask): """Apply mask to resnet.""" end_mask = [] - for name, m1 in get_named_modules(self.layer): - name, m1 = parse_module_name(name, m1) + for name, m1 in _get_named_modules(self.layer): + name, m1 = _parse_module_name(name, m1) if name.startswith('backbone.init_block'): if name.endswith('conv'): end_mask = chn_node_mask[0] @@ -219,7 +225,7 @@ def apply(self, chn_node_mask, chn_mask): continue block_idx = int(parsed_name[2][-1]) layer_idx = block_idx + 1 - if is_ops_instance(m1, 'Conv2d'): + if _is_ops_instance(m1, 'Conv2d'): if int(parsed_name[4]) == 0 and parsed_name[5].startswith('conv1'): start_mask = chn_node_mask[layer_idx - 1] end_mask = chn_mask[block_idx] @@ -227,14 +233,13 @@ def apply(self, chn_node_mask, chn_mask): elif int(parsed_name[4]) == 0 and parsed_name[5].startswith('conv2'): start_mask = end_mask end_mask = chn_node_mask[layer_idx] - # shortcut elif int(parsed_name[4]) == 1 and parsed_name[5].startswith('conv1'): start_mask = chn_node_mask[layer_idx - 1] end_mask = chn_node_mask[layer_idx] PruneConv2D(m1).apply(end_mask, start_mask) - elif is_ops_instance(m1, 'BatchNorm2d'): + elif _is_ops_instance(m1, 'BatchNorm2d'): PruneBatchNorm(m1).apply(end_mask) - elif is_ops_instance(m1, 'Linear'): + elif _is_ops_instance(m1, 'Linear'): PruneLinear(m1).apply(end_mask) return self.layer @@ -248,9 +253,8 @@ def __init__(self, layer): def apply(self, chn_mask): """Apply mask to resnet.""" end_mask = [] - # cur_idx = 1 - for idx, (name, m1) in enumerate(get_named_modules(self.layer)): - name, m1 = parse_module_name(name, m1) + for idx, (name, m1) in enumerate(_get_named_modules(self.layer)): + name, m1 = _parse_module_name(name, m1) if name.startswith('features'): if len(name.split('.')) == 3: module_length = len(m1._modules) @@ -262,7 +266,7 @@ def apply(self, chn_mask): start_mask = chn_mask[sequence_idx - 1] if sequence_idx > 0 else None end_mask = chn_mask[sequence_idx] elif block_idx < module_length - 2: - if is_ops_instance(m1, 'Conv2d'): + if _is_ops_instance(m1, 'Conv2d'): continue end_mask = chn_mask[sequence_idx] start_mask = end_mask @@ -270,10 +274,10 @@ def apply(self, chn_mask): start_mask = end_mask end_mask = chn_mask[sequence_idx + 1] - if is_ops_instance(m1, 'Conv2d'): + if _is_ops_instance(m1, 'Conv2d'): PruneConv2D(m1).apply(end_mask, start_mask) - elif is_ops_instance(m1, 'BatchNorm2d'): + elif _is_ops_instance(m1, 'BatchNorm2d'): PruneBatchNorm(m1).apply(end_mask) - elif name.startswith('classifier') and is_ops_instance(m1, 'Linear'): + elif name.startswith('classifier') and _is_ops_instance(m1, 'Linear'): PruneLinear(m1).apply(end_mask) return self.layer diff --git 
a/vega/modules/operators/prune_filter.py b/vega/modules/operators/prune_filter.py index e07b910..737da7f 100644 --- a/vega/modules/operators/prune_filter.py +++ b/vega/modules/operators/prune_filter.py @@ -1,12 +1,18 @@ # -*- coding: utf-8 -*- # Copyright (C) 2020. Huawei Technologies Co., Ltd. All rights reserved. -# This program is free software; you can redistribute it and/or modify -# it under the terms of the MIT License. -# This program is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# MIT License for more details. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. """Prune operators.""" import numpy as np @@ -21,9 +27,11 @@ def __init__(self, layer, props): self.props = props self.start_mask_code = self.props.get(self.layer.name + '/in_channels') if self.start_mask_code: - assert len(self.start_mask_code) == self.layer.in_channels + if len(self.start_mask_code) != self.layer.in_channels: + raise ValueError('Length of start_mask_code must equal in_channels.') self.end_mask_code = self.props.get(self.layer.name + '/out_channels') - assert len(self.end_mask_code) == self.layer.out_channels + if len(self.end_mask_code) != self.layer.out_channels: + raise ValueError('Length of end_mask_code must equal out_channels.') def filter(self): """Apply mask to weight.""" @@ -73,7 +81,6 @@ def __init__(self, layer, props): self.layer = layer self.props = props self.mask_code = self.props.get(self.layer.name + '/num_features') - # assert len(self.mask_code) == self.layer.num_features def filter(self): """Apply mask to batchNorm.""" @@ -95,7 +102,8 @@ def __init__(self, layer, props): self.layer = layer self.props = props self.mask_code = self.props.get(self.layer.name + '/in_features') - assert len(self.mask_code) == self.layer.in_features + if len(self.mask_code) != self.layer.in_features: + raise ValueError('Length of mask_code must equal in_features.') def filter(self): """Apply mask to linear.""" diff --git a/vega/modules/operators/quant/__init__.py b/vega/modules/operators/quant/__init__.py index 4cd31d7..1428f13 100644 --- a/vega/modules/operators/quant/__init__.py +++ b/vega/modules/operators/quant/__init__.py @@ -2,6 +2,6 @@ if vega.is_tf_backend(): - from .tensorflow_quant import * + from .tensorflow_quant import QuantConv, quant_custom_ops elif vega.is_torch_backend(): - from .pytorch_quant import * + from .pytorch_quant import Quantizer, QuantConv, quant_custom_ops diff --git a/vega/modules/operators/quant/pytorch_quant.py b/vega/modules/operators/quant/pytorch_quant.py index 0be3091..6eebe22 100644 --- a/vega/modules/operators/quant/pytorch_quant.py +++ b/vega/modules/operators/quant/pytorch_quant.py @@ -1,12 +1,18 @@ # -*- coding: utf-8 -*- # Copyright (C) 2020. Huawei Technologies Co., Ltd. All rights reserved. -# This program is free software; you can redistribute it and/or modify -# it under the terms of the MIT License.
-# This program is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# MIT License for more details. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. """Quantized Convlution.""" import math diff --git a/vega/modules/operators/quant/tensorflow_quant.py b/vega/modules/operators/quant/tensorflow_quant.py index feb333c..4fcab78 100644 --- a/vega/modules/operators/quant/tensorflow_quant.py +++ b/vega/modules/operators/quant/tensorflow_quant.py @@ -1,16 +1,23 @@ # -*- coding: utf-8 -*- # Copyright (C) 2020. Huawei Technologies Co., Ltd. All rights reserved. -# This program is free software; you can redistribute it and/or modify -# it under the terms of the MIT License. -# This program is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# MIT License for more details. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. """Quantized Convlution.""" + import tensorflow.compat.v1 as tf -from ..ops import Module +from vega.modules.module import Module from ..functions.serializable import OperatorSerializable @@ -286,6 +293,16 @@ def __init__(self, in_channels, out_channels, kernel_size, stride=1, padding='sa self.bias = bias self.kernel_size = kernel_size if isinstance(kernel_size, tuple) else (kernel_size, kernel_size) + @property + def out_channels(self): + """Output Channel for Module.""" + return self._out_channels + + @out_channels.setter + def out_channels(self, value): + """Output Channel for Module.""" + self._out_channels = value + def build(self, nbit_a=8, nbit_w=8, quan_name_w='dorefa', quan_name_a='dorefa', has_offset=False): """Config the quantization settings. diff --git a/vega/modules/preprocess/__init__.py b/vega/modules/preprocess/__init__.py index bd15cb5..08106a7 100644 --- a/vega/modules/preprocess/__init__.py +++ b/vega/modules/preprocess/__init__.py @@ -1 +1 @@ -from .stem import * +from .stem import PreOneStem diff --git a/vega/modules/preprocess/stem.py b/vega/modules/preprocess/stem.py index 854ae9b..6950e47 100644 --- a/vega/modules/preprocess/stem.py +++ b/vega/modules/preprocess/stem.py @@ -1,12 +1,18 @@ # -*- coding:utf-8 -*- # Copyright (C) 2020. Huawei Technologies Co., Ltd. All rights reserved. -# This program is free software; you can redistribute it and/or modify -# it under the terms of the MIT License. 
-# This program is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# MIT License for more details. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. """This is SearchSpace for preprocess.""" from vega.modules.module import Module diff --git a/vega/modules/tensformers/attention.py b/vega/modules/tensformers/attention.py index aefa6da..caef823 100644 --- a/vega/modules/tensformers/attention.py +++ b/vega/modules/tensformers/attention.py @@ -1,12 +1,18 @@ # -*- coding:utf-8 -*- # Copyright (C) 2020. Huawei Technologies Co., Ltd. All rights reserved. -# This program is free software; you can redistribute it and/or modify -# it under the terms of the MIT License. -# This program is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# MIT License for more details. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. """This is Attentions classes.""" import math diff --git a/vega/modules/tensformers/embeddings.py b/vega/modules/tensformers/embeddings.py index 028506f..31770b0 100644 --- a/vega/modules/tensformers/embeddings.py +++ b/vega/modules/tensformers/embeddings.py @@ -1,12 +1,18 @@ # -*- coding:utf-8 -*- # Copyright (C) 2020. Huawei Technologies Co., Ltd. All rights reserved. -# This program is free software; you can redistribute it and/or modify -# it under the terms of the MIT License. -# This program is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# MIT License for more details. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
"""This is Embeddings classes.""" from vega.modules.operators import ops diff --git a/vega/modules/tensformers/encoder.py b/vega/modules/tensformers/encoder.py index bbaefd1..2fdff79 100644 --- a/vega/modules/tensformers/encoder.py +++ b/vega/modules/tensformers/encoder.py @@ -1,12 +1,18 @@ # -*- coding:utf-8 -*- # Copyright (C) 2020. Huawei Technologies Co., Ltd. All rights reserved. -# This program is free software; you can redistribute it and/or modify -# it under the terms of the MIT License. -# This program is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# MIT License for more details. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. """This is Encode classes.""" import copy diff --git a/vega/modules/tensformers/output.py b/vega/modules/tensformers/output.py index fd6c4de..61407fc 100644 --- a/vega/modules/tensformers/output.py +++ b/vega/modules/tensformers/output.py @@ -1,12 +1,18 @@ # -*- coding:utf-8 -*- # Copyright (C) 2020. Huawei Technologies Co., Ltd. All rights reserved. -# This program is free software; you can redistribute it and/or modify -# it under the terms of the MIT License. -# This program is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# MIT License for more details. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. """This is Output classes.""" from vega.modules.operators import ops diff --git a/vega/modules/tensformers/pertrained_hooks.py b/vega/modules/tensformers/pertrained_hooks.py index edbda5a..ca785fd 100644 --- a/vega/modules/tensformers/pertrained_hooks.py +++ b/vega/modules/tensformers/pertrained_hooks.py @@ -1,12 +1,18 @@ # -*- coding: utf-8 -*- # Copyright (C) 2020. Huawei Technologies Co., Ltd. All rights reserved. -# This program is free software; you can redistribute it and/or modify -# it under the terms of the MIT License. -# This program is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# MIT License for more details. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. """Compressed model filter.""" from collections import OrderedDict diff --git a/vega/modules/tensformers/pooler.py b/vega/modules/tensformers/pooler.py index e873eaf..89b4fdf 100644 --- a/vega/modules/tensformers/pooler.py +++ b/vega/modules/tensformers/pooler.py @@ -1,12 +1,18 @@ # -*- coding:utf-8 -*- # Copyright (C) 2020. Huawei Technologies Co., Ltd. All rights reserved. -# This program is free software; you can redistribute it and/or modify -# it under the terms of the MIT License. -# This program is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# MIT License for more details. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. """This is Pooler for Bert.""" from vega.modules.operators import ops diff --git a/vega/networks/__init__.py b/vega/networks/__init__.py index 9059f74..5667989 100644 --- a/vega/networks/__init__.py +++ b/vega/networks/__init__.py @@ -1,12 +1,18 @@ # -*- coding:utf-8 -*- # Copyright (C) 2020. Huawei Technologies Co., Ltd. All rights reserved. -# This program is free software; you can redistribute it and/or modify -# it under the terms of the MIT License. -# This program is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# MIT License for more details. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. """Import and register network automatically.""" @@ -36,7 +42,8 @@ "gcn": ["GCN"], "vit": ["VisionTransformer"], "mtm_sr": ["MtMSR"], - "unet": ["Unet"] + "unet": ["Unet"], + "decaug": ["DecAug"], }) diff --git a/vega/networks/adelaide.py b/vega/networks/adelaide.py index 1e22e45..9f4e0f7 100644 --- a/vega/networks/adelaide.py +++ b/vega/networks/adelaide.py @@ -1,12 +1,18 @@ # -*- coding:utf-8 -*- # Copyright (C) 2020. Huawei Technologies Co., Ltd. All rights reserved. -# This program is free software; you can redistribute it and/or modify -# it under the terms of the MIT License. 
-# This program is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# MIT License for more details. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. """The Adelaide model.""" from vega.networks.mobilenet import MobileNetV2Tiny diff --git a/vega/networks/bert.py b/vega/networks/bert.py index 6c2ad67..eb55c0a 100644 --- a/vega/networks/bert.py +++ b/vega/networks/bert.py @@ -1,12 +1,18 @@ # -*- coding:utf-8 -*- # Copyright (C) 2020. Huawei Technologies Co., Ltd. All rights reserved. -# This program is free software; you can redistribute it and/or modify -# it under the terms of the MIT License. -# This program is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# MIT License for more details. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. """Bert network.""" from vega.modules.module import Module from vega.common.class_factory import ClassType, ClassFactory diff --git a/vega/networks/decaug.py b/vega/networks/decaug.py new file mode 100644 index 0000000..b23d5e6 --- /dev/null +++ b/vega/networks/decaug.py @@ -0,0 +1,52 @@ +# -*- coding:utf-8 -*- + +# Copyright (C) 2020. Huawei Technologies Co., Ltd. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +"""DecAug network""" +from vega.common import ClassFactory, ClassType +from vega.modules.operators import ops +from vega.modules.module import Module + + +@ClassFactory.register(ClassType.NETWORK) +class DecAug(Module): + """Create DecAug Network.""" + + def __init__(self, hdim, num_classes=7, num_concept=3, **kwargs): + super(DecAug, self).__init__() + self.category_branch = ops.Linear(hdim, hdim) + self.concept_branch = ops.Linear(hdim, hdim) + self.relu = ops.Relu(inplace=True) + self.fc0 = ops.Linear(hdim, num_classes) + self.fcc0 = ops.Linear(hdim, num_concept) + self.classification = ops.Linear(hdim * 2, num_classes) + + def forward(self, x): + B, _ = x.shape + instance_embs = x #torch.reshape(x, (B, -1)) + + category_embs = self.category_branch(instance_embs) + logits_category = self.fc0(category_embs) + logits_category = ops.softmax(logits_category, dim=1) + + concept_embs = self.concept_branch(instance_embs) + logits_concept = self.fcc0(concept_embs) + logits_concept = ops.softmax(logits_concept, dim=1) + # concept branch + output = None + if not self.training: + embs = ops.concat((category_embs, concept_embs), 1) + output = self.classification(embs) + return output, logits_category, logits_concept, category_embs, concept_embs, self diff --git a/vega/networks/dnet.py b/vega/networks/dnet.py index 5935e8f..e5c11b9 100644 --- a/vega/networks/dnet.py +++ b/vega/networks/dnet.py @@ -1,12 +1,18 @@ # -*- coding:utf-8 -*- # Copyright (C) 2020. Huawei Technologies Co., Ltd. All rights reserved. -# This program is free software; you can redistribute it and/or modify -# it under the terms of the MIT License. -# This program is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# MIT License for more details. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
"""DNet network.""" from vega.common import ClassFactory, ClassType @@ -27,7 +33,6 @@ def __init__(self, encoding, n_class=1000): _big_model = "*" in block_str if _big_model: block_encoding_list = block_str.split('*') - # stem self.layers = Sequential( create_op('conv3', 3, curr_channel // 2, stride=2), ops.Relu(), @@ -37,7 +42,6 @@ def __init__(self, encoding, n_class=1000): ops.Relu() ) - # body if not _big_model: while index < len(macro_str): stride = 1 @@ -106,11 +110,10 @@ def conv33_sep(in_channel, out_channel, stride): 'conv3_grp2': lambda in_channel, out_channel, stride: conv33(in_channel, out_channel, stride, groups=2), 'conv3_grp4': lambda in_channel, out_channel, stride: conv33(in_channel, out_channel, stride, groups=4), 'conv3_base1': lambda in_channel, out_channel, stride: conv33_base(in_channel, out_channel, stride, base_channel=1), - # noqa: E501 'conv3_base16': lambda in_channel, out_channel, stride: conv33_base(in_channel, out_channel, stride, - base_channel=16), # noqa: E501 + base_channel=16), 'conv3_base32': lambda in_channel, out_channel, stride: conv33_base(in_channel, out_channel, stride, - base_channel=32), # noqa: E501 + base_channel=32), 'conv3_sep': lambda in_channel, out_channel, stride: conv33_sep(in_channel, out_channel, stride) } @@ -188,7 +191,6 @@ def __init__(self, block_str, in_channel, op_names, stride=1, channel_increase=1 connect_index = 0 self.module_list = ModuleList() - # self.module_list = [] length = len(layer_str) // 2 stride_place = 0 while (stride_place + 1) * 2 < len(layer_str) and layer_str[stride_place * 2] == '1': @@ -230,3 +232,8 @@ def call(self, x, **kwargs): current = ops.Relu()(outs[-1]) return current + + def to_desc(self, recursion=True): + """Convert to desc.""" + self.desc.update({"type": "EncodedBlock"}) + return dict(self.desc) diff --git a/vega/networks/erdb_esr.py b/vega/networks/erdb_esr.py index 4e3ea42..38be232 100644 --- a/vega/networks/erdb_esr.py +++ b/vega/networks/erdb_esr.py @@ -1,12 +1,18 @@ # -*- coding:utf-8 -*- # Copyright (C) 2020. Huawei Technologies Co., Ltd. All rights reserved. -# This program is free software; you can redistribute it and/or modify -# it under the terms of the MIT License. -# This program is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# MIT License for more details. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. """Efficient residual dense models for super-resolution.""" import math diff --git a/vega/networks/faster_backbone.py b/vega/networks/faster_backbone.py index 506665d..af36db3 100644 --- a/vega/networks/faster_backbone.py +++ b/vega/networks/faster_backbone.py @@ -1,18 +1,24 @@ # -*- coding:utf-8 -*- # Copyright (C) 2020. Huawei Technologies Co., Ltd. All rights reserved. -# This program is free software; you can redistribute it and/or modify -# it under the terms of the MIT License. 
-# This program is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# MIT License for more details. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. """This is SearchSpace for network.""" from vega.common import ClassFactory, ClassType from vega.modules.module import Module -from .spnet_backbone import SpResNetDet from vega.modules.operators.ops import Linear, AdaptiveAvgPool2d, View +from .spnet_backbone import SpResNetDet @ClassFactory.register(ClassType.NETWORK) diff --git a/vega/networks/faster_rcnn.py b/vega/networks/faster_rcnn.py index 2f5f7d8..aea2625 100644 --- a/vega/networks/faster_rcnn.py +++ b/vega/networks/faster_rcnn.py @@ -1,12 +1,18 @@ # -*- coding:utf-8 -*- # Copyright (C) 2020. Huawei Technologies Co., Ltd. All rights reserved. -# This program is free software; you can redistribute it and/or modify -# it under the terms of the MIT License. -# This program is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# MIT License for more details. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. """This is FasterRCNN network.""" import copy diff --git a/vega/networks/gcn.py b/vega/networks/gcn.py index ebcd499..9da7f8e 100644 --- a/vega/networks/gcn.py +++ b/vega/networks/gcn.py @@ -1,12 +1,18 @@ # -*- coding:utf-8 -*- # Copyright (C) 2020. Huawei Technologies Co., Ltd. All rights reserved. -# This program is free software; you can redistribute it and/or modify -# it under the terms of the MIT License. -# This program is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# MIT License for more details. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
"""This is FasterRCNN network.""" from vega.common import ClassFactory, ClassType @@ -17,9 +23,11 @@ class GCN(Module): """Create ResNet Network.""" - def __init__(self, blocks=[[1, 32, 64]], kernel_size=4, gru_layers=1, gcn_layers=1, keep_prob=1, + def __init__(self, blocks=None, kernel_size=4, gru_layers=1, gcn_layers=1, keep_prob=1, temporal_attention=False, spatial_attention=False, adjacency_matrix=None): super().__init__() + if blocks is None: + blocks = [[1, 32, 64], ] self.kernel_size = kernel_size self.blocks = blocks self.gru_layers = gru_layers diff --git a/vega/networks/mindspore/__init__.py b/vega/networks/mindspore/__init__.py index c81175d..ab97806 100644 --- a/vega/networks/mindspore/__init__.py +++ b/vega/networks/mindspore/__init__.py @@ -1,12 +1,18 @@ # -*- coding:utf-8 -*- # Copyright (C) 2020. Huawei Technologies Co., Ltd. All rights reserved. -# This program is free software; you can redistribute it and/or modify -# it under the terms of the MIT License. -# This program is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# MIT License for more details. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. """Lazy import mindspore network.""" diff --git a/vega/networks/mindspore/backbones/load_official_model.py b/vega/networks/mindspore/backbones/load_official_model.py index b42c812..223e003 100644 --- a/vega/networks/mindspore/backbones/load_official_model.py +++ b/vega/networks/mindspore/backbones/load_official_model.py @@ -1,19 +1,25 @@ # -*- coding: utf-8 -*- # Copyright (C) 2020. Huawei Technologies Co., Ltd. All rights reserved. -# This program is free software; you can redistribute it and/or modify -# it under the terms of the MIT License. -# This program is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# MIT License for more details. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
"""Load the official model from mindspore modelzoo.""" -from vega.common import ClassType, ClassFactory -from official_model_conf import output_layer_map, location_map -import mindspore.nn as nn import operator import importlib +from official_model_conf import output_layer_map, location_map +import mindspore.nn as nn +from vega.common import ClassType, ClassFactory @ClassFactory.register(ClassType.NETWORK) diff --git a/vega/networks/mindspore/backbones/ms2vega.py b/vega/networks/mindspore/backbones/ms2vega.py index 2749e0d..15ff299 100644 --- a/vega/networks/mindspore/backbones/ms2vega.py +++ b/vega/networks/mindspore/backbones/ms2vega.py @@ -1,22 +1,28 @@ # -*- coding: utf-8 -*- # Copyright (C) 2020. Huawei Technologies Co., Ltd. All rights reserved. -# This program is free software; you can redistribute it and/or modify -# it under the terms of the MIT License. -# This program is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# MIT License for more details. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. """Transform torchvision model to vega.""" +from collections import OrderedDict import mindspore.nn as nn from mindspore.ops import operations as P -from collections import OrderedDict from vega.modules.operators import ops from vega.modules.blocks.blocks import BottleneckBlock from vega.modules.connections import Sequential -from .resnet import ResidualBlock from vega.networks.network_desc import NetworkDesc +from .resnet import ResidualBlock atom_op = (nn.Conv2d, nn.BatchNorm2d, nn.ReLU, nn.LeakyReLU, nn.MaxPool2d, nn.AvgPool2d, P.ReduceMean, nn.Dense, nn.Dropout, nn.Flatten) @@ -31,7 +37,6 @@ def _transform_op(init_layer): kernel_size = init_layer.kernel_size[0] stride = init_layer.stride padding = init_layer.padding - # bias = init_layer.bias new_layer = ops.Conv2d(in_channels=in_channels, out_channels=out_channels, kernel_size=kernel_size, stride=stride, padding=padding, bias=False) elif isinstance(init_layer, nn.BatchNorm2d): @@ -42,7 +47,6 @@ def _transform_op(init_layer): elif isinstance(init_layer, nn.MaxPool2d): kernel_size = init_layer.kernel_size stride = init_layer.stride - # padding = init_layer.padding new_layer = ops.MaxPool2d(kernel_size=kernel_size, stride=stride) elif isinstance(init_layer, nn.AvgPool2d): kernel_size = init_layer.kernel_size @@ -54,7 +58,6 @@ def _transform_op(init_layer): elif isinstance(init_layer, nn.Dense): in_features = init_layer.in_channels out_features = init_layer.out_channels - # use_bias = init_layer.bias new_layer = ops.Linear(in_features=in_features, out_features=out_features) elif isinstance(init_layer, nn.Dropout): prob = init_layer.p @@ -72,7 +75,6 @@ def _transform_block(init_block): if isinstance(init_block, ResidualBlock): inplanes = init_block.conv1.in_channels planes = init_block.bn1.num_features - # stride = init_block.stride downsample = init_block.down_sample stride = 2 if downsample else 1 new_block = 
BottleneckBlock(inchannel=inplanes, outchannel=planes, stride=stride) diff --git a/vega/networks/mindspore/backbones/official_model_conf.py b/vega/networks/mindspore/backbones/official_model_conf.py index f5dc3b5..82e47de 100644 --- a/vega/networks/mindspore/backbones/official_model_conf.py +++ b/vega/networks/mindspore/backbones/official_model_conf.py @@ -1,12 +1,18 @@ # -*- coding: utf-8 -*- # Copyright (C) 2020. Huawei Technologies Co., Ltd. All rights reserved. -# This program is free software; you can redistribute it and/or modify -# it under the terms of the MIT License. -# This program is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# MIT License for more details. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. """Load the official model from mindspore modelzoo.""" diff --git a/vega/networks/mindspore/backbones/resnet_ms.py b/vega/networks/mindspore/backbones/resnet_ms.py index 522518a..942e89e 100644 --- a/vega/networks/mindspore/backbones/resnet_ms.py +++ b/vega/networks/mindspore/backbones/resnet_ms.py @@ -1,16 +1,22 @@ # -*- coding: utf-8 -*- # Copyright (C) 2020. Huawei Technologies Co., Ltd. All rights reserved. -# This program is free software; you can redistribute it and/or modify -# it under the terms of the MIT License. -# This program is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# MIT License for more details. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. """Resnst model.""" -from .resnet import ResNet, ResidualBlock from vega.common import ClassType, ClassFactory +from .resnet import ResNet, ResidualBlock _block_size = { 50: [3, 4, 6, 3], diff --git a/vega/networks/mindspore/dnet.py b/vega/networks/mindspore/dnet.py index 9fc8cef..7657d49 100644 --- a/vega/networks/mindspore/dnet.py +++ b/vega/networks/mindspore/dnet.py @@ -1,12 +1,18 @@ # -*- coding:utf-8 -*- # Copyright (C) 2020. Huawei Technologies Co., Ltd. All rights reserved. -# This program is free software; you can redistribute it and/or modify -# it under the terms of the MIT License. -# This program is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# MIT License for more details. 
+# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. """DNet network.""" from vega.common import ClassFactory, ClassType @@ -158,20 +164,14 @@ def __init__(self, layer_sizes, strides, num1, num2): stride = 1 if strides[num1] != strides[num2]: stride = 2 - # print("stride:", stride, "layer_size:", layer_sizes, "num1, num2:", num1, num2) if stride != 1 or layer_sizes[num1] != layer_sizes[num2]: - # print("run here") self.conv = create_op('conv1', layer_sizes[num1], layer_sizes[num2], stride) - # print("self.conv:", self.conv) def call(self, x, **kwargs): """call.""" x1, x2 = x[self.num1], x[self.num2] - # print("%^" * 100, "x1 x2 shape:", x1.shape, x2.shape) - # print("$$" * 50, self.conv) if self.conv is not None: x1 = self.conv(x1) - # print("%" * 100, "x1 x2 shape new:", x1.shape, x2.shape) x[self.num2] = x1 + x2 return x @@ -219,7 +219,6 @@ def __init__(self, block_str, in_channel, op_names, stride=1, channel_increase=1 connect_index = 0 self.module_list = ModuleList() - # self.module_list = [] length = len(layer_str) // 2 stride_place = 0 while (stride_place + 1) * 2 < len(layer_str) and layer_str[stride_place * 2] == '1': @@ -229,7 +228,6 @@ def __init__(self, block_str, in_channel, op_names, stride=1, channel_increase=1 connect_parts.append("a0{}".format(length)) for i in range(length): - # print("length "*10, length) layer_module_list = ModuleList() layer_opt_name = op_names[int(layer_str[i * 2])] layer_in_channel = layer_sizes[-1] diff --git a/vega/networks/mindspore/faster_rcnn/anchor_generator.py b/vega/networks/mindspore/faster_rcnn/anchor_generator.py index 1a4bbc2..c3acb52 100644 --- a/vega/networks/mindspore/faster_rcnn/anchor_generator.py +++ b/vega/networks/mindspore/faster_rcnn/anchor_generator.py @@ -1,4 +1,4 @@ -# Copyright 2020-2021 Huawei Technologies Co., Ltd +# Copyright 2020 Huawei Technologies Co., Ltd # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/vega/networks/mindspore/faster_rcnn/bbox_assign_sample_stage2.py b/vega/networks/mindspore/faster_rcnn/bbox_assign_sample_stage2.py index 1553612..16e0aea 100644 --- a/vega/networks/mindspore/faster_rcnn/bbox_assign_sample_stage2.py +++ b/vega/networks/mindspore/faster_rcnn/bbox_assign_sample_stage2.py @@ -1,12 +1,17 @@ -# -*- coding:utf-8 -*- - -# Copyright (C) 2020. Huawei Technologies Co., Ltd. All rights reserved. -# This program is free software; you can redistribute it and/or modify -# it under the terms of the MIT License. -# This program is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# MIT License for more details. +# Copyright 2020-2021 Huawei Technologies Co., Ltd +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# ============================================================================
 
 """FasterRcnn positive and negative sample screening for Rcnn."""
 
 import numpy as np
diff --git a/vega/networks/mindspore/faster_rcnn/faster_rcnn_resnet.py b/vega/networks/mindspore/faster_rcnn/faster_rcnn_resnet.py
index d41e17f..10b8d2f 100644
--- a/vega/networks/mindspore/faster_rcnn/faster_rcnn_resnet.py
+++ b/vega/networks/mindspore/faster_rcnn/faster_rcnn_resnet.py
@@ -1,12 +1,21 @@
-# -*- coding:utf-8 -*-
-
-# Copyright (C) 2020. Huawei Technologies Co., Ltd. All rights reserved.
-# This program is free software; you can redistribute it and/or modify
-# it under the terms of the MIT License.
-# This program is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# MIT License for more details.
+# Copyright 2020 Huawei Technologies Co., Ltd
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+# 2021.11.29-Changed for SPNAS.
+#      Huawei Technologies Co., Ltd.
+# Copyright 2021 Huawei Technologies Co., Ltd.
+
 """FasterRcnn based on ResNet."""
 
 import numpy as np
@@ -16,6 +25,9 @@
 from mindspore.common.tensor import Tensor
 import mindspore.common.dtype as mstype
 from mindspore.ops import functional as F
+from vega.common import ClassFactory, ClassType
+from vega.algorithms.nas.sp_nas.src.model_utils.config import config
+from vega.modules.module import Module
 from .resnet import ResNetFea, ResidualBlockUsing
 from .bbox_assign_sample_stage2 import BboxAssignSampleForRcnn
 from .fpn_neck import FeatPyramidNeck
@@ -24,9 +36,6 @@
 from .rpn import RPN
 from .roi_align import SingleRoIExtractor
 from .anchor_generator import AnchorGenerator
-from vega.common import ClassFactory, ClassType
-from vega.algorithms.nas.sp_nas.src.model_utils.config import config
-from vega.modules.module import Module
 
 
 @ClassFactory.register(ClassType.NETWORK)
@@ -76,7 +85,8 @@ def __init__(self, code='111-2111-211111-211', **kwargs):
         self.num_anchors = len(self.anchor_ratios) * len(self.anchor_scales)
 
         featmap_sizes = config.feature_shapes
-        assert len(featmap_sizes) == len(self.anchor_generators)
+        if len(featmap_sizes) != len(self.anchor_generators):
+            raise ValueError('The number of feature maps must equal the number of anchor generators.')
 
         self.anchor_list = self.get_anchors(featmap_sizes)
 
@@ -115,7 +125,7 @@ def __init__(self, code='111-2111-211111-211', **kwargs):
         self.decode = P.BoundingBoxDecode(max_shape=(config.img_height, config.img_width),
                                           means=self.target_means,
                                           stds=self.target_stds)
         # Roi
-        self.roi_init(config)
+        self.roi_init(config_roi_init=config)
         # Rcnn
         self.rcnn = Rcnn(config, config.rcnn_in_channels * config.roi_layer.out_size * config.roi_layer.out_size,
@@ -138,18 +148,18 @@ def __init__(self, code='111-2111-211111-211', **kwargs):
         self.concat_end = (self.num_classes - 1)
 
         # Test mode
-        self.test_mode_init(config)
+        self.test_mode_init(config_mode_init=config)
 
         # Init tensor
-        self.init_tensor(config)
+        self.init_tensor(config_tensor_init=config)
 
         self.device_type = "Ascend" if context.get_context("device_target") == "Ascend" else "Others"
 
-    def roi_init(self, config):
+    def roi_init(self, config_roi_init):
         """
         Initialize roi from the config file.
 
         Args:
-            config (file): config file.
+            config_roi_init (file): config file.
             roi_layer (dict): Numbers of block in different layers.
             roi_align_out_channels (int): Out channel in each layer.
             config.roi_align_featmap_strides (list): featmap_strides in each layer.
@@ -158,36 +168,36 @@ def roi_init(self, config): Examples: self.roi_init(config) """ - self.roi_align = SingleRoIExtractor(config, - config.roi_layer, - config.roi_align_out_channels, - config.roi_align_featmap_strides, + self.roi_align = SingleRoIExtractor(config_roi_init, + config_roi_init.roi_layer, + config_roi_init.roi_align_out_channels, + config_roi_init.roi_align_featmap_strides, self.train_batch_size, - config.roi_align_finest_scale) - self.roi_align.set_train_local(config, True) - self.roi_align_test = SingleRoIExtractor(config, - config.roi_layer, - config.roi_align_out_channels, - config.roi_align_featmap_strides, + config_roi_init.roi_align_finest_scale) + self.roi_align.set_train_local(config_roi_init, True) + self.roi_align_test = SingleRoIExtractor(config_roi_init, + config_roi_init.roi_layer, + config_roi_init.roi_align_out_channels, + config_roi_init.roi_align_featmap_strides, 1, - config.roi_align_finest_scale) - self.roi_align_test.set_train_local(config, False) + config_roi_init.roi_align_finest_scale) + self.roi_align_test.set_train_local(config_roi_init, False) - def test_mode_init(self, config): + def test_mode_init(self, config_mode_init): """ Initialize test_mode from the config file. Args: - config (file): config file. + config_mode_init (file): config file. test_batch_size (int): Size of test batch. rpn_max_num (int): max num of rpn. test_score_thresh (float): threshold of test score. test_iou_thr (float): threshold of test iou. Examples: - self.test_mode_init(config) + self.test_mode_init(config_mode_init) """ - self.test_batch_size = config.test_batch_size + self.test_batch_size = config_mode_init.test_batch_size self.split = P.Split(axis=0, output_num=self.test_batch_size) self.split_shape = P.Split(axis=0, output_num=4) self.split_scores = P.Split(axis=1, output_num=self.num_classes) @@ -195,7 +205,7 @@ def test_mode_init(self, config): self.tile = P.Tile() self.gather = P.GatherNd() - self.rpn_max_num = config.rpn_max_num + self.rpn_max_num = config_mode_init.rpn_max_num self.zeros_for_nms = Tensor(np.zeros((self.rpn_max_num, 3)).astype(self.dtype)) self.ones_mask = np.ones((self.rpn_max_num, 1)).astype(np.bool) @@ -205,24 +215,26 @@ def test_mode_init(self, config): self.nms_pad_mask = Tensor(np.concatenate((self.ones_mask, self.ones_mask, self.ones_mask, self.ones_mask, self.zeros_mask), axis=1)) - self.test_score_thresh = Tensor(np.ones((self.rpn_max_num, 1)).astype(self.dtype) * config.test_score_thr) + self.test_score_thresh = Tensor( + np.ones((self.rpn_max_num, 1)).astype(self.dtype) * config_mode_init.test_score_thr) self.test_score_zeros = Tensor(np.ones((self.rpn_max_num, 1)).astype(self.dtype) * 0) self.test_box_zeros = Tensor(np.ones((self.rpn_max_num, 4)).astype(self.dtype) * -1) - self.test_iou_thr = Tensor(np.ones((self.rpn_max_num, 1)).astype(self.dtype) * config.test_iou_thr) - self.test_max_per_img = config.test_max_per_img - self.nms_test = P.NMSWithMask(config.test_iou_thr) + self.test_iou_thr = Tensor(np.ones((self.rpn_max_num, 1)).astype(self.dtype) * config_mode_init.test_iou_thr) + self.test_max_per_img = config_mode_init.test_max_per_img + self.nms_test = P.NMSWithMask(config_mode_init.test_iou_thr) self.softmax = P.Softmax(axis=1) self.logicand = P.LogicalAnd() self.oneslike = P.OnesLike() self.test_topk = P.TopK(sorted=True) self.test_num_proposal = self.test_batch_size * self.rpn_max_num - def init_tensor(self, config): + def init_tensor(self, config_tensor_init): """Construct the trainer of SpNas.""" - roi_align_index = 
[np.array(np.ones((config.num_expected_pos_stage2 + config.num_expected_neg_stage2, 1)) * i,
-                                         dtype=self.dtype) for i in range(self.train_batch_size)]
+        roi_align_index = [np.array(
+            np.ones((config_tensor_init.num_expected_pos_stage2 + config_tensor_init.num_expected_neg_stage2, 1)) * i,
+            dtype=self.dtype) for i in range(self.train_batch_size)]
 
-        roi_align_index_test = [np.array(np.ones((config.rpn_max_num, 1)) * i, dtype=self.dtype)
+        roi_align_index_test = [np.array(np.ones((config_tensor_init.rpn_max_num, 1)) * i, dtype=self.dtype)
                                 for i in range(self.test_batch_size)]
 
         self.roi_align_index_tensor = Tensor(np.concatenate(roi_align_index))
diff --git a/vega/networks/mindspore/faster_rcnn/fpn_neck.py b/vega/networks/mindspore/faster_rcnn/fpn_neck.py
index c9a1dbb..32b0ea2 100644
--- a/vega/networks/mindspore/faster_rcnn/fpn_neck.py
+++ b/vega/networks/mindspore/faster_rcnn/fpn_neck.py
@@ -1,12 +1,21 @@
-# -*- coding:utf-8 -*-
-
-# Copyright (C) 2020. Huawei Technologies Co., Ltd. All rights reserved.
-# This program is free software; you can redistribute it and/or modify
-# it under the terms of the MIT License.
-# This program is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# MIT License for more details.
+# Copyright 2020-2021 Huawei Technologies Co., Ltd
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+# 2021.11.29-Changed for SPNAS.
+#      Huawei Technologies Co., Ltd.
+# Copyright 2021 Huawei Technologies Co., Ltd.
+# ============================================================================
 
 """FasterRcnn feature pyramid network."""
 
 import numpy as np
@@ -69,7 +78,8 @@ def __init__(self,
         self.fpn_layer = len(self.in_channels)
         self.code = code
-        assert not self.num_outs < len(in_channels)
+        if self.num_outs < len(in_channels):
+            raise ValueError('num_outs must not be less than the length of in_channels.')
 
         self.lateral_convs_list_ = []
         self.fpn_convs_ = []
diff --git a/vega/networks/mindspore/faster_rcnn/proposal_generator.py b/vega/networks/mindspore/faster_rcnn/proposal_generator.py
index dc85c5a..ce7c03f 100644
--- a/vega/networks/mindspore/faster_rcnn/proposal_generator.py
+++ b/vega/networks/mindspore/faster_rcnn/proposal_generator.py
@@ -1,12 +1,21 @@
-# -*- coding:utf-8 -*-
-
-# Copyright (C) 2020. Huawei Technologies Co., Ltd. All rights reserved.
-# This program is free software; you can redistribute it and/or modify
-# it under the terms of the MIT License.
-# This program is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# MIT License for more details.
+# Copyright 2020-2021 Huawei Technologies Co., Ltd
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +# 2021.11.29-Changed for SPNAS. +#      Huawei Technologies Co., Ltd. +# Copyright 2021 Huawei Technologies Co., Ltd. +# ============================================================================ """FasterRcnn proposal generator.""" import numpy as np diff --git a/vega/networks/mindspore/faster_rcnn/rcnn.py b/vega/networks/mindspore/faster_rcnn/rcnn.py index 40ca3c0..a72cd86 100644 --- a/vega/networks/mindspore/faster_rcnn/rcnn.py +++ b/vega/networks/mindspore/faster_rcnn/rcnn.py @@ -1,12 +1,17 @@ -# -*- coding:utf-8 -*- - -# Copyright (C) 2020. Huawei Technologies Co., Ltd. All rights reserved. -# This program is free software; you can redistribute it and/or modify -# it under the terms of the MIT License. -# This program is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# MIT License for more details. +# Copyright 2020-2021 Huawei Technologies Co., Ltd +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================ """FasterRcnn Rcnn network.""" import numpy as np diff --git a/vega/networks/mindspore/faster_rcnn/resnet.py b/vega/networks/mindspore/faster_rcnn/resnet.py index f5e7eea..6bf0bf2 100644 --- a/vega/networks/mindspore/faster_rcnn/resnet.py +++ b/vega/networks/mindspore/faster_rcnn/resnet.py @@ -1,12 +1,21 @@ -# -*- coding:utf-8 -*- - -# Copyright (C) 2020. Huawei Technologies Co., Ltd. All rights reserved. -# This program is free software; you can redistribute it and/or modify -# it under the terms of the MIT License. -# This program is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# MIT License for more details. +# Copyright 2020-2021 Huawei Technologies Co., Ltd +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +# 2021.11.29-Changed for SPNAS. +#      Huawei Technologies Co., Ltd. +# Copyright 2021 Huawei Technologies Co., Ltd. 
+# ============================================================================ """Resnet backbone.""" import numpy as np diff --git a/vega/networks/mindspore/faster_rcnn/resnet50v1.py b/vega/networks/mindspore/faster_rcnn/resnet50v1.py index a8759d2..af20cc2 100644 --- a/vega/networks/mindspore/faster_rcnn/resnet50v1.py +++ b/vega/networks/mindspore/faster_rcnn/resnet50v1.py @@ -1,12 +1,17 @@ -# -*- coding:utf-8 -*- - -# Copyright (C) 2020. Huawei Technologies Co., Ltd. All rights reserved. -# This program is free software; you can redistribute it and/or modify -# it under the terms of the MIT License. -# This program is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# MIT License for more details. +# Copyright 2020-2021 Huawei Technologies Co., Ltd +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================ """Resnet50v1.0 backbone.""" import numpy as np @@ -195,11 +200,9 @@ def __init__(self, self.affine = weights_update out_chls = out_channels // self.expansion - # self.conv1 = _conv(in_channels, out_chls, kernel_size=1, stride=1, padding=0) self.conv1 = _conv(in_channels, out_chls, kernel_size=1, stride=stride, padding=0) self.bn1 = _BatchNorm2dInit(out_chls, momentum=momentum, affine=self.affine, use_batch_statistics=training) - # self.conv2 = _conv(out_chls, out_chls, kernel_size=3, stride=stride, padding=1) self.conv2 = _conv(out_chls, out_chls, kernel_size=3, stride=1, padding=1) self.bn2 = _BatchNorm2dInit(out_chls, momentum=momentum, affine=self.affine, use_batch_statistics=training) diff --git a/vega/networks/mindspore/faster_rcnn/roi_align.py b/vega/networks/mindspore/faster_rcnn/roi_align.py index 4e032f6..fc89e96 100644 --- a/vega/networks/mindspore/faster_rcnn/roi_align.py +++ b/vega/networks/mindspore/faster_rcnn/roi_align.py @@ -1,12 +1,17 @@ -# -*- coding:utf-8 -*- - -# Copyright (C) 2020. Huawei Technologies Co., Ltd. All rights reserved. -# This program is free software; you can redistribute it and/or modify -# it under the terms of the MIT License. -# This program is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# MIT License for more details. +# Copyright 2020-2021 Huawei Technologies Co., Ltd +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# ============================================================================ """FasterRcnn ROIAlign module.""" import numpy as np diff --git a/vega/networks/mindspore/faster_rcnn/rpn.py b/vega/networks/mindspore/faster_rcnn/rpn.py index 852a2f0..4f85d22 100644 --- a/vega/networks/mindspore/faster_rcnn/rpn.py +++ b/vega/networks/mindspore/faster_rcnn/rpn.py @@ -1,12 +1,17 @@ -# -*- coding:utf-8 -*- - -# Copyright (C) 2020. Huawei Technologies Co., Ltd. All rights reserved. -# This program is free software; you can redistribute it and/or modify -# it under the terms of the MIT License. -# This program is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# MIT License for more details. +# Copyright 2020-2021 Huawei Technologies Co., Ltd +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================ """RPN for fasterRCNN.""" import numpy as np import mindspore.nn as nn diff --git a/vega/networks/mindspore/losses/mix_auxiliary_loss.py b/vega/networks/mindspore/losses/mix_auxiliary_loss.py index 75144b6..f0b0b9d 100644 --- a/vega/networks/mindspore/losses/mix_auxiliary_loss.py +++ b/vega/networks/mindspore/losses/mix_auxiliary_loss.py @@ -1,12 +1,18 @@ # -*- coding: utf-8 -*- # Copyright (C) 2020. Huawei Technologies Co., Ltd. All rights reserved. -# This program is free software; you can redistribute it and/or modify -# it under the terms of the MIT License. -# This program is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# MIT License for more details. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. """Mix Auxiliary Loss.""" import mindspore.nn as nn diff --git a/vega/networks/mindspore/super_network.py b/vega/networks/mindspore/super_network.py index 420f777..40995c2 100644 --- a/vega/networks/mindspore/super_network.py +++ b/vega/networks/mindspore/super_network.py @@ -1,12 +1,18 @@ # -*- coding:utf-8 -*- # Copyright (C) 2020. Huawei Technologies Co., Ltd. All rights reserved. -# This program is free software; you can redistribute it and/or modify -# it under the terms of the MIT License. -# This program is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. 
See the -# MIT License for more details. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. """CARS and DARTS network.""" diff --git a/vega/networks/mobilenet.py b/vega/networks/mobilenet.py index b262ab4..c16c181 100644 --- a/vega/networks/mobilenet.py +++ b/vega/networks/mobilenet.py @@ -1,12 +1,18 @@ # -*- coding:utf-8 -*- # Copyright (C) 2020. Huawei Technologies Co., Ltd. All rights reserved. -# This program is free software; you can redistribute it and/or modify -# it under the terms of the MIT License. -# This program is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# MIT License for more details. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. """This is SearchSpace for blocks.""" from vega.modules.module import Module diff --git a/vega/networks/mobilenetv3.py b/vega/networks/mobilenetv3.py index 8df81ec..4dac3e0 100644 --- a/vega/networks/mobilenetv3.py +++ b/vega/networks/mobilenetv3.py @@ -1,12 +1,18 @@ # -*- coding:utf-8 -*- # Copyright (C) 2020. Huawei Technologies Co., Ltd. All rights reserved. -# This program is free software; you can redistribute it and/or modify -# it under the terms of the MIT License. -# This program is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# MIT License for more details. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. """This is SearchSpace for mobilenetv3.""" import math diff --git a/vega/networks/model_config.py b/vega/networks/model_config.py index 6de852f..9cf8ac0 100644 --- a/vega/networks/model_config.py +++ b/vega/networks/model_config.py @@ -1,12 +1,18 @@ # -*- coding:utf-8 -*- # Copyright (C) 2020. Huawei Technologies Co., Ltd. All rights reserved. -# This program is free software; you can redistribute it and/or modify -# it under the terms of the MIT License. 
-# This program is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# MIT License for more details. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. """Defined Conf for Pipeline.""" import os diff --git a/vega/networks/mtm_sr.py b/vega/networks/mtm_sr.py index d5e2ecb..a1d2c15 100644 --- a/vega/networks/mtm_sr.py +++ b/vega/networks/mtm_sr.py @@ -1,12 +1,18 @@ # -*- coding:utf-8 -*- # Copyright (C) 2020. Huawei Technologies Co., Ltd. All rights reserved. -# This program is free software; you can redistribute it and/or modify -# it under the terms of the MIT License. -# This program is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# MIT License for more details. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. """ResNet models for sr_ea.""" import logging diff --git a/vega/networks/necks.py b/vega/networks/necks.py index 8a1451d..0935c34 100644 --- a/vega/networks/necks.py +++ b/vega/networks/necks.py @@ -1,12 +1,18 @@ # -*- coding: utf-8 -*- # Copyright (C) 2020. Huawei Technologies Co., Ltd. All rights reserved. -# This program is free software; you can redistribute it and/or modify -# it under the terms of the MIT License. -# This program is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# MIT License for more details. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
"""ResNet models for detection.""" from vega.common.class_factory import ClassFactory, ClassType @@ -96,7 +102,8 @@ def __init__(self, inplanes, planes, stride=1, dilation=1, downsample=None, self.stride = stride self.dilation = dilation self.style = style - assert not with_cp + if with_cp: + raise ValueError('With_cp must be False.') def call(self, x): """Forward compute. @@ -128,7 +135,8 @@ def __init__(self, inplanes, planes, stride=1, dilation=1, downsample=None, style='pytorch', with_cp=False): """Init Bottleneck.""" super(Bottleneck, self).__init__() - assert style in ['pytorch', 'caffe'] + if style not in ['pytorch', 'caffe']: + raise ValueError('unknown style: %s' % repr(style)) self.inplanes = inplanes self.planes = planes self.stride = stride diff --git a/vega/networks/network_desc.py b/vega/networks/network_desc.py index a071975..1ddb759 100644 --- a/vega/networks/network_desc.py +++ b/vega/networks/network_desc.py @@ -1,12 +1,18 @@ # -*- coding: utf-8 -*- # Copyright (C) 2020. Huawei Technologies Co., Ltd. All rights reserved. -# This program is free software; you can redistribute it and/or modify -# it under the terms of the MIT License. -# This program is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# MIT License for more details. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. """Defined NetworkDesc.""" import logging diff --git a/vega/networks/pytorch/__init__.py b/vega/networks/pytorch/__init__.py index 87cae7d..f763227 100644 --- a/vega/networks/pytorch/__init__.py +++ b/vega/networks/pytorch/__init__.py @@ -1,10 +1,8 @@ -from .backbones import * -from .heads import * -from .blocks import * -from .customs import * -from .detectors import * -from .necks import * -from .losses import * -from .cyclesrbodys import * -from .gan import * -# from .transformer import * +from . import backbones +from . import heads +from . import blocks +from . import customs +from . import detectors +from . import necks +from . import losses +from . import cyclesrbodys diff --git a/vega/networks/pytorch/backbones/__init__.py b/vega/networks/pytorch/backbones/__init__.py index 1f58431..46949db 100644 --- a/vega/networks/pytorch/backbones/__init__.py +++ b/vega/networks/pytorch/backbones/__init__.py @@ -1,12 +1,18 @@ # -*- coding:utf-8 -*- # Copyright (C) 2020. Huawei Technologies Co., Ltd. All rights reserved. -# This program is free software; you can redistribute it and/or modify -# it under the terms of the MIT License. -# This program is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# MIT License for more details. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. """Lazy import pytorch backbones.""" diff --git a/vega/networks/pytorch/backbones/backbone_tools.py b/vega/networks/pytorch/backbones/backbone_tools.py index b1c286c..bae6f6d 100644 --- a/vega/networks/pytorch/backbones/backbone_tools.py +++ b/vega/networks/pytorch/backbones/backbone_tools.py @@ -1,19 +1,25 @@ # -*- coding:utf-8 -*- # Copyright (C) 2020. Huawei Technologies Co., Ltd. All rights reserved. -# This program is free software; you can redistribute it and/or modify -# it under the terms of the MIT License. -# This program is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# MIT License for more details. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. """Some function need in spnet.py.""" -import torch.nn as nn import os.path as osp -import torch from collections import OrderedDict +import torch.nn as nn +import torch def dirac_init(module, bias=0): @@ -29,7 +35,7 @@ def dirac_init(module, bias=0): nn.init.constant_(module.bias, bias) -def load_state_dict(module, state_dict, mapping, logger=None, mb_mapping=None): # noqa: C901 +def load_state_dict(module, state_dict, mapping, logger=None, mb_mapping=None): """Load state_dict to a module. This method is modified from :meth:`torch.nn.Module.load_state_dict`. 
:param module: Module that receives the state_dict @@ -60,7 +66,6 @@ def load_state_dict(module, state_dict, mapping, logger=None, mb_mapping=None): continue name = mapping[name] if isinstance(param, torch.nn.Parameter): - # backwards compatibility for serialized parameters param = param.data if param.size() != own_state[name].size(): shape_mismatch_pairs.append( @@ -73,14 +78,6 @@ def load_state_dict(module, state_dict, mapping, logger=None, mb_mapping=None): own_state[mb_name].copy_(param) mb_keys.append(mb_name) - # all_missing_keys = set(own_state.keys()) - set(state_dict.keys() - # ) - set(mapping.values()) - set(mb_keys) - # unexpected_keys = set(unexpected_keys) - set(mb_keys) - # ignore "num_batches_tracked" of BN layers - # missing_keys = [ - # key for key in all_missing_keys if 'num_batches_tracked' not in key - # ] - def load_checkpoint(model, filename, @@ -105,11 +102,9 @@ def load_checkpoint(model, :return: checkpoint :rtype: checkpoint """ - # load checkpoint from file if not osp.isfile(filename): raise IOError('{} is not a checkpoint file'.format(filename)) checkpoint = torch.load(filename, map_location=map_location) - # get state_dict from checkpoint if isinstance(checkpoint, OrderedDict): state_dict = checkpoint elif isinstance(checkpoint, dict) and 'weight' in checkpoint: @@ -117,10 +112,8 @@ def load_checkpoint(model, else: raise RuntimeError( 'No state_dict found in checkpoint file {}'.format(filename)) - # strip prefix of state_dict if list(state_dict.keys())[0].startswith('module.'): state_dict = {k[7:]: v for k, v in checkpoint['state_dict'].items()} - # load state_dict if hasattr(model, 'module'): load_state_dict(model.module, state_dict, pretrain_to_own, logger, mb_mapping) @@ -197,15 +190,12 @@ def match_name(own_names, checkpoint_names): """ pretrain_to_own = dict() print("matching pretrained model with new distributed architecture....") - # build mapping1: blocks id to own name bid_to_own = [] for name in own_names: if name.find('layer') >= 0: block_name = '.'.join(name.split('.')[:2]) if block_name not in bid_to_own: bid_to_own.append(block_name) - - # map pretrained name to own name flag = '' bid = -1 for name in checkpoint_names: diff --git a/vega/networks/pytorch/backbones/getter.py b/vega/networks/pytorch/backbones/getter.py index 416332e..e60fa7d 100644 --- a/vega/networks/pytorch/backbones/getter.py +++ b/vega/networks/pytorch/backbones/getter.py @@ -1,12 +1,18 @@ # -*- coding: utf-8 -*- # Copyright (C) 2020. Huawei Technologies Co., Ltd. All rights reserved. -# This program is free software; you can redistribute it and/or modify -# it under the terms of the MIT License. -# This program is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# MIT License for more details. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
"""ResNetVariant for Detection.""" from collections import OrderedDict diff --git a/vega/networks/pytorch/backbones/load_official_model.py b/vega/networks/pytorch/backbones/load_official_model.py index 211798c..d5ebf03 100644 --- a/vega/networks/pytorch/backbones/load_official_model.py +++ b/vega/networks/pytorch/backbones/load_official_model.py @@ -1,18 +1,24 @@ # -*- coding: utf-8 -*- # Copyright (C) 2020. Huawei Technologies Co., Ltd. All rights reserved. -# This program is free software; you can redistribute it and/or modify -# it under the terms of the MIT License. -# This program is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# MIT License for more details. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. """Load the official model from torchvision.""" -from vega.common import ClassType, ClassFactory +import operator from official_model_conf import output_layer_map from torch import nn -import operator +from vega.common import ClassType, ClassFactory @ClassFactory.register(ClassType.NETWORK) diff --git a/vega/networks/pytorch/backbones/official_model_conf.py b/vega/networks/pytorch/backbones/official_model_conf.py index a56081d..27f2b88 100644 --- a/vega/networks/pytorch/backbones/official_model_conf.py +++ b/vega/networks/pytorch/backbones/official_model_conf.py @@ -1,12 +1,18 @@ # -*- coding: utf-8 -*- # Copyright (C) 2020. Huawei Technologies Co., Ltd. All rights reserved. -# This program is free software; you can redistribute it and/or modify -# it under the terms of the MIT License. -# This program is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# MIT License for more details. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. """Load the offical model from torchvision.""" diff --git a/vega/networks/pytorch/backbones/resnet_variant_det.py b/vega/networks/pytorch/backbones/resnet_variant_det.py index c8b2ccc..596958d 100644 --- a/vega/networks/pytorch/backbones/resnet_variant_det.py +++ b/vega/networks/pytorch/backbones/resnet_variant_det.py @@ -1,12 +1,18 @@ # -*- coding: utf-8 -*- # Copyright (C) 2020. Huawei Technologies Co., Ltd. All rights reserved. -# This program is free software; you can redistribute it and/or modify -# it under the terms of the MIT License. 
-# This program is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# MIT License for more details. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. """ResNetVariant for Detection.""" import torch.nn as nn diff --git a/vega/networks/pytorch/backbones/resnext_variant_det.py b/vega/networks/pytorch/backbones/resnext_variant_det.py index f379121..9fe60c7 100644 --- a/vega/networks/pytorch/backbones/resnext_variant_det.py +++ b/vega/networks/pytorch/backbones/resnext_variant_det.py @@ -1,16 +1,22 @@ # -*- coding: utf-8 -*- # Copyright (C) 2020. Huawei Technologies Co., Ltd. All rights reserved. -# This program is free software; you can redistribute it and/or modify -# it under the terms of the MIT License. -# This program is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# MIT License for more details. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. """ResNeXtVariant for Detection.""" -import torch.nn as nn import math +import torch.nn as nn from vega.common import ClassType, ClassFactory from .resnet_variant_det import Bottleneck as _Bottleneck from .resnet_variant_det import BasicBlock as _BasicBlock diff --git a/vega/networks/pytorch/backbones/torch2vega.py b/vega/networks/pytorch/backbones/torch2vega.py index 162224d..edba82d 100644 --- a/vega/networks/pytorch/backbones/torch2vega.py +++ b/vega/networks/pytorch/backbones/torch2vega.py @@ -1,22 +1,28 @@ # -*- coding: utf-8 -*- # Copyright (C) 2020. Huawei Technologies Co., Ltd. All rights reserved. -# This program is free software; you can redistribute it and/or modify -# it under the terms of the MIT License. -# This program is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# MIT License for more details. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+# See the License for the specific language governing permissions and +# limitations under the License. """Transform torchvision model to vega.""" +from collections import OrderedDict import torch.nn as nn import torchvision -from vega.modules.module import Module -from collections import OrderedDict from vega.modules.operators import ops from vega.networks.necks import Bottleneck, BasicBlock from vega.modules.connections import Sequential from vega.common import ClassType, ClassFactory +from vega.modules.module import Module atom_op = (nn.Conv2d, nn.BatchNorm2d, nn.ReLU, nn.LeakyReLU, nn.MaxPool2d, nn.AvgPool2d, nn.AdaptiveAvgPool2d, nn.Linear, nn.Dropout) @@ -31,7 +37,6 @@ def _transsorm_op(init_layer): kernel_size = init_layer.kernel_size stride = init_layer.stride padding = init_layer.padding - # bias = init_layer.bias new_layer = ops.Conv2d(in_channels=in_channels, out_channels=out_channels, kernel_size=kernel_size, stride=stride, padding=padding) elif isinstance(init_layer, nn.BatchNorm2d): @@ -55,7 +60,6 @@ def _transsorm_op(init_layer): elif isinstance(init_layer, nn.Linear): in_features = init_layer.in_features out_features = init_layer.out_features - # use_bias = init_layer.bias new_layer = ops.Linear(in_features=in_features, out_features=out_features) elif isinstance(init_layer, nn.Dropout): prob = init_layer.p @@ -93,7 +97,6 @@ def _transfowm_model(model): """Transform the torch model to Vega model.""" new_model_dict = OrderedDict() for name, module in model.named_children(): - # print("name:", name, "module:", module, "type:", type(module)) if isinstance(module, atom_op): new_model_dict[name] = _transsorm_op(module) diff --git a/vega/networks/pytorch/blocks/__init__.py b/vega/networks/pytorch/blocks/__init__.py index b5707f5..12bcfe1 100644 --- a/vega/networks/pytorch/blocks/__init__.py +++ b/vega/networks/pytorch/blocks/__init__.py @@ -1,12 +1,18 @@ # -*- coding:utf-8 -*- # Copyright (C) 2020. Huawei Technologies Co., Ltd. All rights reserved. -# This program is free software; you can redistribute it and/or modify -# it under the terms of the MIT License. -# This program is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# MIT License for more details. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. """Lazy import pytorch blocks.""" diff --git a/vega/networks/pytorch/blocks/block.py b/vega/networks/pytorch/blocks/block.py index 33185ff..f1b6629 100644 --- a/vega/networks/pytorch/blocks/block.py +++ b/vega/networks/pytorch/blocks/block.py @@ -1,12 +1,18 @@ # -*- coding: utf-8 -*- # Copyright (C) 2020. Huawei Technologies Co., Ltd. All rights reserved. -# This program is free software; you can redistribute it and/or modify -# it under the terms of the MIT License. -# This program is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. 
See the -# MIT License for more details. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. """Defined Block.""" from vega.modules.module import Module diff --git a/vega/networks/pytorch/blocks/conv_module.py b/vega/networks/pytorch/blocks/conv_module.py index 996fc3b..fdfce20 100644 --- a/vega/networks/pytorch/blocks/conv_module.py +++ b/vega/networks/pytorch/blocks/conv_module.py @@ -1,12 +1,18 @@ # -*- coding: utf-8 -*- # Copyright (C) 2020. Huawei Technologies Co., Ltd. All rights reserved. -# This program is free software; you can redistribute it and/or modify -# it under the terms of the MIT License. -# This program is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# MIT License for more details. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
"""Conv Module with Normalization.""" @@ -34,8 +40,8 @@ def __init__(self, dilation=1, groups=1, bias='auto', - conv_cfg={"type": 'Conv'}, - norm_cfg={"type": 'BN'}, + conv_cfg=None, + norm_cfg=None, activation='relu', inplace=True, activate_last=True): @@ -69,36 +75,46 @@ def __init__(self, :type activate_last: bool """ super(ConvModule, self).__init__() - assert conv_cfg is None or isinstance(conv_cfg, dict) - assert norm_cfg is None or isinstance(norm_cfg, dict) - self.conv_cfg = conv_cfg - self.norm_cfg = norm_cfg - self.activation = activation - self.inplace = inplace - self.activate_last = activate_last - self.with_norm = norm_cfg is not None - self.with_activatation = activation is not None - if bias == 'auto': - bias = False if self.with_norm else True - self.with_bias = bias - self.conv = conv_cfg_dict[self.conv_cfg['type']]( - in_channels, - out_channels, - kernel_size, - stride=stride, - padding=padding, - dilation=dilation, - groups=groups, - bias=bias) - self.in_channels = self.conv.in_channels - self.out_channels = self.conv.out_channels - self.kernel_size = self.conv.kernel_size - self.stride = self.conv.stride - self.padding = self.conv.padding - self.dilation = self.conv.dilation - self.transposed = self.conv.transposed - self.output_padding = self.conv.output_padding - self.groups = self.conv.groups + if conv_cfg is None: + conv_cfg = {"type": 'Conv'} + if norm_cfg is None: + norm_cfg = {"type": 'BN'} + if (conv_cfg is None or isinstance(conv_cfg, dict)) and (norm_cfg is None or isinstance(norm_cfg, dict)): + self.conv_cfg = conv_cfg + self.norm_cfg = norm_cfg + self.activation = activation + self.inplace = inplace + self.activate_last = activate_last + self.with_norm = norm_cfg is not None + self.with_activatation = activation is not None + if bias == 'auto': + bias = False if self.with_norm else True + self.with_bias = bias + self.conv = conv_cfg_dict[self.conv_cfg['type']]( + in_channels, + out_channels, + kernel_size, + stride=stride, + padding=padding, + dilation=dilation, + groups=groups, + bias=bias) + self.in_channels = self.conv.in_channels + self.out_channels = self.conv.out_channels + self.kernel_size = self.conv.kernel_size + self.stride = self.conv.stride + self.padding = self.conv.padding + self.dilation = self.conv.dilation + self.transposed = self.conv.transposed + self.output_padding = self.conv.output_padding + self.groups = self.conv.groups + self._network_setting(in_channels, out_channels, inplace) + self.init_weight() + else: + raise ValueError('Failed to init ConvModule.') + + def _network_setting(self, in_channels, out_channels, inplace): + """Set network.""" if self.with_norm: norm_channels = out_channels if self.activate_last else in_channels requires_grad = self.norm_cfg['requires_grad'] if 'requires_grad' in self.norm_cfg else False @@ -114,7 +130,6 @@ def __init__(self, self.activation)) if self.activation == 'relu': self.activate = nn.ReLU(inplace=inplace) - self.init_weight() def init_weight(self): """Init weight of Conv Module with Normalization.""" diff --git a/vega/networks/pytorch/blocks/conv_ws.py b/vega/networks/pytorch/blocks/conv_ws.py index 617d774..08e3908 100644 --- a/vega/networks/pytorch/blocks/conv_ws.py +++ b/vega/networks/pytorch/blocks/conv_ws.py @@ -1,12 +1,18 @@ # -*- coding: utf-8 -*- # Copyright (C) 2020. Huawei Technologies Co., Ltd. All rights reserved. -# This program is free software; you can redistribute it and/or modify -# it under the terms of the MIT License. 
-# This program is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# MIT License for more details. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. """conv weight standarlization.""" import torch.nn as nn diff --git a/vega/networks/pytorch/blocks/layer_creator.py b/vega/networks/pytorch/blocks/layer_creator.py index 605c5df..b84e531 100644 --- a/vega/networks/pytorch/blocks/layer_creator.py +++ b/vega/networks/pytorch/blocks/layer_creator.py @@ -1,12 +1,18 @@ # -*- coding: utf-8 -*- # Copyright (C) 2020. Huawei Technologies Co., Ltd. All rights reserved. -# This program is free software; you can redistribute it and/or modify -# it under the terms of the MIT License. -# This program is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# MIT License for more details. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. """Layer Creator.""" import torch.nn as nn diff --git a/vega/networks/pytorch/blocks/stem.py b/vega/networks/pytorch/blocks/stem.py index eaa87b7..6013c07 100644 --- a/vega/networks/pytorch/blocks/stem.py +++ b/vega/networks/pytorch/blocks/stem.py @@ -1,12 +1,18 @@ # -*- coding:utf-8 -*- # Copyright (C) 2020. Huawei Technologies Co., Ltd. All rights reserved. -# This program is free software; you can redistribute it and/or modify -# it under the terms of the MIT License. -# This program is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# MIT License for more details. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
"""This is SearchSpace for preprocess.""" import torch.nn as nn diff --git a/vega/networks/pytorch/customs/__init__.py b/vega/networks/pytorch/customs/__init__.py index bf5a741..3b4dfbe 100644 --- a/vega/networks/pytorch/customs/__init__.py +++ b/vega/networks/pytorch/customs/__init__.py @@ -1,12 +1,18 @@ # -*- coding:utf-8 -*- # Copyright (C) 2020. Huawei Technologies Co., Ltd. All rights reserved. -# This program is free software; you can redistribute it and/or modify -# it under the terms of the MIT License. -# This program is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# MIT License for more details. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. """Lazy import custom network.""" @@ -21,5 +27,4 @@ "bisenet": ["network:BiSeNet"], "modnas": ["network:ModNasArchSpace"], "mobilenetv2": ["network:MobileNetV2"], - "gcn_regressor": ["network:GCNRegressor"], }) diff --git a/vega/networks/pytorch/customs/autogate.py b/vega/networks/pytorch/customs/autogate.py index d44579a..6d682a7 100644 --- a/vega/networks/pytorch/customs/autogate.py +++ b/vega/networks/pytorch/customs/autogate.py @@ -1,19 +1,25 @@ # -*- coding:utf-8 -*- # Copyright (C) 2020. Huawei Technologies Co., Ltd. All rights reserved. -# This program is free software; you can redistribute it and/or modify -# it under the terms of the MIT License. -# This program is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# MIT License for more details. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. """The Autogate model.""" import logging import copy +from vega.common import ClassType, ClassFactory from .deepfm import DeepFactorizationMachineModel from .fis.layers import NormalizedWeightedFMLayer -from vega.common import ClassType, ClassFactory @ClassFactory.register(ClassType.NETWORK) diff --git a/vega/networks/pytorch/customs/autogroup.py b/vega/networks/pytorch/customs/autogroup.py index 29acd91..0dbf9f0 100644 --- a/vega/networks/pytorch/customs/autogroup.py +++ b/vega/networks/pytorch/customs/autogroup.py @@ -1,16 +1,22 @@ # -*- coding: utf-8 -*- # Copyright (C) 2020. Huawei Technologies Co., Ltd. All rights reserved. -# This program is free software; you can redistribute it and/or modify -# it under the terms of the MIT License. 
-# This program is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# MIT License for more details.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
 
 """AutoGroup model file."""
 import logging
-import torch
 import copy
+import torch
 from vega.common import ClassType, ClassFactory
 from .fis.layers import LinearLayer, EmbeddingLayer, MultiLayerPerceptron, FeatureGroupLayer
@@ -80,7 +86,8 @@ def __init__(self, **kwargs):
         embed_dims = self.desc['embed_dims']
         bucket_nums = self.desc['bucket_nums']
         max_order = self.desc['max_order']
-        assert len(embed_dims) == len(bucket_nums) == max_order
+        if not len(embed_dims) == len(bucket_nums) == max_order:
+            raise ValueError('embed_dims, bucket_nums and max_order must have the same length.')
         self.linear = LinearLayer(self.desc['input_dim'])
         self.max_order = max_order
         for i in range(max_order):
diff --git a/vega/networks/pytorch/customs/bisenet.py b/vega/networks/pytorch/customs/bisenet.py
index 34efd2c..9ac02ce 100644
--- a/vega/networks/pytorch/customs/bisenet.py
+++ b/vega/networks/pytorch/customs/bisenet.py
@@ -1,22 +1,28 @@
 # -*- coding:utf-8 -*-
 
 # Copyright (C) 2020. Huawei Technologies Co., Ltd. All rights reserved.
-# This program is free software; you can redistribute it and/or modify
-# it under the terms of the MIT License.
-# This program is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# MIT License for more details.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
"""The BiSeNet model.""" import torch.nn as nn import torch.nn.functional as F -from .segmentation.common import AttentionRefinement, FeatureFusion -from .segmentation.evolveresnet import build_archs, build_spatial_path -from .segmentation.weights import init_weight from vega.common import ClassType, ClassFactory from vega.modules.module import Module from vega.modules.operators import ConvBnRelu +from .segmentation.common import AttentionRefinement, FeatureFusion +from .segmentation.evolveresnet import build_archs, build_spatial_path +from .segmentation.weights import init_weight @ClassFactory.register(ClassType.NETWORK) @@ -33,7 +39,6 @@ def __init__(self, **desc): self.conv_channel = desc['conv_channel'] self.norm_layer = desc['norm_layer'] self.backone_args = desc['backbone_args'] - # self.lr = desc['lr'] self.encoding = desc['config'] norm_type = self.norm_layer['norm_type'] if norm_type == 'GN': diff --git a/vega/networks/pytorch/customs/deepfm.py b/vega/networks/pytorch/customs/deepfm.py index 77fabc8..e0c31db 100644 --- a/vega/networks/pytorch/customs/deepfm.py +++ b/vega/networks/pytorch/customs/deepfm.py @@ -1,19 +1,25 @@ # -*- coding:utf-8 -*- # Copyright (C) 2020. Huawei Technologies Co., Ltd. All rights reserved. -# This program is free software; you can redistribute it and/or modify -# it under the terms of the MIT License. -# This program is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# MIT License for more details. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. """The DeepFM model.""" -import torch import copy +import torch +from vega.common import ClassType, ClassFactory from .fis.layers import LinearLayer, EmbeddingLayer, \ FactorizationMachineLayer, MultiLayerPerceptron -from vega.common import ClassType, ClassFactory @ClassFactory.register(ClassType.NETWORK) @@ -68,8 +74,4 @@ def forward(self, feature_id): embed_v = self.embedding(feature_id, feature_val) fm_score = self.fm(embed_v).squeeze(1) mlp_score = self.mlp(embed_v.view(-1, self.mlp_input_dim)).squeeze(1) - - # print("linear_score:",linear_score.size()) - # print("fm_score:", fm_score.size()) - # print("mlp_score:", mlp_score.size()) return linear_score + fm_score + mlp_score diff --git a/vega/networks/pytorch/customs/fis/layers.py b/vega/networks/pytorch/customs/fis/layers.py index f9df378..72385e3 100644 --- a/vega/networks/pytorch/customs/fis/layers.py +++ b/vega/networks/pytorch/customs/fis/layers.py @@ -1,4 +1,19 @@ -# -*- coding: utf-8 -*- +# -*- coding:utf-8 -*- + +# Copyright (C) 2020. Huawei Technologies Co., Ltd. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + """ Common modules in ctr prediction task. @@ -98,7 +113,6 @@ def __init__(self, input_dim, embed_dim): :type embed_dim: int """ super(EmbeddingLayer, self).__init__() - # todo: add padding_idx = 0 self.embedding = torch.nn.Embedding(input_dim, embed_dim) torch.nn.init.xavier_uniform_(self.embedding.weight.data) @@ -118,7 +132,7 @@ def forward(self, feature_id, feature_val=None): class FactorizationMachineLayer(torch.nn.Module): - """Factorization Machines module. https://www.csie.ntu.edu.tw/~b97053/paper/Rendle2010FM.pdf. + """Factorization Machines module. :param reduce_sum: whether to sum interaction score of all feature pairs, defaults to `True` :type reduce_sum: bool, optional diff --git a/vega/networks/pytorch/customs/gcn_regressor.py b/vega/networks/pytorch/customs/gcn_regressor.py deleted file mode 100644 index 1bd95b0..0000000 --- a/vega/networks/pytorch/customs/gcn_regressor.py +++ /dev/null @@ -1,101 +0,0 @@ -# -*- coding:utf-8 -*- - -# Copyright (C) 2020. Huawei Technologies Co., Ltd. All rights reserved. -# This program is free software; you can redistribute it and/or modify -# it under the terms of the MIT License. -# This program is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# MIT License for more details. - -"""The Graph Convolution Network model.""" -import logging -import math -import torch -import torch.nn as nn -from torch.nn.parameter import Parameter -import torch.nn.functional as F -from vega.common import ClassType, ClassFactory - - -logger = logging.getLogger(__name__) - - -class GraphConvolution(nn.Module): - """Graph Convolution Layer.""" - - def __init__(self, in_features, out_features, bias=True): - super(GraphConvolution, self).__init__() - self.in_features = in_features - self.out_features = out_features - self.weight = Parameter(torch.FloatTensor(in_features, out_features)) - if bias: - self.bias = Parameter(torch.FloatTensor(out_features)) - else: - self.register_parameter('bias', None) - self.reset_parameters() - - def reset_parameters(self): - """Reset parameters of layer.""" - stdv = 1. 
/ math.sqrt(self.weight.size(1)) - self.weight.data.uniform_(-stdv, stdv) - if self.bias is not None: - self.bias.data.uniform_(-stdv, stdv) - - def forward(self, input_, adj): - """Forward function of graph convolution layer.""" - support = torch.matmul(input_, self.weight) - output = torch.bmm(adj, support) - if self.bias is not None: - return output + self.bias - else: - return output - - -@ClassFactory.register(ClassType.NETWORK) -class GCNRegressor(nn.Module): - """Graph Convolution Network for regression.""" - - def __init__(self, nfeat, ifsigmoid, layer_size=64): - super(GCNRegressor, self).__init__() - self.ifsigmoid = ifsigmoid - self.size = layer_size - self.gc1 = GraphConvolution(nfeat, self.size) - self.gc2 = GraphConvolution(self.size, self.size) - self.gc3 = GraphConvolution(self.size, self.size) - self.gc4 = GraphConvolution(self.size, self.size) - self.bn1 = nn.BatchNorm1d(self.size) - self.bn2 = nn.BatchNorm1d(self.size) - self.bn3 = nn.BatchNorm1d(self.size) - self.bn4 = nn.BatchNorm1d(self.size) - self.sigmoid = nn.Sigmoid() - self.fc = nn.Linear(self.size, 1) - self.init_weights() - - def init_weights(self): - """Init parameters of each graph convolution layer in GCN.""" - nn.init.uniform_(self.gc1.weight, a=-0.05, b=0.05) - nn.init.uniform_(self.gc2.weight, a=-0.05, b=0.05) - nn.init.uniform_(self.gc3.weight, a=-0.05, b=0.05) - nn.init.uniform_(self.gc4.weight, a=-0.05, b=0.05) - - def forward(self, input): - """Forward function of GCN.""" - node_size = input.size()[1] - adj, feat = input[:, :, :node_size], input[:, :, node_size:] - x = F.relu(self.bn1(self.gc1(feat, adj).transpose(2, 1))) - x = x.transpose(1, 2) - x = F.relu(self.bn2(self.gc2(x, adj).transpose(2, 1))) - x = x.transpose(1, 2) - x = F.relu(self.bn3(self.gc3(x, adj).transpose(2, 1))) - x = x.transpose(1, 2) - x = F.relu(self.bn4(self.gc4(x, adj).transpose(2, 1))) - x = x.transpose(1, 2) - embeddings = x[:, x.size()[1] - 1, :] - x = self.fc(embeddings) - # if extract_embedding: - # return embeddings - if self.ifsigmoid: - return self.sigmoid(x) - else: - return x diff --git a/vega/networks/pytorch/customs/mobilenetv2.py b/vega/networks/pytorch/customs/mobilenetv2.py index 596dcdf..cf32ec4 100644 --- a/vega/networks/pytorch/customs/mobilenetv2.py +++ b/vega/networks/pytorch/customs/mobilenetv2.py @@ -1,12 +1,10 @@ # -*- coding:utf-8 -*- -# Copyright (C) 2020. Huawei Technologies Co., Ltd. All rights reserved. -# This program is free software; you can redistribute it and/or modify -# it under the terms of the MIT License. -# This program is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# MIT License for more details. +# This file is adapted from the torchvision library at +# https://github.com/pytorch/vision/blob/main/torchvision/models/mobilenetv2.py + +# 2021.07.04-Changed for vega search space +# Author: Nikita Klyuchnikov """MobileNetV2 architecture.""" @@ -19,8 +17,6 @@ def _make_divisible(v, divisor, min_value=None): Taken from the original tf repo. 
It ensures that all layers have a channel number that is divisible by 8 - It can be seen here: - https://github.com/tensorflow/models/blob/master/research/slim/nets/mobilenet/mobilenet.py :param v: :param divisor: :param min_value: @@ -55,7 +51,8 @@ def __init__(self, inp, oup, stride, expand_ratio, kernel_size=3): """Initialize InvertedResidual instance.""" super(InvertedResidual, self).__init__() self.stride = stride - assert stride in [1, 2] + if stride not in [1, 2]: + raise ValueError("Stride must be in [1,2].") hidden_dim = int(round(inp * expand_ratio)) self.use_res_connect = self.stride == 1 and inp == oup @@ -90,7 +87,7 @@ def __init__(self, width_mult=1.0, inverted_residual_setting=None, round_nearest=8, - block=None, kernels=[3] * 7, first_stride=1, last_channel=1280, desc=None): + block=None, kernels=None, first_stride=1, last_channel=1280, desc=None): """ Network MobileNet V2 main class. @@ -105,6 +102,8 @@ def __init__(self, """ super(MobileNetV2, self).__init__() + if kernels is None: + kernels = [3] * 7 if block is None: block = InvertedResidual input_channel = 32 @@ -148,7 +147,6 @@ def __init__(self, idx += 1 # building last several layers features.append(ConvBNReLU(input_channel, self.last_channel, kernel_size=1)) - # self.list_features = nn.ModuleList(features) # make it nn.Sequential self.features = nn.Sequential(*features) @@ -183,10 +181,8 @@ def _forward_impl(self, x, is_feat): if i in [0, 2, 3, 5, 7]: kd_layers.append(x) - # x = self.features(x) f5 = x.mean([2, 3]) out = self.classifier(f5) - # kd_layers.append(f5) if is_feat: return kd_layers, out diff --git a/vega/networks/pytorch/customs/modnas/__init__.py b/vega/networks/pytorch/customs/modnas/__init__.py index 2598396..e039d5c 100644 --- a/vega/networks/pytorch/customs/modnas/__init__.py +++ b/vega/networks/pytorch/customs/modnas/__init__.py @@ -1,2 +1,2 @@ import vega.algorithms.nas.modnas.compat -from .compat import * +from .compat import ModNasArchSpace diff --git a/vega/networks/pytorch/customs/modnas/arch_space/construct/predefined/hparams.py b/vega/networks/pytorch/customs/modnas/arch_space/construct/predefined/hparams.py index 4b0cf72..1b39a4c 100644 --- a/vega/networks/pytorch/customs/modnas/arch_space/construct/predefined/hparams.py +++ b/vega/networks/pytorch/customs/modnas/arch_space/construct/predefined/hparams.py @@ -1,12 +1,18 @@ # -*- coding:utf-8 -*- # Copyright (C) 2020. Huawei Technologies Co., Ltd. All rights reserved. -# This program is free software; you can redistribute it and/or modify -# it under the terms of the MIT License. -# This program is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# MIT License for more details. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
"""Hyperparameter constructor.""" from typing import Dict, List, Union diff --git a/vega/networks/pytorch/customs/modnas/arch_space/construct/predefined/init.py b/vega/networks/pytorch/customs/modnas/arch_space/construct/predefined/init.py index 5022591..c88c08a 100644 --- a/vega/networks/pytorch/customs/modnas/arch_space/construct/predefined/init.py +++ b/vega/networks/pytorch/customs/modnas/arch_space/construct/predefined/init.py @@ -1,12 +1,18 @@ # -*- coding:utf-8 -*- # Copyright (C) 2020. Huawei Technologies Co., Ltd. All rights reserved. -# This program is free software; you can redistribute it and/or modify -# it under the terms of the MIT License. -# This program is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# MIT License for more details. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. """Initialization constructor.""" import numpy as np diff --git a/vega/networks/pytorch/customs/modnas/arch_space/construct/torch/arch_desc.py b/vega/networks/pytorch/customs/modnas/arch_space/construct/torch/arch_desc.py index 65c6478..8a04341 100644 --- a/vega/networks/pytorch/customs/modnas/arch_space/construct/torch/arch_desc.py +++ b/vega/networks/pytorch/customs/modnas/arch_space/construct/torch/arch_desc.py @@ -1,26 +1,32 @@ # -*- coding:utf-8 -*- # Copyright (C) 2020. Huawei Technologies Co., Ltd. All rights reserved. -# This program is free software; you can redistribute it and/or modify -# it under the terms of the MIT License. -# This program is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# MIT License for more details. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
"""ArchDesc Constructors.""" import os -import yaml import json import copy -from .default import DefaultSlotTraversalConstructor +from typing import Dict, Optional, Any, Sequence +import yaml +from torch.nn.modules.module import Module from modnas.registry.arch_space import build as build_module from modnas.registry.construct import register from modnas.arch_space.slot import Slot from modnas.utils.logging import get_logger -from torch.nn.modules.module import Module -from typing import Dict, Optional, Any, Sequence - +from vega.security.args import path_verify +from .default import DefaultSlotTraversalConstructor logger = get_logger('construct') @@ -37,6 +43,8 @@ def parse_arch_desc(desc: Any, parser: Optional[str] = None) -> Any: if isinstance(desc, str): default_parser = 'yaml' if os.path.exists(desc): + desc = os.path.realpath(desc) + desc = path_verify(desc) _, ext = os.path.splitext(desc) default_parser = ext[1:].lower() with open(desc, 'r', encoding='UTF-8') as f: diff --git a/vega/networks/pytorch/customs/modnas/arch_space/construct/torch/default.py b/vega/networks/pytorch/customs/modnas/arch_space/construct/torch/default.py index 0ecfebf..e0f560a 100644 --- a/vega/networks/pytorch/customs/modnas/arch_space/construct/torch/default.py +++ b/vega/networks/pytorch/customs/modnas/arch_space/construct/torch/default.py @@ -1,12 +1,18 @@ # -*- coding:utf-8 -*- # Copyright (C) 2020. Huawei Technologies Co., Ltd. All rights reserved. -# This program is free software; you can redistribute it and/or modify -# it under the terms of the MIT License. -# This program is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# MIT License for more details. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. """Default Constructors.""" import importlib diff --git a/vega/networks/pytorch/customs/modnas/arch_space/construct/torch/droppath.py b/vega/networks/pytorch/customs/modnas/arch_space/construct/torch/droppath.py index 18a9a7c..2a9bb30 100644 --- a/vega/networks/pytorch/customs/modnas/arch_space/construct/torch/droppath.py +++ b/vega/networks/pytorch/customs/modnas/arch_space/construct/torch/droppath.py @@ -1,23 +1,29 @@ # -*- coding:utf-8 -*- # Copyright (C) 2020. Huawei Technologies Co., Ltd. All rights reserved. -# This program is free software; you can redistribute it and/or modify -# it under the terms of the MIT License. -# This program is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# MIT License for more details. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. """DropPath constructor.""" +from typing import Optional import torch +from torch.nn.modules.container import Sequential +from torch.nn.modules.module import Module from modnas.arch_space.ops import Identity from modnas.core.event import event_on -from .default import DefaultSlotTraversalConstructor from modnas.registry.construct import register from modnas.arch_space.slot import Slot -from torch.nn.modules.container import Sequential -from torch.nn.modules.module import Module -from typing import Optional +from .default import DefaultSlotTraversalConstructor class DropPath(torch.nn.Module): diff --git a/vega/networks/pytorch/customs/modnas/arch_space/construct/torch/model_init.py b/vega/networks/pytorch/customs/modnas/arch_space/construct/torch/model_init.py index 7c84953..63a2eea 100644 --- a/vega/networks/pytorch/customs/modnas/arch_space/construct/torch/model_init.py +++ b/vega/networks/pytorch/customs/modnas/arch_space/construct/torch/model_init.py @@ -1,12 +1,18 @@ # -*- coding:utf-8 -*- # Copyright (C) 2020. Huawei Technologies Co., Ltd. All rights reserved. -# This program is free software; you can redistribute it and/or modify -# it under the terms of the MIT License. -# This program is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# MIT License for more details. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. """Model weight initializer.""" import copy diff --git a/vega/networks/pytorch/customs/modnas/arch_space/construct/torch/torch.py b/vega/networks/pytorch/customs/modnas/arch_space/construct/torch/torch.py index 36a4ae4..f75ae91 100644 --- a/vega/networks/pytorch/customs/modnas/arch_space/construct/torch/torch.py +++ b/vega/networks/pytorch/customs/modnas/arch_space/construct/torch/torch.py @@ -1,12 +1,18 @@ # -*- coding:utf-8 -*- # Copyright (C) 2020. Huawei Technologies Co., Ltd. All rights reserved. -# This program is free software; you can redistribute it and/or modify -# it under the terms of the MIT License. -# This program is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# MIT License for more details. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. """Torch constructors.""" import torch diff --git a/vega/networks/pytorch/customs/modnas/arch_space/export/predefined/default.py b/vega/networks/pytorch/customs/modnas/arch_space/export/predefined/default.py index 1e2a4d7..c229ce2 100644 --- a/vega/networks/pytorch/customs/modnas/arch_space/export/predefined/default.py +++ b/vega/networks/pytorch/customs/modnas/arch_space/export/predefined/default.py @@ -1,20 +1,26 @@ # -*- coding:utf-8 -*- # Copyright (C) 2020. Huawei Technologies Co., Ltd. All rights reserved. -# This program is free software; you can redistribute it and/or modify -# it under the terms of the MIT License. -# This program is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# MIT License for more details. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. """Default Architecture Exporters.""" import os import json +from typing import Any, Dict, List, Optional, Union import yaml from modnas.core.param_space import ParamSpace from modnas.registry.export import register, build -from typing import Any, Dict, List, Optional, Union @register diff --git a/vega/networks/pytorch/customs/modnas/arch_space/export/torch/arch_space.py b/vega/networks/pytorch/customs/modnas/arch_space/export/torch/arch_space.py index d0c827a..8fc26c6 100644 --- a/vega/networks/pytorch/customs/modnas/arch_space/export/torch/arch_space.py +++ b/vega/networks/pytorch/customs/modnas/arch_space/export/torch/arch_space.py @@ -1,12 +1,18 @@ # -*- coding:utf-8 -*- # Copyright (C) 2020. Huawei Technologies Co., Ltd. All rights reserved. -# This program is free software; you can redistribute it and/or modify -# it under the terms of the MIT License. -# This program is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# MIT License for more details. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
"""Torch Architecture Exporters.""" import copy diff --git a/vega/networks/pytorch/customs/modnas/arch_space/export/torch/torch.py b/vega/networks/pytorch/customs/modnas/arch_space/export/torch/torch.py index 9c8e8f8..0fae73f 100644 --- a/vega/networks/pytorch/customs/modnas/arch_space/export/torch/torch.py +++ b/vega/networks/pytorch/customs/modnas/arch_space/export/torch/torch.py @@ -1,14 +1,21 @@ # -*- coding:utf-8 -*- # Copyright (C) 2020. Huawei Technologies Co., Ltd. All rights reserved. -# This program is free software; you can redistribute it and/or modify -# it under the terms of the MIT License. -# This program is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# MIT License for more details. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. """Default Torch Exporters.""" +import logging import traceback import torch from modnas.registry.export import register @@ -34,6 +41,7 @@ def __call__(self, model): logger.info('Saving torch checkpoint to {}'.format(self.path)) try: torch.save(model.state_dict(), self.path, **self.save_kwargs) - except RuntimeError: - logger.error('Failed saving checkpoint: {}'.format(traceback.format_exc())) + except RuntimeError as e: + logger.debug(traceback.format_exc()) + logger.error(f'Failed saving checkpoint, message: {e}') return model diff --git a/vega/networks/pytorch/customs/modnas/arch_space/layer_defs.py b/vega/networks/pytorch/customs/modnas/arch_space/layer_defs.py index f5a57f0..dc104eb 100644 --- a/vega/networks/pytorch/customs/modnas/arch_space/layer_defs.py +++ b/vega/networks/pytorch/customs/modnas/arch_space/layer_defs.py @@ -1,18 +1,24 @@ # -*- coding:utf-8 -*- # Copyright (C) 2020. Huawei Technologies Co., Ltd. All rights reserved. -# This program is free software; you can redistribute it and/or modify -# it under the terms of the MIT License. -# This program is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# MIT License for more details. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
"""Dataflow defining components in Layers.""" -import torch from itertools import combinations -from torch import Tensor from typing import Iterator, List, Tuple, Union +import torch +from torch import Tensor from modnas.registry.layer_def import register diff --git a/vega/networks/pytorch/customs/modnas/arch_space/layers.py b/vega/networks/pytorch/customs/modnas/arch_space/layers.py index 35e4c50..4b34439 100644 --- a/vega/networks/pytorch/customs/modnas/arch_space/layers.py +++ b/vega/networks/pytorch/customs/modnas/arch_space/layers.py @@ -1,24 +1,30 @@ # -*- coding:utf-8 -*- # Copyright (C) 2020. Huawei Technologies Co., Ltd. All rights reserved. -# This program is free software; you can redistribute it and/or modify -# it under the terms of the MIT License. -# This program is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# MIT License for more details. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. """Layers of nested network modules.""" +from typing import Dict, List, Optional, Tuple, Type, Union, Any import torch import torch.nn as nn +from torch import Tensor +from torch.nn.modules.module import Module +from modnas.registry.layer_def import build as build_layer_def +from modnas.utils.logging import get_logger from .slot import Slot from .slot import register_slot_ccs from . import layer_defs -from modnas.registry.layer_def import build as build_layer_def -from modnas.utils.logging import get_logger -from torch import Tensor -from torch.nn.modules.module import Module -from typing import Dict, List, Optional, Tuple, Type, Union, Any logger = get_logger('arch_space') @@ -202,7 +208,8 @@ def __init__(self, if isinstance(n_chain_nodes, int): n_chain_nodes = [n_chain_nodes] * n_chain else: - assert len(n_chain_nodes) == n_chain + if len(n_chain_nodes) != n_chain: + raise ValueError("Chains of network modules are wrong.") self.n_chain_nodes = n_chain_nodes self.n_nodes = sum(n_chain_nodes) self.n_input = len(chn_in) @@ -271,8 +278,10 @@ def to_arch_desc(self, *args, **kwargs): def build_from_arch_desc(self, desc, *args, **kwargs): """Build layer ops from desc.""" - assert len(desc) == len(self.chains) + if len(desc) != len(self.chains): + raise ValueError('Failed to build layer ops from desc.') for g_chain, chain in zip(desc, self.chains): - assert len(g_chain) == len(chain) + if len(g_chain) != len(chain): + raise ValueError('Failed to build layer ops from desc.') for g_edge, e in zip(g_chain, chain): e.build_from_arch_desc(g_edge, *args, **kwargs) diff --git a/vega/networks/pytorch/customs/modnas/arch_space/mixed_ops.py b/vega/networks/pytorch/customs/modnas/arch_space/mixed_ops.py index 72233b2..5ee97b7 100644 --- a/vega/networks/pytorch/customs/modnas/arch_space/mixed_ops.py +++ b/vega/networks/pytorch/customs/modnas/arch_space/mixed_ops.py @@ -1,20 +1,26 @@ # -*- coding:utf-8 -*- # Copyright (C) 2020. Huawei Technologies Co., Ltd. All rights reserved. 
-# This program is free software; you can redistribute it and/or modify -# it under the terms of the MIT License. -# This program is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# MIT License for more details. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. """Mixed operators.""" +from collections import OrderedDict +from typing import Any, Collection, Iterator, List, Tuple, Optional, Union import torch import torch.nn as nn import torch.nn.functional as F -from collections import OrderedDict from torch import Tensor -from typing import Any, Collection, Iterator, List, Tuple, Optional, Union from torch.nn.modules.module import Module from modnas.core.params.base import Param from modnas.core.params import Categorical @@ -246,7 +252,6 @@ def sample_path(self) -> None: p = self.alpha() s_op = self.s_op self.w_path_f = F.softmax(p.index_select(-1, torch.tensor(s_op).to(p.device)), dim=-1) - # sample uniformly samples = F.softmax(torch.ones(len(s_op)), dim=-1).multinomial(self.n_samples) s_path_f = [s_op[i] for i in samples] self.s_path_f = s_path_f @@ -254,10 +259,7 @@ def sample_path(self) -> None: def sample_ops(self, n_samples: int) -> None: """Sample activated candidates.""" p = self.alpha() - # sample uniformly samples = F.softmax(torch.ones(p.shape), dim=-1).multinomial(n_samples).detach() - # sample according to p - # samples = F.softmax(p, dim=-1).multinomial(n_samples).detach() self.s_op = list(samples.flatten().cpu().numpy()) @@ -291,7 +293,7 @@ def forward(self, *args, **kwargs) -> Union[Tensor, int]: def to_arch_desc(self, k: int = 1) -> Any: """Return archdesc from mixed operator.""" cname = self.candidate_names() - w = F.softmax(self.alpha().detach(), dim=-1) # use alpha softmax + w = F.softmax(self.alpha().detach(), dim=-1) _, cand_idx = torch.topk(w, k) desc = [cname[i] for i in cand_idx] if desc == []: diff --git a/vega/networks/pytorch/customs/modnas/arch_space/ops.py b/vega/networks/pytorch/customs/modnas/arch_space/ops.py index 114122c..404a769 100644 --- a/vega/networks/pytorch/customs/modnas/arch_space/ops.py +++ b/vega/networks/pytorch/customs/modnas/arch_space/ops.py @@ -1,21 +1,29 @@ # -*- coding:utf-8 -*- # Copyright (C) 2020. Huawei Technologies Co., Ltd. All rights reserved. -# This program is free software; you can redistribute it and/or modify -# it under the terms of the MIT License. -# This program is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# MIT License for more details. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. """Network operators / candidates.""" + from typing import Any, List import torch import torch.nn as nn +from torch import Tensor from modnas.utils import get_same_padding from modnas.utils.config import Config from .slot import register_slot_ccs -from torch import Tensor + register_slot_ccs(lambda C_in, C_out, stride: PoolBN('avg', C_in, C_out, 3, stride, 1), 'AVG') register_slot_ccs(lambda C_in, C_out, stride: PoolBN('max', C_in, C_out, 3, stride, 1), 'MAX') @@ -212,7 +220,6 @@ def forward(self, x: Tensor) -> Tensor: """Return operator output.""" if self.stride == 1: return x * 0. - # re-sizing by stride return x[:, :, ::self.stride, ::self.stride] * 0. diff --git a/vega/networks/pytorch/customs/modnas/arch_space/predefined/constructed.py b/vega/networks/pytorch/customs/modnas/arch_space/predefined/constructed.py index ad14f21..dd723ed 100644 --- a/vega/networks/pytorch/customs/modnas/arch_space/predefined/constructed.py +++ b/vega/networks/pytorch/customs/modnas/arch_space/predefined/constructed.py @@ -1,12 +1,18 @@ # -*- coding:utf-8 -*- # Copyright (C) 2020. Huawei Technologies Co., Ltd. All rights reserved. -# This program is free software; you can redistribute it and/or modify -# it under the terms of the MIT License. -# This program is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# MIT License for more details. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. """Constructed modules.""" from modnas.registry.construct import build as build_constructor diff --git a/vega/networks/pytorch/customs/modnas/arch_space/slot.py b/vega/networks/pytorch/customs/modnas/arch_space/slot.py index a6b41a3..6b376a2 100644 --- a/vega/networks/pytorch/customs/modnas/arch_space/slot.py +++ b/vega/networks/pytorch/customs/modnas/arch_space/slot.py @@ -1,12 +1,18 @@ # -*- coding:utf-8 -*- # Copyright (C) 2020. Huawei Technologies Co., Ltd. All rights reserved. -# This program is free software; you can redistribute it and/or modify -# it under the terms of the MIT License. -# This program is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# MIT License for more details. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
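The ops.py hunk above keeps the Zero operator's trick of producing an all-zero output with the spatial size a strided op would have: slice with the stride, then multiply by zero. A small standalone check of the resulting shape:

import torch

x = torch.randn(1, 3, 8, 8)
stride = 2
y = x[:, :, ::stride, ::stride] * 0.
print(y.shape)   # torch.Size([1, 3, 4, 4]) - zeros with the downsampled spatial size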
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. """Slot module.""" from functools import partial diff --git a/vega/networks/pytorch/customs/modnas/arch_space/torch/darts.py b/vega/networks/pytorch/customs/modnas/arch_space/torch/darts.py index d07e2bf..8ddf345 100644 --- a/vega/networks/pytorch/customs/modnas/arch_space/torch/darts.py +++ b/vega/networks/pytorch/customs/modnas/arch_space/torch/darts.py @@ -1,16 +1,23 @@ -"""Cell-based architecture in DARTS. +# -*- coding:utf-8 -*- + +# This file is adapted from the pt.darts library at +# https://github.com/khanrc/pt.darts + +# 2020.6.29-Changed for Modular-NAS search space. +# Huawei Technologies Co., Ltd. +# Copyright 2020 Huawei Technologies Co., Ltd. + +"""Cell-based architecture in DARTS.""" -modified from https://github.com/khanrc/pt.darts -""" from functools import partial import torch.nn as nn -from ..ops import FactorizedReduce, StdConv -from ..slot import Slot from modnas.registry.construct import DefaultMixedOpConstructor from modnas.registry.construct import register as register_constructor from modnas.registry.arch_space import register from modnas.registry import parse_spec, to_spec from ..layers import DAGLayer +from ..ops import FactorizedReduce, StdConv +from ..slot import Slot class PreprocLayer(StdConv): @@ -25,7 +32,8 @@ class AuxiliaryHead(nn.Module): def __init__(self, input_size, C, n_classes): # assuming input size 7x7 or 8x8 - assert input_size in [7, 8] + if input_size not in [7, 8]: + raise ValueError('unknown input_size: %s' % repr(input_size)) super().__init__() self.net = nn.Sequential( nn.ReLU(inplace=True), @@ -120,10 +128,12 @@ def forward_aux(self, x): def build_from_arch_desc(self, desc, *args, **kwargs): """Build network from archdesc.""" - assert len(self.cell_group) == len(desc) - for cells, g in zip(self.cell_group, desc): - for c in cells: - c.build_from_arch_desc(g, *args, **kwargs) + if len(self.cell_group) == len(desc): + for cells, g in zip(self.cell_group, desc): + for c in cells: + c.build_from_arch_desc(g, *args, **kwargs) + else: + raise ValueError('Failed to build network from archdesc.') def to_arch_desc(self, k=2): """Return archdesc from parameters.""" diff --git a/vega/networks/pytorch/customs/modnas/arch_space/torch/mobilenetv2.py b/vega/networks/pytorch/customs/modnas/arch_space/torch/mobilenetv2.py index db1b929..722e3c6 100644 --- a/vega/networks/pytorch/customs/modnas/arch_space/torch/mobilenetv2.py +++ b/vega/networks/pytorch/customs/modnas/arch_space/torch/mobilenetv2.py @@ -1,7 +1,14 @@ -"""MobileNetV2 architectures. +# -*- coding:utf-8 -*- + +# This file is adapted from the MobileNetV2-pytorch library at +# https://github.com/Randl/MobileNetV2-pytorch + +# 2020.6.29-Changed for Modular-NAS search space. +# Huawei Technologies Co., Ltd. +# Copyright 2020 Huawei Technologies Co., Ltd. 
+ +"""MobileNetV2 architectures.""" -modified from https://github.com/Randl/MobileNetV2-pytorch -""" import math from functools import partial from collections import OrderedDict @@ -257,10 +264,10 @@ def mobilenetv2(cfgs=None, cifar=False, **kwargs): return MobileNetV2(cfgs=cfgs, **kwargs) -for cifar in [True, False]: - img = 'CIFAR' if cifar else 'ImageNet' - register(partial(mobilenetv2, cifar=cifar), '{}_MobileNetV2'.format(img)) - register(partial(mobilenetv2, cfgs=_mbv2_gpu_cfgs, cifar=cifar), '{}_MobileNetV2_GPU'.format(img)) +for cifar_format in [True, False]: + img = 'CIFAR' if cifar_format else 'ImageNet' + register(partial(mobilenetv2, cifar=cifar_format), '{}_MobileNetV2'.format(img)) + register(partial(mobilenetv2, cfgs=_mbv2_gpu_cfgs, cifar=cifar_format), '{}_MobileNetV2_GPU'.format(img)) kernel_sizes = [3, 5, 7, 9] diff --git a/vega/networks/pytorch/customs/modnas/arch_space/torch/mobilenetv3.py b/vega/networks/pytorch/customs/modnas/arch_space/torch/mobilenetv3.py index 219dc99..f5bb3ae 100644 --- a/vega/networks/pytorch/customs/modnas/arch_space/torch/mobilenetv3.py +++ b/vega/networks/pytorch/customs/modnas/arch_space/torch/mobilenetv3.py @@ -1,7 +1,14 @@ -"""MobileNetV3 architectures. +# -*- coding:utf-8 -*- + +# This file is adapted from the mobilenetv3.pytorch library at +# https://github.com/d-li14/mobilenetv3.pytorch + +# 2020.6.29-Changed for Modular-NAS search space. +# Huawei Technologies Co., Ltd. +# Copyright 2020 Huawei Technologies Co., Ltd. + +"""MobileNetV3 architectures.""" -modified from https://github.com/d-li14/mobilenetv3.pytorch -""" import torch.nn as nn import torch.nn.functional as F from modnas.registry.construct import DefaultMixedOpConstructor, DefaultSlotTraversalConstructor,\ @@ -121,7 +128,8 @@ class MobileInvertedResidualBlock(nn.Module): def __init__(self, chn_in, chn, chn_out, kernel_size, stride, use_se, use_hs): super(MobileInvertedResidualBlock, self).__init__() - assert stride in [1, 2] + if stride not in [1, 2]: + raise ValueError('unknown stride: %s' % repr(stride)) self.identity = stride == 1 and chn_in == chn_out self.conv = Slot(_chn_in=chn_in, _chn_out=chn_out, @@ -147,7 +155,8 @@ def __init__(self, cfgs, mode, chn_in=3, n_classes=1000, width_mult=1., dropout_ super(MobileNetV3, self).__init__() # setting of inverted residual blocks self.cfgs = cfgs - assert mode in ['large', 'small'] + if mode not in ['large', 'small']: + raise ValueError('unknown mode: %s' % repr(mode)) block = MobileInvertedResidualBlock # building layers layers = [] diff --git a/vega/networks/pytorch/customs/modnas/arch_space/torch/pyramidnet.py b/vega/networks/pytorch/customs/modnas/arch_space/torch/pyramidnet.py index 775ffbf..e928cd4 100644 --- a/vega/networks/pytorch/customs/modnas/arch_space/torch/pyramidnet.py +++ b/vega/networks/pytorch/customs/modnas/arch_space/torch/pyramidnet.py @@ -1,13 +1,20 @@ -"""PyramidNet architectures. +# -*- coding:utf-8 -*- + +# This file is adapted from the PyramidNet-PyTorch library at +# https://github.com/dyhan0920/PyramidNet-PyTorch/ + +# 2020.6.29-Changed for Modular-NAS search space. +# Huawei Technologies Co., Ltd. +# Copyright 2020 Huawei Technologies Co., Ltd. 
+ +"""PyramidNet architectures.""" -modified from https://github.com/dyhan0920/PyramidNet-PyTorch/ -""" import torch import torch.nn as nn -from ..slot import Slot from modnas.registry.construct import DefaultSlotTraversalConstructor from modnas.registry.construct import register as register_constructor from modnas.registry.arch_space import register +from ..slot import Slot class GroupConv(nn.Module): diff --git a/vega/networks/pytorch/customs/modnas/arch_space/torch/resnet.py b/vega/networks/pytorch/customs/modnas/arch_space/torch/resnet.py index 6337fde..f765454 100644 --- a/vega/networks/pytorch/customs/modnas/arch_space/torch/resnet.py +++ b/vega/networks/pytorch/customs/modnas/arch_space/torch/resnet.py @@ -1,14 +1,21 @@ -"""ResNet architectures. +# -*- coding:utf-8 -*- + +# This file is adapted from the torchvision library at +# https://github.com/pytorch/vision/blob/master/torchvision/models/resnet.py + +# 2020.6.29-Changed for Modular-NAS search space. +# Huawei Technologies Co., Ltd. +# Copyright 2020 Huawei Technologies Co., Ltd. + +"""ResNet architectures.""" -modified from https://github.com/pytorch/vision/blob/master/torchvision/models/resnet.py -""" from functools import partial import torch.nn as nn -from ..slot import Slot from modnas.registry.construct import DefaultSlotTraversalConstructor from modnas.registry.construct import register as register_constructor from modnas.registry.arch_space import register from ..ops import Identity +from ..slot import Slot def conv3x3(in_planes, out_planes, stride=1, groups=1): diff --git a/vega/networks/pytorch/customs/modnas/arch_space/torch/shufflenetv2.py b/vega/networks/pytorch/customs/modnas/arch_space/torch/shufflenetv2.py index cf8d984..e5d9a3a 100644 --- a/vega/networks/pytorch/customs/modnas/arch_space/torch/shufflenetv2.py +++ b/vega/networks/pytorch/customs/modnas/arch_space/torch/shufflenetv2.py @@ -1,15 +1,23 @@ -"""ShuffleNetV2 architectures. +# -*- coding:utf-8 -*- + +# This file is adapted from the SinglePathOneShot library at +# https://github.com/megvii-model/SinglePathOneShot + +# 2020.6.29-Changed for Modular-NAS search space. +# Huawei Technologies Co., Ltd. +# Copyright 2020 Huawei Technologies Co., Ltd. + +"""ShuffleNetV2 architectures.""" -modified from https://github.com/megvii-model/SinglePathOneShot -""" import torch import torch.nn as nn -from .. import ops -from ..slot import Slot from modnas.registry.construct import register as register_constructor from modnas.registry.construct import DefaultMixedOpConstructor, DefaultSlotTraversalConstructor from modnas.registry.arch_space import build, register from ..slot import register_slot_ccs +from .. 
import ops +from ..slot import Slot + kernel_sizes = [3, 5, 7, 9] for k in kernel_sizes: @@ -23,19 +31,23 @@ def channel_split(x, split): """Return data split in channel dimension.""" - assert x.size(1) == split * 2 - return torch.split(x, split, dim=1) + if x.size(1) == split * 2: + return torch.split(x, split, dim=1) + else: + raise ValueError('Failed to return data split in channel dimension.') def shuffle_channels(x, groups=2): """Return data shuffled in channel dimension.""" batch_size, channels, height, width = x.size() - assert channels % groups == 0 - channels_per_group = channels // groups - x = x.view(batch_size, groups, channels_per_group, height, width) - x = x.transpose(1, 2).contiguous() - x = x.view(batch_size, channels, height, width) - return x + if channels % groups == 0: + channels_per_group = channels // groups + x = x.view(batch_size, groups, channels_per_group, height, width) + x = x.transpose(1, 2).contiguous() + x = x.view(batch_size, channels, height, width) + return x + else: + raise ValueError('Failed to return data shuffled in channel dimension.') class ShuffleUnit(nn.Module): diff --git a/vega/networks/pytorch/customs/modnas/compat.py b/vega/networks/pytorch/customs/modnas/compat.py index 03b04f9..ecc707e 100644 --- a/vega/networks/pytorch/customs/modnas/compat.py +++ b/vega/networks/pytorch/customs/modnas/compat.py @@ -1,12 +1,18 @@ # -*- coding:utf-8 -*- # Copyright (C) 2020. Huawei Technologies Co., Ltd. All rights reserved. -# This program is free software; you can redistribute it and/or modify -# it under the terms of the MIT License. -# This program is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# MIT License for more details. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. """ModularNAS arch space wrapper.""" @@ -33,7 +39,7 @@ def __init__(self, config = Config(kwargs) self.config = config self.net = None - is_augment = True if config.get('arch_desc') is not None else False + is_augment = True if config.get('proc') == 'augment' or config.get('arch_desc') is not None else False if not config.get('vega_no_construct', False) and is_augment: Config.apply(config, config.pop('augment', {})) self.net = get_default_constructors(self.config)(self.net) diff --git a/vega/networks/pytorch/customs/modnas/contrib/arch_space/activations/swish.py b/vega/networks/pytorch/customs/modnas/contrib/arch_space/activations/swish.py index 30575f8..4d26f49 100644 --- a/vega/networks/pytorch/customs/modnas/contrib/arch_space/activations/swish.py +++ b/vega/networks/pytorch/customs/modnas/contrib/arch_space/activations/swish.py @@ -1,12 +1,18 @@ # -*- coding:utf-8 -*- # Copyright (C) 2020. Huawei Technologies Co., Ltd. All rights reserved. -# This program is free software; you can redistribute it and/or modify -# it under the terms of the MIT License. 
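The shuffle_channels hunk above performs the standard ShuffleNetV2 channel shuffle: view into (groups, channels_per_group), transpose, and flatten back. A tiny standalone check of the permutation it produces (not the patch's code):

import torch

x = torch.arange(4.).view(1, 4, 1, 1)       # channels [0, 1, 2, 3]
n, c, h, w = x.size()
groups = 2
y = x.view(n, groups, c // groups, h, w).transpose(1, 2).contiguous().view(n, c, h, w)
print(y.flatten().tolist())                  # [0.0, 2.0, 1.0, 3.0] - channels interleaved across groups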
-# This program is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# MIT License for more details. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. """Swish activation functions.""" import torch.nn as nn diff --git a/vega/networks/pytorch/customs/modnas/contrib/arch_space/elastic/modifier.py b/vega/networks/pytorch/customs/modnas/contrib/arch_space/elastic/modifier.py index 3c6d8ce..dac7a98 100644 --- a/vega/networks/pytorch/customs/modnas/contrib/arch_space/elastic/modifier.py +++ b/vega/networks/pytorch/customs/modnas/contrib/arch_space/elastic/modifier.py @@ -1,17 +1,23 @@ # -*- coding:utf-8 -*- # Copyright (C) 2020. Huawei Technologies Co., Ltd. All rights reserved. -# This program is free software; you can redistribute it and/or modify -# it under the terms of the MIT License. -# This program is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# MIT License for more details. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. """Module states modifier.""" +from typing import Callable, Union from torch.nn.modules.module import Module from torch import Tensor -from typing import Callable, Union def get_ori_param(module, name): diff --git a/vega/networks/pytorch/customs/modnas/contrib/arch_space/elastic/sequential.py b/vega/networks/pytorch/customs/modnas/contrib/arch_space/elastic/sequential.py index 3f151c3..21370ef 100644 --- a/vega/networks/pytorch/customs/modnas/contrib/arch_space/elastic/sequential.py +++ b/vega/networks/pytorch/customs/modnas/contrib/arch_space/elastic/sequential.py @@ -1,19 +1,25 @@ # -*- coding:utf-8 -*- # Copyright (C) 2020. Huawei Technologies Co., Ltd. All rights reserved. -# This program is free software; you can redistribute it and/or modify -# it under the terms of the MIT License. -# This program is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# MIT License for more details. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. """Elastic sequential (depth) transformations.""" +from typing import Iterator, List, Optional, Tuple import torch.nn as nn -from .modifier import modify_attr, restore_module_attrs from torch import Tensor from torch.nn.modules.module import Module -from typing import Iterator, List, Optional, Tuple +from .modifier import modify_attr, restore_module_attrs def _hook_module_in(module: Module, inputs: Tuple[Tensor]) -> None: diff --git a/vega/networks/pytorch/customs/modnas/contrib/arch_space/elastic/spatial.py b/vega/networks/pytorch/customs/modnas/contrib/arch_space/elastic/spatial.py index 5dd77a8..d6b4923 100644 --- a/vega/networks/pytorch/customs/modnas/contrib/arch_space/elastic/spatial.py +++ b/vega/networks/pytorch/customs/modnas/contrib/arch_space/elastic/spatial.py @@ -1,21 +1,27 @@ # -*- coding:utf-8 -*- # Copyright (C) 2020. Huawei Technologies Co., Ltd. All rights reserved. -# This program is free software; you can redistribute it and/or modify -# it under the terms of the MIT License. -# This program is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# MIT License for more details. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. """Elastic spatial (width) transformations.""" +from typing import Callable, Iterator, List, Optional, Tuple, Type, Union import torch import torch.nn as nn -from .modifier import modify_param, modify_buffer, modify_attr,\ - restore_module_states, get_ori_buffer from torch import Tensor from torch.nn.modules.module import Module -from typing import Callable, Iterator, List, Optional, Tuple, Type, Union +from .modifier import modify_param, modify_buffer, modify_attr,\ + restore_module_states, get_ori_buffer def _conv2d_fan_out_trnsf(m: nn.Conv2d, idx: Tensor) -> None: diff --git a/vega/networks/pytorch/customs/modnas/contrib/arch_space/mobilenetv2_elastic.py b/vega/networks/pytorch/customs/modnas/contrib/arch_space/mobilenetv2_elastic.py index 8cbfc87..1d120d0 100644 --- a/vega/networks/pytorch/customs/modnas/contrib/arch_space/mobilenetv2_elastic.py +++ b/vega/networks/pytorch/customs/modnas/contrib/arch_space/mobilenetv2_elastic.py @@ -1,12 +1,18 @@ # -*- coding:utf-8 -*- # Copyright (C) 2020. Huawei Technologies Co., Ltd. All rights reserved. -# This program is free software; you can redistribute it and/or modify -# it under the terms of the MIT License. 
-# This program is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# MIT License for more details. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. """MobileNetV2 Elastic Constructors & Exporters.""" from functools import partial @@ -53,8 +59,10 @@ def __call__(self, model): class MobileNetV2ElasticSpatialConstructor(DefaultSlotTraversalConstructor): """MobileNetV2 Elastic Spatial Constructor.""" - def __init__(self, fix_first=True, expansion_range=[1, 3, 6], rank_fn='l1_fan_in', search=True): + def __init__(self, fix_first=True, expansion_range=None, rank_fn='l1_fan_in', search=True): super().__init__(skip_exist=False) + if expansion_range is None: + expansion_range = [1, 3, 6] self.fix_first = fix_first self.first = False self.last_conv = None @@ -130,8 +138,10 @@ def on_update_handler(chn_in, param): class MobileNetV2ElasticSequentialConstructor(): """MobileNetV2 Elastic Sequential Constructor.""" - def __init__(self, repeat_range=[1, 2, 3, 4], search=True): + def __init__(self, repeat_range=None, search=True): super().__init__() + if repeat_range is None: + repeat_range = [1, 2, 3, 4] self.is_search = search self.repeat_range = repeat_range diff --git a/vega/networks/pytorch/customs/modnas/contrib/arch_space/stacnas.py b/vega/networks/pytorch/customs/modnas/contrib/arch_space/stacnas.py index 5e51f15..5eab488 100644 --- a/vega/networks/pytorch/customs/modnas/contrib/arch_space/stacnas.py +++ b/vega/networks/pytorch/customs/modnas/contrib/arch_space/stacnas.py @@ -1,12 +1,18 @@ # -*- coding:utf-8 -*- # Copyright (C) 2020. Huawei Technologies Co., Ltd. All rights reserved. -# This program is free software; you can redistribute it and/or modify -# it under the terms of the MIT License. -# This program is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# MIT License for more details. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. """StacNAS Constructors & Exporters.""" from modnas.registry.arch_space import build diff --git a/vega/networks/pytorch/customs/nago.py b/vega/networks/pytorch/customs/nago.py index 818fdd6..6fe994e 100644 --- a/vega/networks/pytorch/customs/nago.py +++ b/vega/networks/pytorch/customs/nago.py @@ -1,12 +1,18 @@ # -*- coding:utf-8 -*- # Copyright (C) 2020. Huawei Technologies Co., Ltd. All rights reserved. 
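The expansion_range / repeat_range hunks above swap mutable list defaults for a None sentinel; a list (or dict) default is created once at function definition time and shared by every call. A minimal sketch of the pattern (the class name is illustrative):

class SpatialConstructor:
    def __init__(self, expansion_range=None):
        # a fresh list per call, so instances never share one default object
        if expansion_range is None:
            expansion_range = [1, 3, 6]
        self.expansion_range = expansion_range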
-# This program is free software; you can redistribute it and/or modify -# it under the terms of the MIT License. -# This program is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# MIT License for more details. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. """The NAGO model.""" import logging diff --git a/vega/networks/pytorch/customs/segmentation/common.py b/vega/networks/pytorch/customs/segmentation/common.py index 01925e2..6cc8e4c 100644 --- a/vega/networks/pytorch/customs/segmentation/common.py +++ b/vega/networks/pytorch/customs/segmentation/common.py @@ -1,12 +1,18 @@ # -*- coding:utf-8 -*- # Copyright (C) 2020. Huawei Technologies Co., Ltd. All rights reserved. -# This program is free software; you can redistribute it and/or modify -# it under the terms of the MIT License. -# This program is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# MIT License for more details. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. """Functions to implement the BiSeNet.""" @@ -58,7 +64,7 @@ class FeatureFusion(nn.Module): """FeatureFusion module.""" def __init__(self, in_planes, out_planes, - reduction=1, norm_layer={'norm_type': 'BN'}, Conv2d=nn.Conv2d): + reduction=1, norm_layer=None, Conv2d=nn.Conv2d): """Construct the FeatureFusion class. :param in_planes: input channels @@ -68,6 +74,8 @@ def __init__(self, in_planes, out_planes, :param reduction: reduction ratio. """ super(FeatureFusion, self).__init__() + if norm_layer is None: + norm_layer = {'norm_type': 'BN'} self.conv_1x1 = ConvBnRelu(in_planes, out_planes, 1, 1, 0, norm_layer=norm_layer, Conv2d=Conv2d) diff --git a/vega/networks/pytorch/customs/segmentation/evolveresnet.py b/vega/networks/pytorch/customs/segmentation/evolveresnet.py index c99e149..5a01bda 100644 --- a/vega/networks/pytorch/customs/segmentation/evolveresnet.py +++ b/vega/networks/pytorch/customs/segmentation/evolveresnet.py @@ -1,28 +1,34 @@ # -*- coding:utf-8 -*- # Copyright (C) 2020. Huawei Technologies Co., Ltd. All rights reserved. -# This program is free software; you can redistribute it and/or modify -# it under the terms of the MIT License. 
-# This program is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# MIT License for more details. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. """Decode and build BiSeNet.""" -import re + import torch.nn as nn -from .common import load_model from vega.modules.operators import ConvBnRelu from vega.modules.operators import conv3x3 from vega.modules.blocks import BasicBlock, BottleneckBlock, build_norm_layer +from .common import load_model class ResNet_arch(nn.Module): """ResNet_arch module.""" - def __init__(self, block, arch, base_channel, strides=[1, 2, 2, 2], - dilations=[1, 1, 1, 1], num_classes=1000, groups=1, base_width=64, - structure='full', Conv2d='Conv2d', norm_layer={"norm_type": 'BN'}): + def __init__(self, block, arch, base_channel, strides=None, + dilations=None, num_classes=1000, groups=1, base_width=64, + structure='full', Conv2d='Conv2d', norm_layer=None): """Construct the ResNet_arch class. :param block: BasicBlock or Bottleneck instance @@ -37,45 +43,53 @@ def __init__(self, block, arch, base_channel, strides=[1, 2, 2, 2], :param norm_layer: type of norm layer. :param Conv2d: type of conv layer. """ - assert structure in ['full', 'drop_last', 'backbone'], 'unknown structrue: %s' % repr(structure) - self.structure = structure - self.num_classes = num_classes - self.arch = [[int(a) for a in x] for x in arch.split('-')] - self.base_channel = base_channel - self.strides = strides - self.dilations = dilations - super(ResNet_arch, self).__init__() - self.conv1 = conv3x3(3, base_channel // 2, stride=2) - self.bn1 = build_norm_layer((base_channel // 2), **norm_layer) - self.relu = nn.ReLU(inplace=False) - self.conv2 = conv3x3(base_channel // 2, base_channel, stride=2) - self.bn2 = build_norm_layer((base_channel), **norm_layer) - self.res_layers = [] - self.block = block - total_expand = 0 - inplanes = planes = self.base_channel - self.stage_out_channels = [] - for i, arch in enumerate(self.arch): - num_expand = arch.count(2) - total_expand += num_expand - stride = self.strides[i] - res_layer, out_channels = self.make_res_layer( - self.block, - inplanes, - planes, - arch, - groups=groups, - base_width=base_width, - stride=stride, - norm_layer=norm_layer, - Conv2d=Conv2d) - self.stage_out_channels.append(out_channels) - planes = self.base_channel * 2 ** total_expand - inplanes = planes * self.block.expansion - layer_name = 'layer{}'.format(i + 1) - self.add_module(layer_name, res_layer) - self.res_layers.append(layer_name) - self.out_channels = out_channels + if strides is None: + strides = [1, 2, 2, 2] + if dilations is None: + dilations = [1, 1, 1, 1] + if norm_layer is None: + norm_layer = {"norm_type": 'BN'} + if structure in ['full', 'drop_last', 'backbone']: + self.structure = structure + self.num_classes = num_classes + self.arch = [[int(a) for a in x] for x in arch.split('-')] + self.base_channel = base_channel + self.strides = strides + 
self.dilations = dilations + super(ResNet_arch, self).__init__() + self.conv1 = conv3x3(3, base_channel // 2, stride=2) + self.bn1 = build_norm_layer((base_channel // 2), **norm_layer) + self.relu = nn.ReLU(inplace=False) + self.conv2 = conv3x3(base_channel // 2, base_channel, stride=2) + self.bn2 = build_norm_layer((base_channel), **norm_layer) + self.res_layers = [] + self.block = block + total_expand = 0 + inplanes = planes = self.base_channel + self.stage_out_channels = [] + for i, arch in enumerate(self.arch): + num_expand = arch.count(2) + total_expand += num_expand + stride = self.strides[i] + res_layer, out_channels = self.make_res_layer( + self.block, + inplanes, + planes, + arch, + groups=groups, + base_width=base_width, + stride=stride, + norm_layer=norm_layer, + Conv2d=Conv2d) + self.stage_out_channels.append(out_channels) + planes = self.base_channel * 2 ** total_expand + inplanes = planes * self.block.expansion + layer_name = 'layer{}'.format(i + 1) + self.add_module(layer_name, res_layer) + self.res_layers.append(layer_name) + self.out_channels = out_channels + else: + raise ValueError('unknown structrue: %s' % repr(structure)) def get_output_size(self, H=None, W=None): """Get size of the output. @@ -89,8 +103,10 @@ def get_output_size(self, H=None, W=None): elif self.structure == 'drop_last': return (self.out_channels,) else: - assert None not in [H, W], 'requires arguments H, W' - return (self.out_channels, H // 32, W // 32) + if None not in [H, W]: + return (self.out_channels, H // 32, W // 32) + else: + raise ValueError('requires arguments H, W') @staticmethod def make_res_layer(block, @@ -201,7 +217,7 @@ def build_archs(arch_string, pretrained_model=None, num_classes=1000, structure= class AutoSpatialPath(nn.Module): """Build spatial path from code string.""" - def __init__(self, layer, arch, norm_layer='BN', Conv2d=nn.Conv2d, stride=[1, 2, 2, 1], **kwargs): + def __init__(self, layer, arch, norm_layer='BN', Conv2d=nn.Conv2d, stride=None, **kwargs): """Build spatial path. :param layer: layers of spatial path @@ -212,6 +228,8 @@ def __init__(self, layer, arch, norm_layer='BN', Conv2d=nn.Conv2d, stride=[1, 2, :param **kwargs: other keywords. :return: output tensor """ + if stride is None: + stride = [1, 2, 2, 1] super(AutoSpatialPath, self).__init__() split_arch = arch.split('_') self.base_channels = int(split_arch[0]) diff --git a/vega/networks/pytorch/customs/segmentation/weights.py b/vega/networks/pytorch/customs/segmentation/weights.py index 61d5045..2ed0bfa 100644 --- a/vega/networks/pytorch/customs/segmentation/weights.py +++ b/vega/networks/pytorch/customs/segmentation/weights.py @@ -1,12 +1,18 @@ # -*- coding:utf-8 -*- # Copyright (C) 2020. Huawei Technologies Co., Ltd. All rights reserved. -# This program is free software; you can redistribute it and/or modify -# it under the terms of the MIT License. -# This program is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# MIT License for more details. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
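The ResNet_arch hunk above validates `structure` by wrapping the whole constructor body in an if/else and raising in the else branch. The same check can also be written as an early guard that raises first and leaves the body flat; a sketch only, not the patch's code:

def check_structure(structure):
    # raise up front, then proceed with an unindented body
    if structure not in ('full', 'drop_last', 'backbone'):
        raise ValueError('unknown structure: %s' % repr(structure))
    return structure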
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. """Weight operations of the norm layers.""" from torch import nn diff --git a/vega/networks/pytorch/customs/utils/layer.py b/vega/networks/pytorch/customs/utils/layer.py index 7d46930..ae2068b 100644 --- a/vega/networks/pytorch/customs/utils/layer.py +++ b/vega/networks/pytorch/customs/utils/layer.py @@ -1,22 +1,28 @@ # -*- coding:utf-8 -*- # Copyright (C) 2020. Huawei Technologies Co., Ltd. All rights reserved. -# This program is free software; you can redistribute it and/or modify -# it under the terms of the MIT License. -# This program is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# MIT License for more details. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. """Common module in NAGO.""" import torch from torch import nn from torch.nn.functional import adaptive_avg_pool2d from vega.common import ClassType, ClassFactory +from vega.networks.pytorch.heads.auxiliary_head import AuxiliaryHead from .ops import depthwise_separable_conv_general, Triplet_unit, PassThrough, BoundedScalarMultiply from .logical_graph import LogicalMasterGraph, LogicalCellGraph, LogicalOpGraph, EdgeMerge, \ LogicalOperation, Ops -from vega.networks.pytorch.heads.auxiliary_head import AuxiliaryHead def diff_size(x, size): diff --git a/vega/networks/pytorch/customs/utils/logical_graph.py b/vega/networks/pytorch/customs/utils/logical_graph.py index da7b3b8..12a13b8 100644 --- a/vega/networks/pytorch/customs/utils/logical_graph.py +++ b/vega/networks/pytorch/customs/utils/logical_graph.py @@ -1,19 +1,26 @@ # -*- coding:utf-8 -*- # Copyright (C) 2020. Huawei Technologies Co., Ltd. All rights reserved. -# This program is free software; you can redistribute it and/or modify -# it under the terms of the MIT License. -# This program is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# MIT License for more details. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
"""LogicalGraph for NAGO.""" import time import collections -import numpy as np +import logging from dataclasses import dataclass from typing import List +import numpy as np import networkx as nx @@ -123,8 +130,8 @@ class Ops: all_ops = conv_ops + pooling_ops ops_to_num_params = {C3: 9, C5: 25, C31: 3, C13: 3, C1: 1, POOL3: 0, POOL5: 0} ops_to_kernel_size = {C3: (3, 3), C5: (5, 5), C31: (3, 1), C13: (1, 3), C1: (1, 1), POOL3: (3, 3), POOL5: (5, 5)} - assert _has_all_keys(ops_to_num_params, all_ops), "Ops must match" - assert _has_all_keys(ops_to_kernel_size, all_ops), "Ops must match" + if not _has_all_keys(ops_to_num_params, all_ops) or not _has_all_keys(ops_to_kernel_size, all_ops): + raise ValueError("Ops must match.") @dataclass @@ -185,6 +192,7 @@ def _init_graph(self, graphparams): self.nodes, self.input_nodes, self.output_nodes = get_graph_info(self.graph) return graph except Exception: + logging.debug('Failed to get graph.') continue self.nodes, self.input_nodes, self.output_nodes = ([], [], []) return graph @@ -310,14 +318,16 @@ class LogicalMasterGraph(LogicalOpGraph): def __init__(self, solution: GeneratorSolution): """Initialize LogicalMasterGraph.""" - assert len(solution.stage_ratios) == len(solution.channel_ratios), "Ratios should have same length" - self.child_nodes = [] - self.depth = "" - self.inputs = [] - self.inplanes = solution.channel_ratios[0] - self.role = NodeRoles.MASTER - self.toplvl_graph = self._init_graph(solution.master_params) - self._init_nodes(solution) + if len(solution.stage_ratios) == len(solution.channel_ratios): + self.child_nodes = [] + self.depth = "" + self.inputs = [] + self.inplanes = solution.channel_ratios[0] + self.role = NodeRoles.MASTER + self.toplvl_graph = self._init_graph(solution.master_params) + self._init_nodes(solution) + else: + raise ValueError("Ratios should have same length.") def _get_merging_cost(self): # TODO fix this, it's unprecise cost = [node._get_merging_cost() for node in self.child_nodes] diff --git a/vega/networks/pytorch/customs/utils/ops.py b/vega/networks/pytorch/customs/utils/ops.py index d6f9b63..7b1c53e 100644 --- a/vega/networks/pytorch/customs/utils/ops.py +++ b/vega/networks/pytorch/customs/utils/ops.py @@ -1,12 +1,18 @@ # -*- coding:utf-8 -*- # Copyright (C) 2020. Huawei Technologies Co., Ltd. All rights reserved. -# This program is free software; you can redistribute it and/or modify -# it under the terms of the MIT License. -# This program is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# MIT License for more details. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
"""Ops for NAGO.""" import torch diff --git a/vega/networks/pytorch/cyclesrbodys/__init__.py b/vega/networks/pytorch/cyclesrbodys/__init__.py index 60aee24..29e1189 100644 --- a/vega/networks/pytorch/cyclesrbodys/__init__.py +++ b/vega/networks/pytorch/cyclesrbodys/__init__.py @@ -1,12 +1,18 @@ # -*- coding:utf-8 -*- # Copyright (C) 2020. Huawei Technologies Co., Ltd. All rights reserved. -# This program is free software; you can redistribute it and/or modify -# it under the terms of the MIT License. -# This program is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# MIT License for more details. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. """Lazy import cyclesr bodys.""" diff --git a/vega/networks/pytorch/cyclesrbodys/cyclesr_net.py b/vega/networks/pytorch/cyclesrbodys/cyclesr_net.py index 7f1814a..33e3295 100644 --- a/vega/networks/pytorch/cyclesrbodys/cyclesr_net.py +++ b/vega/networks/pytorch/cyclesrbodys/cyclesr_net.py @@ -1,22 +1,27 @@ # -*- coding: utf-8 -*- # Copyright (C) 2020. Huawei Technologies Co., Ltd. All rights reserved. -# This program is free software; you can redistribute it and/or modify -# it under the terms of the MIT License. -# This program is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# MIT License for more details. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
"""This is the class for Cyclesr model.""" import logging import torch - +from vega.common import ClassFactory, ClassType +from vega.common.config import Config from .trans_model import TransModel from .networks import initialize, requires_grad from .srmodels import VDSR, SRResNet -from vega.common import ClassFactory, ClassType -from vega.common.config import Config def define_SR(opt, use_cuda, use_distributed): @@ -29,7 +34,6 @@ def define_SR(opt, use_cuda, use_distributed): :return: SR model :rtype: nn.Module """ - # logging.info("==> We are using SR model: {}".format(config.which_model)) logging.info("==> Norm type in {} is {}".format(opt.name, opt.SR_norm_type)) if (opt.name == "VDSR"): net = VDSR(in_nc=opt.input_nc, out_nc=opt.input_nc, nf=opt.SR_nf, nb=opt.SR_nb, diff --git a/vega/networks/pytorch/cyclesrbodys/networks.py b/vega/networks/pytorch/cyclesrbodys/networks.py index 01af812..9f619c3 100644 --- a/vega/networks/pytorch/cyclesrbodys/networks.py +++ b/vega/networks/pytorch/cyclesrbodys/networks.py @@ -1,12 +1,18 @@ # -*- coding: utf-8 -*- # Copyright (C) 2020. Huawei Technologies Co., Ltd. All rights reserved. -# This program is free software; you can redistribute it and/or modify -# it under the terms of the MIT License. -# This program is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# MIT License for more details. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. """Basic layers.""" @@ -23,8 +29,7 @@ try: import horovod.torch as hvd except Exception: - # logging.warning("horovod not been installed, {}".format(str(e))) - pass + logging.debug("horovod not been installed.") def initialize(nets, init_gain=0.02, use_cuda=True, use_distributed=False): @@ -66,17 +71,17 @@ def init_w(module): return module_nets -def requires_grad(nets, requires_grad=False): +def requires_grad(nets, requires_grads=False): """Set requies_grad=Fasle for all the networks to avoid unnecessary computations. :param nets: a list of networks :type nets: list - :param requires_grad: whether the networks require gradients or not - :type requires_grad: bool + :param requires_grads: whether the networks require gradients or not + :type requires_grads: bool """ for net in nets: for param in net.parameters(): - param.requires_grad = requires_grad + param.requires_grad = requires_grads class ShortcutBlock(nn.Module): diff --git a/vega/networks/pytorch/cyclesrbodys/srmodels.py b/vega/networks/pytorch/cyclesrbodys/srmodels.py index 6ee4a2b..2a46644 100644 --- a/vega/networks/pytorch/cyclesrbodys/srmodels.py +++ b/vega/networks/pytorch/cyclesrbodys/srmodels.py @@ -1,12 +1,18 @@ # -*- coding: utf-8 -*- # Copyright (C) 2020. Huawei Technologies Co., Ltd. All rights reserved. -# This program is free software; you can redistribute it and/or modify -# it under the terms of the MIT License. 
-# This program is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# MIT License for more details. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. """SR models.""" import math diff --git a/vega/networks/pytorch/cyclesrbodys/trans_model.py b/vega/networks/pytorch/cyclesrbodys/trans_model.py index 8582f4d..76ee3ac 100644 --- a/vega/networks/pytorch/cyclesrbodys/trans_model.py +++ b/vega/networks/pytorch/cyclesrbodys/trans_model.py @@ -1,12 +1,18 @@ # -*- coding: utf-8 -*- # Copyright (C) 2020. Huawei Technologies Co., Ltd. All rights reserved. -# This program is free software; you can redistribute it and/or modify -# it under the terms of the MIT License. -# This program is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# MIT License for more details. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. """This is the class for Translation model.""" import itertools diff --git a/vega/networks/pytorch/detectors/__init__.py b/vega/networks/pytorch/detectors/__init__.py index 44fcdcf..9d9a2ea 100644 --- a/vega/networks/pytorch/detectors/__init__.py +++ b/vega/networks/pytorch/detectors/__init__.py @@ -1,12 +1,18 @@ # -*- coding:utf-8 -*- # Copyright (C) 2020. Huawei Technologies Co., Ltd. All rights reserved. -# This program is free software; you can redistribute it and/or modify -# it under the terms of the MIT License. -# This program is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# MIT License for more details. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
"""Lazy import detector.""" diff --git a/vega/networks/pytorch/detectors/auto_lane_detector.py b/vega/networks/pytorch/detectors/auto_lane_detector.py index d67ba85..bc3c746 100644 --- a/vega/networks/pytorch/detectors/auto_lane_detector.py +++ b/vega/networks/pytorch/detectors/auto_lane_detector.py @@ -1,9 +1,9 @@ # -*- coding: utf-8 -*- """Defined faster rcnn detector.""" +from collections import ChainMap import torch from torch import nn from torch.nn import functional as F -from collections import ChainMap import ujson from vega.modules.module import Module from vega.common import ClassType, ClassFactory diff --git a/vega/networks/pytorch/gan/__init__.py b/vega/networks/pytorch/gan/__init__.py deleted file mode 100644 index 5faeacd..0000000 --- a/vega/networks/pytorch/gan/__init__.py +++ /dev/null @@ -1,18 +0,0 @@ -# -*- coding:utf-8 -*- - -# Copyright (C) 2020. Huawei Technologies Co., Ltd. All rights reserved. -# This program is free software; you can redistribute it and/or modify -# it under the terms of the MIT License. -# This program is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# MIT License for more details. - -"""Lazy import gan network.""" - -from vega.common.class_factory import ClassFactory - - -ClassFactory.lazy_register("vega.networks.pytorch.gan", { - "fully_super_network": ["network:Generator"], -}) diff --git a/vega/networks/pytorch/gan/fully_basic_blocks.py b/vega/networks/pytorch/gan/fully_basic_blocks.py deleted file mode 100644 index dc51b00..0000000 --- a/vega/networks/pytorch/gan/fully_basic_blocks.py +++ /dev/null @@ -1,407 +0,0 @@ - -# -*- coding: utf-8 -*- - -# Copyright (C) 2020. Huawei Technologies Co., Ltd. All rights reserved. -# This program is free software; you can redistribute it and/or modify -# it under the terms of the MIT License. -# This program is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# MIT License for more details. 
- -"""Defined ResNet Blocks For Detection.""" -from torch import nn -import torch.nn.functional as F - - -# 7 -PRIMITIVES = [ - 'none', - 'skip_connect', - 'conv_1x1', - 'conv_3x3', - 'conv_5x5', - 'dil_conv_3x3', - 'dil_conv_5x5' -] - -# 3 -PRIMITIVES_up = [ - 'nearest', - 'bilinear', - 'ConvTranspose' -] - -# 6 -PRIMITIVES_down = [ - 'avg_pool', - 'max_pool', - 'conv_3x3', - 'conv_5x5', - 'dil_conv_3x3', - 'dil_conv_5x5' -] - - -# ------------------------------------------------------------------------------------------------------------------- # - -OPS = { - 'none': lambda in_ch, out_ch, stride, sn, act: Zero(), - 'skip_connect': lambda in_ch, out_ch, stride, sn, act: Identity(), - 'conv_1x1': lambda in_ch, out_ch, stride, sn, act: Conv(in_ch, out_ch, 1, stride, 0, sn, act), - 'conv_3x3': lambda in_ch, out_ch, stride, sn, act: Conv(in_ch, out_ch, 3, stride, 1, sn, act), - 'conv_5x5': lambda in_ch, out_ch, stride, sn, act: Conv(in_ch, out_ch, 5, stride, 2, sn, act), - 'dil_conv_3x3': lambda in_ch, out_ch, stride, sn, act: DilConv(in_ch, out_ch, 3, stride, 2, 2, sn, act), - 'dil_conv_5x5': lambda in_ch, out_ch, stride, sn, act: DilConv(in_ch, out_ch, 5, stride, 4, 2, sn, act) -} - -OPS_down = { - 'avg_pool': lambda in_ch, out_ch, stride, sn, act: Pool(in_ch, out_ch, mode='Avg'), - 'max_pool': lambda in_ch, out_ch, stride, sn, act: Pool(in_ch, out_ch, mode='Max'), - 'conv_3x3': lambda in_ch, out_ch, stride, sn, act: Conv(in_ch, out_ch, 3, stride, 1, sn, act), - 'conv_5x5': lambda in_ch, out_ch, stride, sn, act: Conv(in_ch, out_ch, 5, stride, 2, sn, act), - 'dil_conv_3x3': lambda in_ch, out_ch, stride, sn, act: DilConv(in_ch, out_ch, 3, stride, 2, 2, sn, act), - 'dil_conv_5x5': lambda in_ch, out_ch, stride, sn, act: DilConv(in_ch, out_ch, 5, stride, 4, 2, sn, act) -} - -UPS = { - 'nearest': lambda in_ch, out_ch: Up(in_ch, out_ch, mode='nearest'), - 'bilinear': lambda in_ch, out_ch: Up(in_ch, out_ch, mode='bilinear'), - 'ConvTranspose': lambda in_ch, out_ch: Up(in_ch, out_ch, mode='convT') -} - -# ------------------------------------------------------------------------------------------------------------------- # - - -class Conv(nn.Module): - """Conv class.""" - - def __init__(self, in_ch, out_ch, kernel_size, stride, padding, sn, act): - super(Conv, self).__init__() - if sn: - self.conv = nn.utils.spectral_norm( - nn.Conv2d(in_ch, out_ch, kernel_size, stride=stride, padding=padding)) - else: - self.conv = nn.Conv2d(in_ch, out_ch, kernel_size, - stride=stride, padding=padding) - if act: - self.op = nn.Sequential(nn.ReLU(), self.conv) - else: - self.op = nn.Sequential(self.conv) - - def forward(self, x): - """call.""" - return self.op(x) - - -class DilConv(nn.Module): - """DilConv class.""" - - def __init__(self, in_ch, out_ch, kernel_size, stride, padding, dilation, sn, act): - super(DilConv, self).__init__() - if sn: - self.dilconv = nn.utils.spectral_norm( - nn.Conv2d(in_ch, out_ch, kernel_size=kernel_size, stride=stride, padding=padding, dilation=dilation)) - else: - self.dilconv = \ - nn.Conv2d(in_ch, out_ch, kernel_size=kernel_size, - stride=stride, padding=padding, dilation=dilation) - if act: - self.op = nn.Sequential(nn.ReLU(), self.dilconv) - else: - self.op = nn.Sequential(self.dilconv) - - def forward(self, x): - """call.""" - return self.op(x) - - -class Identity(nn.Module): - """Identity class.""" - - def __init__(self): - super(Identity, self).__init__() - - def forward(self, x): - """call.""" - return x - - -class Zero(nn.Module): - """Zero class.""" - - def __init__(self): - 
super(Zero, self).__init__() - - def forward(self, x): - """call.""" - return x.mul(0.) - - -class Up(nn.Module): - """Up class.""" - - def __init__(self, in_ch, out_ch, mode=None): - super(Up, self).__init__() - self.up_mode = mode - if self.up_mode == 'convT': - self.convT = nn.Sequential( - nn.ReLU(), - nn.ConvTranspose2d( - in_ch, in_ch, kernel_size=3, stride=2, padding=1, output_padding=1, groups=in_ch, bias=False), - nn.Conv2d(in_ch, out_ch, kernel_size=1, padding=0, bias=False) - ) - else: - self.c = nn.Sequential( - nn.ReLU(), - nn.Conv2d(in_ch, out_ch, kernel_size=1) - ) - - def forward(self, x): - """call.""" - if self.up_mode == 'convT': - return self.convT(x) - else: - return self.c(F.interpolate(x, scale_factor=2, mode=self.up_mode)) - - -class Pool(nn.Module): - """Pool class.""" - - def __init__(self, in_ch, out_ch, mode=None): - super(Pool, self).__init__() - if mode == 'Avg': - self.pool = nn.AvgPool2d(kernel_size=2, stride=2, padding=0) - elif mode == 'Max': - self.pool = nn.MaxPool2d( - kernel_size=2, stride=2, padding=0, dilation=1) - - def forward(self, x): - """call.""" - return self.pool(x) - - -class MixedOp(nn.Module): - """MixedOp class.""" - - def __init__(self, in_ch, out_ch, stride, sn, act, primitives): - super(MixedOp, self).__init__() - self.ops = nn.ModuleList() - for primitive in primitives: - op = OPS[primitive](in_ch, out_ch, stride, sn, act) - self.ops.append(op) - - def forward(self, x): - """call.""" - return sum(op(x) for op in self.ops) - - -class MixedUp(nn.Module): - """MixedUp class.""" - - def __init__(self, in_ch, out_ch, primitives): - super(MixedUp, self).__init__() - self.ups = nn.ModuleList() - for primitive in primitives: - up = UPS[primitive](in_ch, out_ch) - self.ups.append(up) - - def forward(self, x): - """call.""" - return sum(up(x) for up in self.ups) - - -class MixedDown(nn.Module): - """MixedDown class.""" - - def __init__(self, in_ch, out_ch, stride, sn, act, primitives): - super(MixedDown, self).__init__() - self.ops = nn.ModuleList() - for primitive in primitives: - op = OPS_down[primitive](in_ch, out_ch, stride, sn, act) - self.ops.append(op) - - def forward(self, x): - """call.""" - return sum(op(x) for op in self.ops) - - -class Cell(nn.Module): - """Cell class.""" - - def __init__(self, in_channels, out_channels, up_mode, genotype, num_skip_in=0, norm=None): - super(Cell, self).__init__() - - self.up0 = MixedUp(in_channels, out_channels, [ - PRIMITIVES_up[genotype[0]]]) - self.up1 = MixedUp(in_channels, out_channels, [ - PRIMITIVES_up[genotype[1]]]) - if genotype[2] > 0: - self.c0 = MixedOp(out_channels, out_channels, 1, - False, True, [PRIMITIVES[genotype[2]]]) - if genotype[3] > 0: - self.c1 = MixedOp(out_channels, out_channels, 1, - False, True, [PRIMITIVES[genotype[3]]]) - if genotype[4] > 0: - self.c2 = MixedOp(out_channels, out_channels, 1, - False, True, [PRIMITIVES[genotype[4]]]) - if genotype[5] > 0: - self.c3 = MixedOp(out_channels, out_channels, 1, - False, True, [PRIMITIVES[genotype[5]]]) - if genotype[6] > 0: - self.c4 = MixedOp(out_channels, out_channels, 1, - False, True, [PRIMITIVES[genotype[6]]]) - - self.up_mode = up_mode - self.norm = norm - - # no norm - if norm: - if norm == 'bn': - self.n1 = nn.BatchNorm2d(in_channels) - self.n2 = nn.BatchNorm2d(out_channels) - elif norm == 'in': - self.n1 = nn.InstanceNorm2d(in_channels) - self.n2 = nn.InstanceNorm2d(out_channels) - else: - raise NotImplementedError(norm) - - # cross scale skip - self.skip_in_ops = None - if num_skip_in: - self.skip_in_ops = 
nn.ModuleList( - [nn.Conv2d(in_channels, out_channels, kernel_size=1) - for _ in range(num_skip_in)] - ) - - def forward(self, x, skip_ft=None): - """call.""" - node0 = self.up0(x) - node1 = self.up1(x) - _, _, ht, wt = node0.size() - - # for different topologies - if hasattr(self, 'c0'): - node2 = self.c0(node0) - if hasattr(self, 'c1'): - node2 = node2 + self.c1(node1) - else: - node2 = self.c1(node1) - - # skip out feat - h_skip_out = node2 - - # skip in feat - if self.skip_in_ops: - assert len(self.skip_in_ops) == len(skip_ft) - for ft, skip_in_op in zip(skip_ft, self.skip_in_ops): - node2 += skip_in_op(F.interpolate(ft, - size=(ht, wt), mode=self.up_mode)) - - # for different topologies - if hasattr(self, 'c2'): - node3 = self.c2(node0) - if hasattr(self, 'c3'): - node3 = node3 + self.c3(node1) - if hasattr(self, 'c4'): - node3 = node3 + self.c4(node2) - else: - if hasattr(self, 'c4'): - node3 = node3 + self.c4(node2) - else: - if hasattr(self, 'c3'): - node3 = self.c3(node1) - if hasattr(self, 'c4'): - node3 = node3 + self.c4(node2) - else: - node3 = self.c4(node2) - - return h_skip_out, node3 - - -def _downsample(x): - """Downsample with Avg Pooling.""" - return nn.AvgPool2d(kernel_size=2)(x) - - -class OptimizedDisBlock(nn.Module): - """OptimizedDisBlock class.""" - - def __init__(self, in_channels, out_channels, ksize=3, pad=1, activation=nn.ReLU()): - super(OptimizedDisBlock, self).__init__() - self.activation = activation - self.c1 = nn.Conv2d(in_channels, out_channels, - kernel_size=ksize, padding=pad) - self.c2 = nn.Conv2d(out_channels, out_channels, - kernel_size=ksize, padding=pad) - self.c_sc = nn.Conv2d(in_channels, out_channels, - kernel_size=1, padding=0) - self.c1 = nn.utils.spectral_norm(self.c1) - self.c2 = nn.utils.spectral_norm(self.c2) - self.c_sc = nn.utils.spectral_norm(self.c_sc) - - def residual(self, x): - """call.""" - h = x - h = self.c1(h) - h = self.activation(h) - h = self.c2(h) - h = _downsample(h) - return h - - def shortcut(self, x): - """call.""" - return self.c_sc(_downsample(x)) - - def forward(self, x): - """call.""" - return self.residual(x) + self.shortcut(x) - - -class DisCell(nn.Module): - """DisCell class.""" - - def __init__(self, in_channels, out_channels, hidden_channels=None, activation=nn.ReLU(), genotype=None): - super(DisCell, self).__init__() - if genotype[5] >= 0: - self.down0 = MixedDown(in_channels, out_channels, 2, True, True, [ - PRIMITIVES_down[genotype[5]]]) - self.down1 = MixedDown(in_channels, out_channels, 2, True, True, [ - PRIMITIVES_down[genotype[6]]]) - if genotype[0] > 0: - self.c0 = MixedOp(out_channels, out_channels, 1, - True, True, [PRIMITIVES[genotype[0]]]) - if genotype[1] > 0: - self.c1 = MixedOp(out_channels, out_channels, 1, - True, True, [PRIMITIVES[genotype[1]]]) - if genotype[2] > 0: - self.c2 = MixedOp(out_channels, out_channels, 1, - True, True, [PRIMITIVES[genotype[2]]]) - if genotype[3] > 0: - self.c3 = MixedOp(out_channels, out_channels, 1, - True, True, [PRIMITIVES[genotype[3]]]) - if genotype[4] > 0: - self.c4 = MixedOp(out_channels, out_channels, 1, - True, False, [PRIMITIVES[genotype[4]]]) - - def forward(self, x): - """call.""" - node0 = x - node1 = self.c0(node0) - if hasattr(self, 'c1'): - node2 = self.c1(node0) - if hasattr(self, 'c2'): - node2 = node2 + self.c2(node1) - else: - node2 = self.c2(node1) - if hasattr(self, 'c3'): - node3 = self.c3(node1) - if hasattr(self, 'c4'): - node3 = node3 + self.c4(node0) - else: - node3 = self.c4(node0) - if hasattr(self, 'down0'): - node4 = 
self.down0(node2) + self.down1(node3) - else: - node4 = node2 + node3 - return node4 diff --git a/vega/networks/pytorch/gan/fully_super_network.py b/vega/networks/pytorch/gan/fully_super_network.py deleted file mode 100644 index 6c1ac3c..0000000 --- a/vega/networks/pytorch/gan/fully_super_network.py +++ /dev/null @@ -1,127 +0,0 @@ -# -*- coding: utf-8 -*- - -# Copyright (C) 2020. Huawei Technologies Co., Ltd. All rights reserved. -# This program is free software; you can redistribute it and/or modify -# it under the terms of the MIT License. -# This program is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# MIT License for more details. - -"""Defined GAN Blocks For Image Generation.""" -import torch -from torch import nn -import numpy as np -from .fully_basic_blocks import Cell, OptimizedDisBlock, DisCell -from vega.common import ClassFactory, ClassType - - -@ClassFactory.register(ClassType.NETWORK) -class Generator(nn.Module): - """Generator class.""" - - def __init__(self, latent_dim, bottom_width, gf_dim, genotypes): - super(Generator, self).__init__() - self.gf_dim = gf_dim - self.bottom_width = bottom_width - - self.base_latent_dim = latent_dim // 3 - self.l1 = nn.Linear(self.base_latent_dim, - (self.bottom_width ** 2) * self.gf_dim) - self.l2 = nn.Linear(self.base_latent_dim, (( - self.bottom_width * 2) ** 2) * self.gf_dim) - self.l3 = nn.Linear(self.base_latent_dim, (( - self.bottom_width * 4) ** 2) * self.gf_dim) - self.cell1 = Cell(self.gf_dim, self.gf_dim, 'nearest', - genotypes[0], num_skip_in=0) - self.cell2 = Cell(self.gf_dim, self.gf_dim, 'bilinear', - genotypes[1], num_skip_in=1) - self.cell3 = Cell(self.gf_dim, self.gf_dim, 'nearest', - genotypes[2], num_skip_in=2) - self.to_rgb = nn.Sequential( - nn.BatchNorm2d(self.gf_dim), nn.ReLU(), nn.Conv2d( - self.gf_dim, 3, 3, 1, 1), nn.Tanh() - ) - - def forward(self, z): - """Call Generator.""" - h = self.l1(z[:, :self.base_latent_dim])\ - .view(-1, self.gf_dim, self.bottom_width, self.bottom_width) - - n1 = self.l2(z[:, self.base_latent_dim:self.base_latent_dim * 2])\ - .view(-1, self.gf_dim, self.bottom_width * 2, self.bottom_width * 2) - - n2 = self.l3(z[:, self.base_latent_dim * 2:])\ - .view(-1, self.gf_dim, self.bottom_width * 4, self.bottom_width * 4) - - h1_skip_out, h1 = self.cell1(h) - h2_skip_out, h2 = self.cell2(h1 + n1, (h1_skip_out, )) - __, h3 = self.cell3(h2 + n2, (h1_skip_out, h2_skip_out)) - output = self.to_rgb(h3) - - return output - - -@ClassFactory.register(ClassType.NETWORK) -class Discriminator(nn.Module): - """Discriminator class.""" - - def __init__(self, df_dim, genotypes, activation=nn.ReLU()): - super(Discriminator, self).__init__() - self.ch = df_dim - self.activation = activation - self.block1 = OptimizedDisBlock(3, self.ch) - self.block2 = DisCell( - self.ch, self.ch, activation=activation, genotype=genotypes[0]) - self.block3 = DisCell( - self.ch, self.ch, activation=activation, genotype=genotypes[1]) - self.block4 = DisCell( - self.ch, self.ch, activation=activation, genotype=genotypes[2]) - self.l5 = nn.Linear(self.ch, 1, bias=False) - self.l5 = nn.utils.spectral_norm(self.l5) - - def forward(self, x): - """Call Discriminator.""" - h = x - layers = [self.block1, self.block2, self.block3] - model = nn.Sequential(*layers) - h = model(h) - h = self.block4(h) - h = self.activation(h) - # Global average pooling - h = h.sum(2).sum(2) - output = self.l5(h) - - return 
output - - -@ClassFactory.register(ClassType.NETWORK) -class GAN(nn.Module): - """GAN.""" - - def __init__(self, generator, discriminator, latent_dim, gen_bs): - super(GAN, self).__init__() - self.generator = ClassFactory.get_cls( - ClassType.NETWORK, generator.pop('type'))(**generator) - self.latent_dim = latent_dim - self.gen_bs = gen_bs - self.discriminator = ClassFactory.get_cls( - ClassType.NETWORK, discriminator.pop('type'))(**discriminator) - - def forward(self, imgs, step_name): - """Call GAN.""" - if step_name == 'dis': - z = torch.cuda.FloatTensor(np.random.normal( - 0, 1, (imgs.shape[0], self.latent_dim))) - real_imgs = imgs - real_validity = self.discriminator(real_imgs) - fake_imgs = self.generator(z).detach() - assert fake_imgs.size() == real_imgs.size() - fake_validity = self.discriminator(fake_imgs) - return (real_validity, fake_validity) - else: - gen_z = torch.cuda.FloatTensor(np.random.normal( - 0, 1, (self.gen_bs, self.latent_dim))) - gen_imgs = self.generator(gen_z) - fake_validity = self.discriminator(gen_imgs) - return fake_validity diff --git a/vega/networks/pytorch/heads/__init__.py b/vega/networks/pytorch/heads/__init__.py index 11c687b..b670b0d 100644 --- a/vega/networks/pytorch/heads/__init__.py +++ b/vega/networks/pytorch/heads/__init__.py @@ -1,12 +1,18 @@ # -*- coding:utf-8 -*- # Copyright (C) 2020. Huawei Technologies Co., Ltd. All rights reserved. -# This program is free software; you can redistribute it and/or modify -# it under the terms of the MIT License. -# This program is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# MIT License for more details. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. """Lazy import head network.""" diff --git a/vega/networks/pytorch/heads/auto_lane_head.py b/vega/networks/pytorch/heads/auto_lane_head.py index a6e2e00..f2100ec 100644 --- a/vega/networks/pytorch/heads/auto_lane_head.py +++ b/vega/networks/pytorch/heads/auto_lane_head.py @@ -1,12 +1,18 @@ # -*- coding:utf-8 -*- # Copyright (C) 2020. Huawei Technologies Co., Ltd. All rights reserved. -# This program is free software; you can redistribute it and/or modify -# it under the terms of the MIT License. -# This program is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# MIT License for more details. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+# See the License for the specific language governing permissions and +# limitations under the License. """Head of CurveLaneNas.""" import torch diff --git a/vega/networks/pytorch/heads/auxiliary_head.py b/vega/networks/pytorch/heads/auxiliary_head.py index f812ea3..1514984 100644 --- a/vega/networks/pytorch/heads/auxiliary_head.py +++ b/vega/networks/pytorch/heads/auxiliary_head.py @@ -1,12 +1,18 @@ # -*- coding:utf-8 -*- # Copyright (C) 2020. Huawei Technologies Co., Ltd. All rights reserved. -# This program is free software; you can redistribute it and/or modify -# it under the terms of the MIT License. -# This program is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# MIT License for more details. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. """AuxiliaryHead for NAGO.""" import torch.nn as nn diff --git a/vega/networks/pytorch/losses/__init__.py b/vega/networks/pytorch/losses/__init__.py index 4b51db7..c5aaa08 100644 --- a/vega/networks/pytorch/losses/__init__.py +++ b/vega/networks/pytorch/losses/__init__.py @@ -1,12 +1,18 @@ # -*- coding:utf-8 -*- # Copyright (C) 2020. Huawei Technologies Co., Ltd. All rights reserved. -# This program is free software; you can redistribute it and/or modify -# it under the terms of the MIT License. -# This program is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# MIT License for more details. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. """Lazy import loss functions.""" @@ -19,4 +25,5 @@ "custom_cross_entropy_loss": ["trainer.loss:CustomCrossEntropyLoss"], "cross_entropy_label_smooth": ["trainer.loss:CrossEntropyLabelSmooth"], "mix_auxiliary_loss": ["trainer.loss:MixAuxiliaryLoss"], + "decaug_loss": ["trainer.loss:DecAugLoss"], }) diff --git a/vega/networks/pytorch/losses/cross_entropy_label_smooth.py b/vega/networks/pytorch/losses/cross_entropy_label_smooth.py index 2114dec..3e6819d 100644 --- a/vega/networks/pytorch/losses/cross_entropy_label_smooth.py +++ b/vega/networks/pytorch/losses/cross_entropy_label_smooth.py @@ -1,12 +1,18 @@ # -*- coding: utf-8 -*- # Copyright (C) 2020. Huawei Technologies Co., Ltd. All rights reserved. -# This program is free software; you can redistribute it and/or modify -# it under the terms of the MIT License. 
-# This program is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# MIT License for more details.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.

"""Cross Entropy Label Smooth Loss."""
import torch
diff --git a/vega/networks/pytorch/losses/custom_cross_entropy_loss.py b/vega/networks/pytorch/losses/custom_cross_entropy_loss.py
index b2cd791..27f7909 100644
--- a/vega/networks/pytorch/losses/custom_cross_entropy_loss.py
+++ b/vega/networks/pytorch/losses/custom_cross_entropy_loss.py
@@ -1,19 +1,25 @@
 # -*- coding: utf-8 -*-

 # Copyright (C) 2020. Huawei Technologies Co., Ltd. All rights reserved.
-# This program is free software; you can redistribute it and/or modify
-# it under the terms of the MIT License.
-# This program is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# MIT License for more details.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.

"""Cross Entropy Loss."""
import torch
import torch.nn.functional as F
from vega.modules.module import Module
-from .reduce_loss import weight_reduce_loss
 from vega.common import ClassFactory, ClassType
+from .reduce_loss import weight_reduce_loss


def cross_entropy(pred, label, weight=None, reduction='mean', avg_factor=None):
@@ -82,11 +88,13 @@ def mask_cross_entropy(pred, target, label, reduction='mean', avg_factor=None):
    :param avg_factor: avg factor
    :return: loss
    """
-    assert reduction == 'mean' and avg_factor is None
-    num_rois = pred.size()[0]
-    inds = torch.arange(0, num_rois, dtype=torch.long, device=pred.device)
-    pred_slice = pred[inds, label].squeeze(1)
-    return F.binary_cross_entropy_with_logits(pred_slice, target, reduction='mean')[None]
+    if reduction == 'mean' and avg_factor is None:
+        num_rois = pred.size()[0]
+        inds = torch.arange(0, num_rois, dtype=torch.long, device=pred.device)
+        pred_slice = pred[inds, label].squeeze(1)
+        return F.binary_cross_entropy_with_logits(pred_slice, target, reduction='mean')[None]
+    else:
+        raise ValueError('Failed to calculate mask cross_entropy.')


 @ClassFactory.register(ClassType.NETWORK)
@@ -120,9 +128,11 @@ def forward(self, cls_score, label, weight=None, avg_factor=None, reduction_over
    :param reduction_override: reduce function
    :return: loss
    """
-        assert reduction_override in (None, 'none', 'mean', 'sum')
-        reduction = (
-            reduction_override if reduction_override else self.reduction)
-        loss_cls = self.loss_weight * self.loss_function(cls_score, label, weight, reduction=reduction,
-                                                         avg_factor=avg_factor, **kwargs)
-        return loss_cls
+        if reduction_override in (None, 'none', 'mean', 'sum'):
+            reduction = (
+                reduction_override if reduction_override else self.reduction)
+            loss_cls = self.loss_weight * self.loss_function(cls_score, label, weight, reduction=reduction,
+                                                             avg_factor=avg_factor, **kwargs)
+            return loss_cls
+        else:
+            raise ValueError('Failed to calculate CustomCrossEntropyLoss.')
diff --git a/vega/networks/pytorch/losses/decaug_loss.py b/vega/networks/pytorch/losses/decaug_loss.py
new file mode 100644
index 0000000..ed89cb9
--- /dev/null
+++ b/vega/networks/pytorch/losses/decaug_loss.py
@@ -0,0 +1,79 @@
+# -*- coding:utf-8 -*-
+
+# Copyright (C) 2020. Huawei Technologies Co., Ltd. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+ +"""DecAug loss""" + +import random +import torch +from vega.common import ClassFactory, ClassType +from vega.modules.module import Module +from vega.modules.operators import ops + + +@ClassFactory.register(ClassType.LOSS) +class DecAugLoss(Module): + + def __init__(self, balance1=0.01, balance2=0.01, balanceorth=0.01, epsilon=1e-8, perturbation=1.0): + super(DecAugLoss, self).__init__() + self.balance1 = balance1 + self.balance2 = balance2 + self.balanceorth = balanceorth + self.epsilon = epsilon + self.perturbation = perturbation + self.cross_entropy = ClassFactory.get_cls(ClassType.LOSS, "CrossEntropyLoss")() + + def forward(self, x, targets=None): + _, logits_category, logits_concept, feature_category, feature_category, model = x + gt_label, gt_concept = targets + loss1 = self.cross_entropy(logits_category, gt_label) + loss2 = self.cross_entropy(logits_concept, gt_concept) + parm = {} + for name, parameters in model.named_parameters(): + parm[name] = parameters + # concept branch + w_branch = parm['concept_branch.weight'] + w_tensor = parm['fcc0.weight'] + # classification branch + w_branch_l = parm['category_branch.weight'] + w_tensor_l = parm['fc0.weight'] + + w_out = parm['classification.weight'] + b_out = parm['classification.bias'] + + w = ops.matmul(w_tensor, w_branch) + grad = -1 * w[gt_concept] + ops.matmul(logits_concept.detach(), w) + grad_norm = grad / (grad.norm(2, dim=1, keepdim=True) + self.epsilon) + + w_l = ops.matmul(w_tensor_l, w_branch_l) + grad_l = -1 * w_l[gt_label] + ops.matmul(logits_category.detach(), w_l) + grad_norm_l = grad_l / (grad_l.norm(2, dim=1, keepdim=True) + self.epsilon) + b, L = grad_norm_l.shape + + grad_norm = grad_norm.reshape(b, 1, L) + grad_norm_l = grad_norm_l.reshape(b, L, 1) + loss_orth = ((torch.bmm(grad_norm, grad_norm_l).cuda()) ** 2).sum() + + grad_aug = -1 * w_tensor[gt_concept] + ops.matmul(logits_concept.detach(), w_tensor) + FGSM_attack = self.perturbation * (grad_aug.detach() / (grad_aug.detach().norm(2, dim=1, keepdim=True) + self.epsilon)) + + ratio = random.random() + feature_aug = ratio * FGSM_attack + embs = ops.concat((feature_category, feature_category + feature_aug), 1) + output = ops.matmul(embs, w_out.transpose(0, 1)) + b_out + + loss_class = self.cross_entropy(output, gt_label) + loss = loss_class + self.balance1 * loss1 + self.balance2 * loss2 + self.balanceorth * loss_orth + return loss diff --git a/vega/networks/pytorch/losses/mix_auxiliary_loss.py b/vega/networks/pytorch/losses/mix_auxiliary_loss.py index cf98fb4..8f15921 100644 --- a/vega/networks/pytorch/losses/mix_auxiliary_loss.py +++ b/vega/networks/pytorch/losses/mix_auxiliary_loss.py @@ -1,12 +1,18 @@ # -*- coding: utf-8 -*- # Copyright (C) 2020. Huawei Technologies Co., Ltd. All rights reserved. -# This program is free software; you can redistribute it and/or modify -# it under the terms of the MIT License. -# This program is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# MIT License for more details. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
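The DecAugLoss added above is registered under ClassType.LOSS (and exposed to the trainer via the new "decaug_loss" entry in the losses __init__), so it resolves like any other registered loss. The following is only an editorial usage sketch, not part of the diff: the balance values are simply the file's defaults, and the input/target wiring is inferred from the forward signature (a six-element tuple of network outputs plus the model itself, and a (gt_label, gt_concept) target pair).

    from vega.common import ClassFactory, ClassType

    # Resolve the registered loss class and instantiate it with its defaults.
    decaug_loss_cls = ClassFactory.get_cls(ClassType.LOSS, "DecAugLoss")
    criterion = decaug_loss_cls(balance1=0.01, balance2=0.01, balanceorth=0.01)

    # x bundles the DecAug network outputs with the model (the loss reads the
    # branch weights via model.named_parameters()); targets carries the
    # category and concept labels:
    # loss = criterion(x, (gt_label, gt_concept))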
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. """Mix Auxiliary Loss.""" import torch.nn as nn diff --git a/vega/networks/pytorch/losses/reduce_loss.py b/vega/networks/pytorch/losses/reduce_loss.py index 2948a08..24d7ae5 100644 --- a/vega/networks/pytorch/losses/reduce_loss.py +++ b/vega/networks/pytorch/losses/reduce_loss.py @@ -1,12 +1,18 @@ # -*- coding: utf-8 -*- # Copyright (C) 2020. Huawei Technologies Co., Ltd. All rights reserved. -# This program is free software; you can redistribute it and/or modify -# it under the terms of the MIT License. -# This program is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# MIT License for more details. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. """Reduce Loss.""" import functools diff --git a/vega/networks/pytorch/losses/smooth_l1_loss.py b/vega/networks/pytorch/losses/smooth_l1_loss.py index 7fb4d0a..0309b32 100644 --- a/vega/networks/pytorch/losses/smooth_l1_loss.py +++ b/vega/networks/pytorch/losses/smooth_l1_loss.py @@ -1,18 +1,24 @@ # -*- coding: utf-8 -*- # Copyright (C) 2020. Huawei Technologies Co., Ltd. All rights reserved. -# This program is free software; you can redistribute it and/or modify -# it under the terms of the MIT License. -# This program is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# MIT License for more details. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
"""Smooth L1 Loss.""" import torch from vega.modules.module import Module -from .reduce_loss import weighted_loss from vega.common import ClassType, ClassFactory +from .reduce_loss import weighted_loss @weighted_loss @@ -24,11 +30,12 @@ def smooth_l1_loss(pred, target, beta=1.0): :param beta: beta :return: loss """ - assert beta > 0 - assert pred.size() == target.size() and target.numel() > 0 - diff = torch.abs(pred - target) - loss = torch.where(diff < beta, 0.5 * diff * diff / beta, diff - 0.5 * beta) - return loss + if beta > 0 and pred.size() == target.size() and target.numel() > 0: + diff = torch.abs(pred - target) + loss = torch.where(diff < beta, 0.5 * diff * diff / beta, diff - 0.5 * beta) + return loss + else: + raise ValueError('Failed to calculate smooth l1 loss.') @ClassFactory.register(ClassType.NETWORK) diff --git a/vega/networks/pytorch/losses/sum_loss.py b/vega/networks/pytorch/losses/sum_loss.py index 91d0557..82c9605 100644 --- a/vega/networks/pytorch/losses/sum_loss.py +++ b/vega/networks/pytorch/losses/sum_loss.py @@ -1,21 +1,28 @@ # -*- coding: utf-8 -*- # Copyright (C) 2020. Huawei Technologies Co., Ltd. All rights reserved. -# This program is free software; you can redistribute it and/or modify -# it under the terms of the MIT License. -# This program is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# MIT License for more details. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. """Sum_loss for detection task.""" + +import os +import logging +from collections import OrderedDict import torch from torch import nn -from collections import OrderedDict from vega.common import ClassType, ClassFactory -import os -import pickle -import logging +from vega.common import FileOps @ClassFactory.register(ClassType.LOSS) @@ -53,11 +60,9 @@ def forward(self, input, target=None): init_loss = [_value for _key, _value in log_vars.items() if 'loss' in _key] if hasattr(self, "dynamic_loss_weight"): - # save the init loss loss_save = [float(_value.detach().cpu().numpy()) for _value in init_loss] save_file = os.path.join(self.save_path, "muti_loss.pkl") - with open(save_file, "wb") as f: - pickle.dump(loss_save, f) + FileOps.dump_pickle(loss_save, save_file) if len(self.dynamic_loss_weight) != len(init_loss): logging.error("The length of the loss must be same with the length of the weight, but got {} and {}" @@ -67,13 +72,6 @@ def forward(self, input, target=None): sum_loss = sum(weighted_loss) else: sum_loss = sum(init_loss) - # Debug - """ - if loss > 100: - logging.error(str(losses)) - import os - os._exit() - """ return sum_loss def adaptive_muti_loss(self, save_path, weight): diff --git a/vega/networks/pytorch/necks/__init__.py b/vega/networks/pytorch/necks/__init__.py index 4050f4a..62d1ec4 100644 --- a/vega/networks/pytorch/necks/__init__.py +++ b/vega/networks/pytorch/necks/__init__.py @@ -1,12 +1,18 @@ # -*- coding:utf-8 -*- # Copyright (C) 2020. 
Huawei Technologies Co., Ltd. All rights reserved. -# This program is free software; you can redistribute it and/or modify -# it under the terms of the MIT License. -# This program is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# MIT License for more details. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. """Lazy import necks.""" diff --git a/vega/networks/pytorch/necks/ffm.py b/vega/networks/pytorch/necks/ffm.py index b1030db..41147c9 100644 --- a/vega/networks/pytorch/necks/ffm.py +++ b/vega/networks/pytorch/necks/ffm.py @@ -1,18 +1,24 @@ # -*- coding: utf-8 -*- # Copyright (C) 2020. Huawei Technologies Co., Ltd. All rights reserved. -# This program is free software; you can redistribute it and/or modify -# it under the terms of the MIT License. -# This program is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# MIT License for more details. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. """CurveLane neck for detection.""" import torch import torch.nn as nn -from ..blocks.layer_creator import LayerCreator from vega.common import ClassType, ClassFactory +from ..blocks.layer_creator import LayerCreator class ConvPack(nn.Module): diff --git a/vega/networks/pytorch/necks/fpn.py b/vega/networks/pytorch/necks/fpn.py index 070507e..3894640 100644 --- a/vega/networks/pytorch/necks/fpn.py +++ b/vega/networks/pytorch/necks/fpn.py @@ -1,12 +1,18 @@ # -*- coding: utf-8 -*- # Copyright (C) 2020. Huawei Technologies Co., Ltd. All rights reserved. -# This program is free software; you can redistribute it and/or modify -# it under the terms of the MIT License. -# This program is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# MIT License for more details. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+# See the License for the specific language governing permissions and +# limitations under the License. """FPN neck for detection.""" from vega.common.class_factory import ClassType, ClassFactory diff --git a/vega/networks/pytorch/ops/__init__.py b/vega/networks/pytorch/ops/__init__.py index d31cca5..5ffb675 100644 --- a/vega/networks/pytorch/ops/__init__.py +++ b/vega/networks/pytorch/ops/__init__.py @@ -1,12 +1,18 @@ # -*- coding:utf-8 -*- # Copyright (C) 2020. Huawei Technologies Co., Ltd. All rights reserved. -# This program is free software; you can redistribute it and/or modify -# it under the terms of the MIT License. -# This program is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# MIT License for more details. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. """Lazy import ops.""" diff --git a/vega/networks/pytorch/ops/fmdunit.py b/vega/networks/pytorch/ops/fmdunit.py index af4b842..8824784 100644 --- a/vega/networks/pytorch/ops/fmdunit.py +++ b/vega/networks/pytorch/ops/fmdunit.py @@ -1,12 +1,18 @@ # -*- coding: utf-8 -*- # Copyright (C) 2020. Huawei Technologies Co., Ltd. All rights reserved. -# This program is free software; you can redistribute it and/or modify -# it under the terms of the MIT License. -# This program is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# MIT License for more details. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. """Defined FMD Unit.""" import torch diff --git a/vega/networks/pytorch/transformer/__init__.py b/vega/networks/pytorch/transformer/__init__.py new file mode 100644 index 0000000..ca5828c --- /dev/null +++ b/vega/networks/pytorch/transformer/__init__.py @@ -0,0 +1,23 @@ +# -*- coding:utf-8 -*- + +# Copyright (C) 2020. Huawei Technologies Co., Ltd. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""Lazy import transformer."""
+
+from vega.common.class_factory import ClassFactory
+
+ClassFactory.lazy_register("vega.networks.pytorch.transformer", {
+    "vit": ["VisionTransformer"]
+})
diff --git a/vega/networks/pytorch/transformer/vit.py b/vega/networks/pytorch/transformer/vit.py
new file mode 100644
index 0000000..2991a9b
--- /dev/null
+++ b/vega/networks/pytorch/transformer/vit.py
@@ -0,0 +1,232 @@
+# -*- coding:utf-8 -*-
+
+# Copyright (C) 2020. Huawei Technologies Co., Ltd. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""Implementation of Vision Transformer (ViT)."""
+import logging
+from functools import partial
+from collections import OrderedDict
+from torch.nn import functional as F
+import torch
+import torch.nn as nn
+from timm.models.layers import DropPath
+from vega.common.class_factory import ClassFactory, ClassType
+from vega.modules.module import Module
+
+_logger = logging.getLogger(__name__)
+
+
+class GELU(nn.Module):
+    """Applies the Gaussian Error Linear Units function (w/ dummy inplace arg)."""
+
+    def __init__(self, inplace: bool = False):
+        super(GELU, self).__init__()
+
+    def forward(self, input: torch.Tensor) -> torch.Tensor:
+        """Forward."""
+        return F.gelu(input)
+
+
+class Mlp(nn.Module):
+    """Mlp layer in Transformer."""
+
+    def __init__(self, in_features, hidden_features=None, out_features=None, act_layer=GELU, drop=0.):
+        super().__init__()
+        out_features = out_features or in_features
+        hidden_features = hidden_features or in_features
+        self.fc1 = nn.Linear(in_features, hidden_features)
+        self.act = act_layer()
+        self.fc2 = nn.Linear(hidden_features, out_features)
+        self.drop = nn.Dropout(drop)
+
+    def forward(self, x):
+        """Forward mlp layer."""
+        x = self.fc1(x)
+        x = self.act(x)
+        x = self.drop(x)
+        x = self.fc2(x)
+        x = self.drop(x)
+        return x
+
+
+class Attention(nn.Module):
+    """Attention layer in Transformer."""
+
+    def __init__(self, dim, num_heads=8, qkv_bias=False, qk_scale=None, attn_drop=0., proj_drop=0.):
+        super().__init__()
+        self.num_heads = num_heads
+        head_dim = dim // num_heads
+        self.scale = qk_scale or head_dim ** -0.5
+
+        self.qkv = nn.Linear(dim, dim * 3, bias=qkv_bias)
+        self.attn_drop = nn.Dropout(attn_drop)
+        self.proj = nn.Linear(dim, dim)
+        self.proj_drop = nn.Dropout(proj_drop)
+
+    def forward(self, x):
+        """Forward Attention layer."""
+        B, N, C = x.shape
+        qkv = self.qkv(x).reshape(B, N, 3, self.num_heads, C // self.num_heads).permute(2, 0, 3, 1, 4)
+        q, k, v = qkv[0], qkv[1], qkv[2]
+        attn = (q @ k.transpose(-2, -1)) * self.scale
+        attn = attn.softmax(dim=-1)
+        attn = self.attn_drop(attn)
+
+        x = (attn @ v).transpose(1, 2).reshape(B, N, C)
+        x = self.proj(x)
+        x = self.proj_drop(x)
+        return x
+
+
+class Block(nn.Module):
+    """Block of Transformer, which contains one Attention layer and one MLP layer."""
+
+    def __init__(self, dim, num_heads, mlp_ratio=4., qkv_bias=False, qk_scale=None, drop=0., attn_drop=0.,
+                 drop_path=0., act_layer=GELU, norm_layer=nn.LayerNorm):
+        super().__init__()
+        self.norm1 = norm_layer(dim)
+        self.attn = Attention(
+            dim, num_heads=num_heads, qkv_bias=qkv_bias, qk_scale=qk_scale, attn_drop=attn_drop, proj_drop=drop)
+        # NOTE: drop path for stochastic depth, we shall see if this is better than dropout here
+        self.drop_path = DropPath(drop_path) if drop_path > 0. else nn.Identity()
+        self.norm2 = norm_layer(dim)
+        mlp_hidden_dim = int(dim * mlp_ratio)
+        self.mlp = Mlp(in_features=dim, hidden_features=mlp_hidden_dim, act_layer=act_layer, drop=drop)
+
+    def forward(self, x):
+        """Forward block."""
+        x = x + self.drop_path(self.attn(self.norm1(x)))
+        x = x + self.drop_path(self.mlp(self.norm2(x)))
+        return x
+
+
+class PatchEmbed(nn.Module):
+    """Image to Patch Embedding."""
+
+    def __init__(self, img_size=224, patch_size=16, in_chans=3, embed_dim=768, norm_layer=None):
+        super().__init__()
+        img_size = (img_size, img_size)
+        patch_size = (patch_size, patch_size)
+        self.img_size = img_size
+        self.patch_size = patch_size
+        self.patch_grid = (img_size[0] // patch_size[0], img_size[1] // patch_size[1])
+        self.num_patches = self.patch_grid[0] * self.patch_grid[1]
+
+        self.proj = nn.Conv2d(in_chans, embed_dim, kernel_size=patch_size, stride=patch_size)
+        self.norm = norm_layer(embed_dim) if norm_layer else nn.Identity()
+
+    def forward(self, x):
+        """Forward PatchEmbed."""
+        x = self.proj(x).flatten(2).transpose(1, 2)
+        x = self.norm(x)
+        return x
+
+
+@ClassFactory.register(ClassType.NETWORK)
+class VisionTransformer(Module):
+    """Vision Transformer: `An Image is Worth 16x16 Words: Transformers for Image Recognition at Scale`.
+
+    :param img_size: input image size
+    :type img_size: int, tuple
+    :param patch_size: patch_size
+    :type patch_size: int, tuple
+    :param in_chans: number of input channels
+    :type in_chans: int
+    :param num_classes: number of classes for classification head
+    :type num_classes: int
+    :param embed_dim: embedding dimension
+    :type embed_dim: int
+    :param depth: depth of transformer
+    :type depth: int
+    :param num_heads: number of attention heads
+    :type num_heads: int
+    :param mlp_ratio: ratio of mlp hidden dim to embedding dim
+    :type mlp_ratio: int
+    :param qkv_bias: enable bias for qkv if True
+    :type qkv_bias: bool
+    :param qk_scale: override default qk scale of head_dim ** -0.5 if set
+    :type qk_scale: float
+    :param representation_size: enable and set representation layer (pre-logits) to this value if set
+    :type representation_size: (Optional[int])
+    :param distilled: model includes a distillation token and head as in DeiT models
+    :type distilled: bool
+    :param drop_rate: dropout rate
+    :type drop_rate: float
+    :param attn_drop_rate: attention dropout rate
+    :type attn_drop_rate: float
+    :param drop_path_rate: stochastic depth rate
+    :type drop_path_rate: float
+    :param embed_layer: patch embedding layer
+    :type embed_layer: nn.Module
+    :param norm_layer: normalization layer
+    :type norm_layer: nn.Module
+    :param weight_init: weight init scheme
+    :type weight_init: str
+    """
+
+    def __init__(self, img_size=224, patch_size=16, in_chans=3, num_classes=1000, embed_dim=768, depth=12,
+                 num_heads=12, mlp_ratio=4., qkv_bias=True, qk_scale=None, representation_size=None, distilled=False,
+                 drop_rate=0., attn_drop_rate=0., drop_path_rate=0., embed_layer=PatchEmbed, norm_layer=None,
+                 act_layer=None, weight_init=''):
+        """Construct the VisionTransformer class."""
+        super().__init__()
+        self.num_classes = num_classes
+        self.num_features =
self.embed_dim = embed_dim # num_features for consistency with other models + self.num_tokens = 2 if distilled else 1 + norm_layer = norm_layer or partial(nn.LayerNorm, eps=1e-6) + act_layer = act_layer or GELU + + self.patch_embed = embed_layer( + img_size=img_size, patch_size=patch_size, in_chans=in_chans, embed_dim=embed_dim) + num_patches = self.patch_embed.num_patches + + self.cls_token = nn.Parameter(torch.zeros(1, 1, embed_dim)) + self.dist_token = nn.Parameter(torch.zeros(1, 1, embed_dim)) if distilled else None + self.pos_embed = nn.Parameter(torch.zeros(1, num_patches + self.num_tokens, embed_dim)) + self.pos_drop = nn.Dropout(p=drop_rate) + + dpr = [x.item() for x in torch.linspace(0, drop_path_rate, depth)] # stochastic depth decay rule + self.blocks = nn.Sequential(*[ + Block( + dim=embed_dim, num_heads=num_heads, mlp_ratio=mlp_ratio, qkv_bias=qkv_bias, qk_scale=qk_scale, + drop=drop_rate, attn_drop=attn_drop_rate, drop_path=dpr[i], norm_layer=norm_layer, act_layer=act_layer) + for i in range(depth)]) + self.norm = norm_layer(embed_dim) + + # Representation layer + if representation_size and not distilled: + self.num_features = representation_size + self.pre_logits = nn.Sequential(OrderedDict([ + ('fc', nn.Linear(embed_dim, representation_size)), + ('act', nn.Tanh()) + ])) + else: + self.pre_logits = nn.Identity() + + # Classifier head(s) + self.head = nn.Linear(self.num_features, num_classes) if num_classes > 0 else nn.Identity() + + def forward(self, x): + """Forward VisionTransformer.""" + x = self.patch_embed(x) + cls_token = self.cls_token.expand(x.shape[0], -1, -1) # stole cls_tokens impl from Phil Wang, thanks + + x = torch.cat((cls_token, x), dim=1) + x = self.pos_drop(x + self.pos_embed) + x = self.blocks(x) + x = self.norm(x) + x = self.pre_logits(x[:, 0]) + x = self.head(x) + return x diff --git a/vega/networks/quant.py b/vega/networks/quant.py index 96d0747..cb32519 100644 --- a/vega/networks/quant.py +++ b/vega/networks/quant.py @@ -1,12 +1,18 @@ # -*- coding: utf-8 -*- # Copyright (C) 2020. Huawei Technologies Co., Ltd. All rights reserved. -# This program is free software; you can redistribute it and/or modify -# it under the terms of the MIT License. -# This program is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# MIT License for more details. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. """Quantized Convlution.""" import logging diff --git a/vega/networks/resnet.py b/vega/networks/resnet.py index 2077b64..a7ce639 100644 --- a/vega/networks/resnet.py +++ b/vega/networks/resnet.py @@ -1,18 +1,24 @@ # -*- coding:utf-8 -*- # Copyright (C) 2020. Huawei Technologies Co., Ltd. All rights reserved. -# This program is free software; you can redistribute it and/or modify -# it under the terms of the MIT License. 
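For orientation, the vit.py module added above registers VisionTransformer as an ordinary NETWORK class, so it can also be constructed directly once the package is importable. The following is only an editorial sketch, not part of the diff; the sizes shown are simply the module's defaults, not a recommended configuration:

    import torch
    from vega.networks.pytorch.transformer.vit import VisionTransformer

    # ViT-Base-ish settings taken from the constructor defaults above.
    model = VisionTransformer(img_size=224, patch_size=16, embed_dim=768,
                              depth=12, num_heads=12, num_classes=1000)
    logits = model(torch.randn(2, 3, 224, 224))
    print(logits.shape)  # torch.Size([2, 1000]) -- one logit vector per image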
-# This program is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# MIT License for more details. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. """This is SearchSpace for network.""" from vega.common import ClassFactory, ClassType from vega.modules.module import Module -from .resnet_general import ResNetGeneral from vega.modules.operators.ops import Linear, AdaptiveAvgPool2d, View +from .resnet_general import ResNetGeneral @ClassFactory.register(ClassType.NETWORK) diff --git a/vega/networks/resnet_det.py b/vega/networks/resnet_det.py index 5120cce..c5314ac 100644 --- a/vega/networks/resnet_det.py +++ b/vega/networks/resnet_det.py @@ -1,12 +1,18 @@ # -*- coding:utf-8 -*- # Copyright (C) 2020. Huawei Technologies Co., Ltd. All rights reserved. -# This program is free software; you can redistribute it and/or modify -# it under the terms of the MIT License. -# This program is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# MIT License for more details. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. """This is SearchSpace for network.""" from vega.common import ClassFactory, ClassType diff --git a/vega/networks/resnet_general.py b/vega/networks/resnet_general.py index 82ae128..c232793 100644 --- a/vega/networks/resnet_general.py +++ b/vega/networks/resnet_general.py @@ -1,12 +1,18 @@ # -*- coding:utf-8 -*- # Copyright (C) 2020. Huawei Technologies Co., Ltd. All rights reserved. -# This program is free software; you can redistribute it and/or modify -# it under the terms of the MIT License. -# This program is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# MIT License for more details. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+# See the License for the specific language governing permissions and +# limitations under the License. """This is SearchSpace for general network.""" from vega.common import ClassFactory, ClassType diff --git a/vega/networks/resnext_det.py b/vega/networks/resnext_det.py index 390b3f9..926fcca 100644 --- a/vega/networks/resnext_det.py +++ b/vega/networks/resnext_det.py @@ -1,12 +1,18 @@ # -*- coding:utf-8 -*- # Copyright (C) 2020. Huawei Technologies Co., Ltd. All rights reserved. -# This program is free software; you can redistribute it and/or modify -# it under the terms of the MIT License. -# This program is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# MIT License for more details. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. """This is SearchSpace for network.""" from vega.common import ClassFactory, ClassType diff --git a/vega/networks/sgas_network.py b/vega/networks/sgas_network.py index e02a97f..e9ef279 100644 --- a/vega/networks/sgas_network.py +++ b/vega/networks/sgas_network.py @@ -1,12 +1,18 @@ # -*- coding:utf-8 -*- # Copyright (C) 2020. Huawei Technologies Co., Ltd. All rights reserved. -# This program is free software; you can redistribute it and/or modify -# it under the terms of the MIT License. -# This program is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# MIT License for more details. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. """SGAS network.""" from vega.common import ClassFactory, ClassType diff --git a/vega/networks/simple_cnn.py b/vega/networks/simple_cnn.py index 992108f..e38763b 100644 --- a/vega/networks/simple_cnn.py +++ b/vega/networks/simple_cnn.py @@ -1,12 +1,18 @@ # -*- coding: utf-8 -*- # Copyright (C) 2020. Huawei Technologies Co., Ltd. All rights reserved. -# This program is free software; you can redistribute it and/or modify -# it under the terms of the MIT License. -# This program is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# MIT License for more details. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. """Simple CNN network.""" diff --git a/vega/networks/spnet_backbone.py b/vega/networks/spnet_backbone.py index ebd5a86..5926947 100644 --- a/vega/networks/spnet_backbone.py +++ b/vega/networks/spnet_backbone.py @@ -1,14 +1,25 @@ # -*- coding:utf-8 -*- # Copyright (C) 2020. Huawei Technologies Co., Ltd. All rights reserved. -# This program is free software; you can redistribute it and/or modify -# it under the terms of the MIT License. -# This program is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# MIT License for more details. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. """This is SearchSpace for network.""" + +import logging +import math +import torch +import torch.nn as nn from vega.common import ClassFactory, ClassType from vega.modules.connections import OutlistSequential from vega.networks.necks import BasicBlock, Bottleneck, ResNeXt_Block @@ -16,11 +27,6 @@ from vega.modules.module import Module from vega.modules.connections import Sequential from vega.networks.pytorch.backbones import match_name, remove_layers, load_checkpoint -import torch -from collections import OrderedDict -import logging -import torch.nn as nn -import math from .resnet_det import base_arch_code base_blcok = {'BasicBlock': BasicBlock, diff --git a/vega/networks/super_network.py b/vega/networks/super_network.py index eb4ac39..bb3f29c 100644 --- a/vega/networks/super_network.py +++ b/vega/networks/super_network.py @@ -1,13 +1,20 @@ # -*- coding:utf-8 -*- # Copyright (C) 2020. Huawei Technologies Co., Ltd. All rights reserved. -# This program is free software; you can redistribute it and/or modify -# it under the terms of the MIT License. -# This program is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# MIT License for more details. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
"""CARS and DARTS network.""" +import vega from vega.common import ClassFactory, ClassType from vega.modules.blocks import AuxiliaryHead from vega.modules.connections import Cells @@ -49,6 +56,10 @@ def build(self): """Initialize architecture parameters.""" self.set_parameters('alphas_normal', 1e-3 * ops.random_normal(self.len_alpha, self.num_ops)) self.set_parameters('alphas_reduce', 1e-3 * ops.random_normal(self.len_alpha, self.num_ops)) + if vega.is_torch_backend(): + self.alphas_normal.requires_grad = False + self.alphas_reduce.requires_grad = False + self._apply_once = False # Ensure that the build function is not called in the forward function. @property def learnable_params(self): diff --git a/vega/networks/tensorflow/__init__.py b/vega/networks/tensorflow/__init__.py index 07e0747..c79e319 100644 --- a/vega/networks/tensorflow/__init__.py +++ b/vega/networks/tensorflow/__init__.py @@ -1,29 +1,28 @@ # -*- coding:utf-8 -*- # Copyright (C) 2020. Huawei Technologies Co., Ltd. All rights reserved. -# This program is free software; you can redistribute it and/or modify -# it under the terms of the MIT License. -# This program is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# MIT License for more details. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. """Lazy import tensorflow networks.""" -from .network import Sequential from vega.common.class_factory import ClassFactory +from .network import Sequential ClassFactory.lazy_register("vega.networks.tensorflow", { "resnet_tf": ["ResNetTF", 'ResNetSlim'], # backbones "backbones.resnet_det": ["ResNetDet"], - # customs - "customs.edvr.edvr": ["EDVR"], - "customs.gcn_regressor": ["GCNRegressor"], - # detectors - "detectors.faster_rcnn_trainer_callback": ["FasterRCNNTrainerCallback"], - "detectors.faster_rcnn": ["FasterRCNN"], - "detectors.tf_optimizer": ["TFOptimizer"], # losses "losses.cross_entropy_loss": ["CrossEntropyLoss"], "losses.mix_auxiliary_loss": ["MixAuxiliaryLoss"], diff --git a/vega/networks/tensorflow/backbones/resnet_det.py b/vega/networks/tensorflow/backbones/resnet_det.py index 028b6bd..995fc1c 100644 --- a/vega/networks/tensorflow/backbones/resnet_det.py +++ b/vega/networks/tensorflow/backbones/resnet_det.py @@ -1,12 +1,18 @@ # -*- coding: utf-8 -*- # Copyright (C) 2020. Huawei Technologies Co., Ltd. All rights reserved. -# This program is free software; you can redistribute it and/or modify -# it under the terms of the MIT License. -# This program is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# MIT License for more details. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. """Residual Network.""" import tensorflow as tf diff --git a/vega/networks/tensorflow/customs/edvr/__init__.py b/vega/networks/tensorflow/customs/edvr/__init__.py deleted file mode 100644 index 507647b..0000000 --- a/vega/networks/tensorflow/customs/edvr/__init__.py +++ /dev/null @@ -1 +0,0 @@ -from .edvr import EDVR diff --git a/vega/networks/tensorflow/customs/edvr/arch_util.py b/vega/networks/tensorflow/customs/edvr/arch_util.py deleted file mode 100644 index c794a49..0000000 --- a/vega/networks/tensorflow/customs/edvr/arch_util.py +++ /dev/null @@ -1,333 +0,0 @@ -# -*- coding: utf-8 -*- - -# Copyright (C) 2020. Huawei Technologies Co., Ltd. All rights reserved. -# This program is free software; you can redistribute it and/or modify -# it under the terms of the MIT License. -# This program is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# MIT License for more details. - -"""arch util modules.""" -import math -import tensorflow as tf - - -def resize(x, size, align_corners=False, name=None, half_pixel_centers=False, method='bicubic'): - """Resize function.""" - if method == 'bicubic': - upsampling = tf.image.resize_bicubic - elif method == 'bilinear': - upsampling = tf.image.resize_bilinear - else: - raise ValueError - return upsampling(x, size=size, align_corners=align_corners, name=name, half_pixel_centers=half_pixel_centers) - - -def calculate_gain(nonlinearity, param=None): - """Calculate gain for linear functions.""" - linear_fns = ['linear', 'conv1d', 'conv2d', 'conv3d', 'conv_transpose1d', 'conv_transpose2d', 'conv_transpose3d'] - if nonlinearity in linear_fns or nonlinearity == 'sigmoid': - return 1 - elif nonlinearity == 'tanh': - return 5.0 / 3 - elif nonlinearity == 'relu': - return math.sqrt(2.0) - elif nonlinearity == 'leakyrelu': - if param is None: - negative_slope = 0.01 - elif not isinstance(param, bool) and isinstance(param, int) or isinstance(param, float): - # True/False are instances of int, hence check above - negative_slope = param - else: - raise ValueError("negative_slope {} not a valid number".format(param)) - return math.sqrt(2.0 / (1 + negative_slope ** 2)) - else: - raise ValueError("Unsupported nonlinearity {}".format(nonlinearity)) - - -def calculate_fan(kernel_size, in_channels, out_channels=None, mode='fan_in'): - """Calculate fan value.""" - if mode == 'fan_in': - fan = in_channels - elif mode == 'fan_out': - fan = out_channels - else: - raise KeyError - for k in kernel_size: - fan *= k - return fan - - -def get_initializer(init_cfg, in_channels, out_channels, kernel_size): - """Get initializer of random method.""" - type = init_cfg.pop('type') - - if type == 'kaiming_uniform': - a = init_cfg.pop('a', 0) - mode = init_cfg.pop('mode', 'fan_in') - nonlinearity = init_cfg.pop('nonlinearity', 'leakyrelu') - fan = calculate_fan(kernel_size, in_channels, out_channels, mode) - gain = calculate_gain(nonlinearity, a) - std = gain / math.sqrt(fan) - bound = math.sqrt(3.0) * std - initializer = 
tf.random_uniform_initializer(-bound, bound) - elif type == 'kaiming_normal': - a = init_cfg.pop('a', 0) - mode = init_cfg.pop('mode', 'fan_in') - nonlinearity = init_cfg.pop('nonlinearity', 'leakyrelu') - fan = calculate_fan(kernel_size, in_channels, out_channels, mode) - gain = calculate_gain(nonlinearity, a) - std = gain / math.sqrt(fan) - initializer = tf.random_normal_initializer(0.0, std) - elif type == 'xavier_uniform': - gain = init_cfg.pop('gain', 1.) - fan_in = calculate_fan(kernel_size, in_channels, out_channels, 'fan_in') - fan_out = calculate_fan(kernel_size, in_channels, out_channels, 'fan_out') - std = gain * math.sqrt(2.0 / float(fan_in + fan_out)) - a = math.sqrt(3.0) * std # Calculate uniform bounds from standard deviation - initializer = tf.random_uniform_initializer(-a, a) - elif type == 'xavier_normal': - gain = init_cfg.pop('gain', 1.) - fan_in = calculate_fan(kernel_size, in_channels, out_channels, 'fan_in') - fan_out = calculate_fan(kernel_size, in_channels, out_channels, 'fan_out') - std = gain * math.sqrt(2.0 / float(fan_in + fan_out)) - initializer = tf.random_normal_initializer(0.0, std) - else: - raise NotImplementedError - - return initializer - - -def pair(x, dims=2): - """List pair in dimensions.""" - if isinstance(x, list) or isinstance(x, tuple): - if len(x) != dims: - raise Exception('length of x must equal to dims.') - elif isinstance(x, int): - x = [x] * dims - else: - raise ValueError - return x - - -def Conv2D(x, filters, kernel_size=(3, 3), strides=(1, 1), padding='same', dilations=(1, 1), use_bias=True, - kernel_initializer=None, bias_initializer=tf.zeros_initializer(), trainable=True, name='Conv2D'): - """Convolution 2D layer.""" - if kernel_initializer is None: - kernel_initializer = get_initializer( - dict(type='kaiming_uniform', a=math.sqrt(5)), int(x.shape[-1]), filters, kernel_size) - if bias_initializer is None: - fan = calculate_fan(kernel_size, int(x.shape[-1])) - bound = 1 / math.sqrt(fan) - bias_initializer = tf.random_uniform_initializer(-bound, bound) - - x = tf.layers.conv2d( - x, - filters=filters, - kernel_size=kernel_size, - strides=strides, - padding=padding.lower(), - dilation_rate=dilations, - use_bias=use_bias, - kernel_initializer=kernel_initializer, - bias_initializer=bias_initializer, - trainable=trainable, - name=name, - ) - return x - - -def ReLU(x, name=None): - """Activation layer of ReLU.""" - x = tf.nn.relu(x, name=name) - return x - - -def LeakyReLU(x, alpha=0.1, name=None): - """Leaky ReLU activation layer.""" - x = tf.nn.leaky_relu(x, alpha=alpha, name=name) - return x - - -class ActLayer(object): - """Activation layer.""" - - def __init__(self, cfg, name=None): - super(ActLayer, self).__init__() - self.type = cfg.get('type').lower() - if self.type == 'leakyrelu': - self.alpha = cfg.get('alpha', 0.2) - self.name = name - - def _forward(self, x): - if self.type == 'relu': - return ReLU(x, name=self.name) - elif self.type == 'leakyrelu': - return LeakyReLU(x, alpha=self.alpha, name=self.name) - else: - raise NotImplementedError - - def __call__(self, x): - """Forward function of act layer.""" - shape = list(map(int, x.shape)) - if len(shape) == 5: - # TODO - # Ascend currently do not support 5D relu - x_4d = tf.reshape(x, [-1] + shape[2:]) - x_4d = self._forward(x_4d) - x = tf.reshape(x_4d, shape) - else: - x = self._forward(x) - - return x - - -def ConvModule(x, filters, kernel_size=(3, 3), strides=(1, 1), padding='same', dilations=(1, 1), use_bias=True, - kernel_initializer=None, bias_initializer=None, act_cfg=None, 
trainable=True, name='Conv2D'): - """Convolution and activation module.""" - if act_cfg is not None: - nonlinearity = act_cfg.get('type').lower() - if nonlinearity == 'leakyrelu': - a = act_cfg.get('alpha', 0.01) - else: - nonlinearity = 'relu' - a = 0 - if kernel_initializer is None: - kernel_initializer = get_initializer( - dict(type='kaiming_uniform', a=a, nonlinearity=nonlinearity), int(x.shape[-1]), filters, kernel_size) - - x = Conv2D(x, filters, kernel_size, strides, padding, dilations, use_bias, - kernel_initializer=kernel_initializer, bias_initializer=None, - trainable=True, name=name) - - if act_cfg is not None: - x = ActLayer(act_cfg)(x) - - return x - - -def depth_to_space(x, scale, use_default=False): - """Depth to space function.""" - if use_default: - out = tf.depth_to_space(x, scale) - else: - b, h, w, c = list(map(int, x.shape)) - out = tf.reshape(x, [b, h, w, scale, scale, -1]) - out = tf.transpose(out, [0, 1, 3, 2, 4, 5]) - out = tf.reshape(out, [b, h * scale, w * scale, -1]) - return out - - -def tf_split(x, num_or_size_splits, axis=0, num=None, keep_dims=False): - """Split feature map of high dimension into list of feature map of low dimension.""" - x_list = tf.split(x, num_or_size_splits, axis, num) - - if not keep_dims: - x_list2 = [tf.squeeze(x_, axis) for x_ in x_list] - return x_list2 - - return x_list - - -class ResBlockNoBN(object): - """Residual block with batch norm.""" - - def __init__(self, num_blocks, mid_channels, res_scale=1.0, - act_cfg=dict(type='ReLU'), trainable=True, name='ResBlock'): - self.num_blocks = num_blocks - self.mid_channels = mid_channels - self.res_scale = res_scale - self.name = name - self.trainable = trainable - self.act_cfg = act_cfg - - def build_block(self, x, idx): - """Build the residual block without bn.""" - fan_in = int(x.shape[-1]) - out = Conv2D(x, self.mid_channels, - kernel_initializer=tf.random_normal_initializer(mean=0.0, stddev=math.sqrt(1 / (100 * fan_in))), - trainable=self.trainable, name='conv{}a'.format(idx)) - out = ActLayer(self.act_cfg)(out) - out = Conv2D(out, self.mid_channels, - kernel_initializer=tf.random_normal_initializer(mean=0.0, stddev=math.sqrt(1 / (100 * fan_in))), - trainable=self.trainable, name='conv{}b'.format(idx)) - return x + out * self.res_scale - - def __call__(self, x): - """Forward function of block.""" - with tf.variable_scope(self.name): - for i in range(self.num_blocks): - x = self.build_block(x, i + 1) - return x - - -def Conv3D(x, filters, kernel_size=(1, 1, 1), strides=(1, 1, 1), padding='same', dilations=(1, 1, 1), use_bias=True, - kernel_initializer=None, bias_initializer=tf.zeros_initializer(), - trainable=True, name='Conv3D'): - """Convolution 3D layer.""" - if kernel_initializer is None: - kernel_initializer = get_initializer( - dict(type='kaiming_uniform', a=math.sqrt(5)), int(x.shape[-1]), filters, kernel_size) - if bias_initializer is None: - fan = calculate_fan(kernel_size, int(x.shape[-1])) - bound = 1 / math.sqrt(fan) - bias_initializer = tf.random_uniform_initializer(-bound, bound) - - x = tf.layers.conv3d( - x, - filters=filters, - kernel_size=kernel_size, - strides=strides, - padding=padding.lower(), - dilation_rate=dilations, - use_bias=use_bias, - kernel_initializer=kernel_initializer, - bias_initializer=bias_initializer, - trainable=trainable, - name=name, - ) - return x - - -class ResBlockChnAtten(object): - """Residual block with channels attention.""" - - def __init__(self, num_blocks, mid_channels, res_scale=1.0, - act_cfg=dict(type='ReLU'), trainable=True, 
name='ResBlock'): - self.num_blocks = num_blocks - self.mid_channels = mid_channels - self.res_scale = res_scale - self.name = name - self.trainable = trainable - self.act_cfg = act_cfg - - def build_block(self, x, idx): - """Build the block.""" - fan_in = int(x.shape[-1]) - out = Conv2D(x, self.mid_channels, - kernel_initializer=tf.random_normal_initializer(mean=0.0, stddev=math.sqrt(1 / (100 * fan_in))), - trainable=self.trainable, name='conv{}a'.format(idx)) - out = ActLayer(self.act_cfg)(out) - out = Conv2D(out, self.mid_channels, - kernel_initializer=tf.random_normal_initializer(mean=0.0, stddev=math.sqrt(1 / (100 * fan_in))), - trainable=self.trainable, name='conv{}b'.format(idx)) - B, H, W, C = out.get_shape().as_list() - chn_atten = tf.nn.avg_pool2d(out, ksize=[H, W], strides=1, padding='VALID') - chn_atten = tf.reshape(chn_atten, [-1, C]) - chn_atten = tf.layers.dense(chn_atten, C) - chn_atten = ActLayer(self.act_cfg)(chn_atten) - chn_atten = tf.sigmoid(tf.layers.dense(chn_atten, C)) - chn_atten = tf.math.multiply(out, tf.reshape(chn_atten, [B, 1, 1, C])) - out = tf.concat([out, chn_atten], axis=-1) - out = Conv2D(out, self.mid_channels, kernel_size=(1, 1), - kernel_initializer=tf.random_normal_initializer(mean=0.0, stddev=math.sqrt(1 / (100 * fan_in))), - trainable=self.trainable, name='conv{}c'.format(idx)) - return out + x - - def __call__(self, x): - """Forward function of residual block with channels attention.""" - with tf.variable_scope(self.name): - for i in range(self.num_blocks): - x = self.build_block(x, i + 1) - return x diff --git a/vega/networks/tensorflow/customs/edvr/dcn.py b/vega/networks/tensorflow/customs/edvr/dcn.py deleted file mode 100644 index 4e3dd72..0000000 --- a/vega/networks/tensorflow/customs/edvr/dcn.py +++ /dev/null @@ -1,311 +0,0 @@ -# -*- coding: utf-8 -*- - -# Copyright (C) 2020. Huawei Technologies Co., Ltd. All rights reserved. -# This program is free software; you can redistribute it and/or modify -# it under the terms of the MIT License. -# This program is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# MIT License for more details. - -"""DCN util modules.""" -import math -import tensorflow as tf -from .arch_util import Conv2D, pair -import vega - - -class DeformableConvLayer(object): - """Fast version of Deformable convolution layer. - - Only support kernel_size=3*3, stride=1, padding=1, num_groups=1, num_deformable_groups=1. - """ - - def __init__(self, - in_channels, - out_channels, - kernel_size, - strides=1, - padding='valid', - dilations=1, - use_bias=True, - num_groups=1, - num_deform_groups=1, - trainable=True): - super(DeformableConvLayer, self).__init__() - self.in_channels = in_channels - self.out_channels = out_channels - self.kernel_size = pair(kernel_size) - self.strides = pair(strides) - self.padding = padding.lower() - self.dilations = pair(dilations) - self.use_bias = use_bias - self.num_groups = num_groups - self.num_deform_groups = num_deform_groups - self.trainable = trainable - self.kernel_intermediate_shape = [] - self.build() - self.debug = False - self.use_zero = False - self.impl = 'gpu' if vega.is_gpu_device() else 'npu' - - def build(self): - """Build function for kernel and bias.""" - n = self.in_channels - for k in self.kernel_size: - n *= k - stdv = 1. 
/ math.sqrt(n) - initializer = tf.random_uniform_initializer(-stdv, stdv) - - self.kernel_intermediate_shape = [*self.kernel_size, self.in_channels // self.num_groups, - self.out_channels // self.num_groups, self.num_groups] - - self.kernel = tf.get_variable( - "W", - [*self.kernel_size, self.in_channels // self.num_groups, self.out_channels], - initializer=initializer, - trainable=self.trainable) - if self.use_bias: - self.bias = tf.get_variable( - "b", - (self.out_channels,), - initializer=tf.constant_initializer(value=0.0), - trainable=self.trainable) - - def _cal_pads(self, ih, iw): - """Calculate paddings.""" - if self.padding == 'same': - strh, strw = self.strides - kh, kw = self.kernel_size - dilh, dilw = self.dilations - tails_h = ih % strh - tails_w = iw % strw - dkh = dilh * (kh - 1) + 1 - dkw = dilw * (kw - 1) + 1 - pad_h = dkh - tails_h if tails_h > 0 else dkh - strh - pad_w = dkw - tails_w if tails_w > 0 else dkw - strw - pads = [pad_h // 2, pad_h // 2 + pad_h % 2, pad_w // 2, pad_w // 2 + pad_w % 2] - else: - pads = [0, 0, 0, 0] - return pads - - def __call__(self, inputs, offset, mask=None): - """Forward function of deformable convolution layer.""" - if self.impl == 'gpu': - return self._call_tf(inputs, offset, mask) - elif self.impl == 'npu': - return self._call_npu(inputs, offset, mask) - - def _call_npu(self, inputs, offset, mask=None): - """Deformable convolution forward on npu.""" - from npu_bridge.tbe.npu_cube_ops import deformable_conv2d - _, ih, iw, _ = inputs.get_shape().as_list() - if mask is not None: - offset_all = tf.concat([offset, mask], axis=-1) - else: - # currently modulation must be valid - # The exception will be removed once the forward supports dcn_v1 - raise ValueError - offset_all = offset - - pads = self._cal_pads(ih, iw) - out = deformable_conv2d( - inputs, - self.kernel, - offset_all, - strides=[1] + list(self.strides) + [1], - pads=pads, - data_format='NHWC', - dilations=[1] + list(self.dilations) + [1], - groups=self.num_groups, - deformable_groups=self.num_deform_groups) - - if self.use_bias: - out = tf.nn.bias_add(out, self.bias) - return out - - def _call_tf(self, inputs, offset, mask=None): - """Deformable convolution forward on gpu.""" - def _get_in_bound_mask(x_, y_): - out_of_bound_x = tf.logical_or(tf.greater(x_, in_w - 1), tf.less(x_, 0)) - out_of_bound_y = tf.logical_or(tf.greater(y_, in_h - 1), tf.less(y_, 0)) - out_of_bound_mask = tf.logical_or(out_of_bound_x, out_of_bound_y) - return 1. - tf.to_float(out_of_bound_mask) - - inputs = self._pad_input(inputs) - bs, in_h, in_w, _ = list(map(int, inputs.shape)) - bs, out_h, out_w, _ = list(map(int, offset.shape)) - - # get x, y axis offset. Swap the order to 'x,y' instead of 'y,x', align with npu dcn op - x_off = offset[:, :, :, :offset.shape[-1] // 2] - y_off = offset[:, :, :, offset.shape[-1] // 2:] - - # input feature map gird coordinates - y, x = self._get_conv_indices(in_h, in_w) - # y, x = tf.cast(y, tf.float32), tf.cast(x, tf.float32) - y, x = [tf.to_float(i) for i in [y, x]] - y, x = [tf.tile(i, [1, 1, 1, self.num_deform_groups]) for i in [y, x]] - y, x = y + y_off, x + x_off - - # get four coordinates of points around (x, y) - y0, x0 = [tf.to_int32(tf.floor(i)) for i in [y, x]] - y1, x1 = y0 + 1, x0 + 1 - - # according to the strategy, prepare in_bound mask if use zero. - # In fact, gathernd on GPU and NPU will take 0 if the index is out-of-bound, - # while CPU will throw an error. 
Therefore, do an explicit masking - if self.use_zero: - m0 = _get_in_bound_mask(x0, y0) - m1 = _get_in_bound_mask(x1, y0) - m2 = _get_in_bound_mask(x0, y1) - m3 = _get_in_bound_mask(x1, y1) - - # clip the indices - y0, y, y1 = [tf.clip_by_value(i, 0, in_h - 1) for i in [y0, y, y1]] - x0, x, x1 = [tf.clip_by_value(i, 0, in_w - 1) for i in [x0, x, x1]] - - # get pixel values - indices = [[y0, x0], [y0, x1], [y1, x0], [y1, x1]] - p0, p1, p2, p3 = [self._get_pixel_values_at_point(inputs, i) for i in indices] - - # cast to float - x0, x, x1, y0, y, y1 = [tf.to_float(i) for i in [x0, x, x1, y0, y, y1]] - - # weights - # Re-formulate the weights calculation, ensuring w0+w1+w2+w3=1. - y_res = y - y0 - x_res = x - x0 - w0 = (1. - y_res) * (1. - x_res) - w1 = (1. - y_res) * x_res - w2 = y_res * (1. - x_res) - w3 = y_res * x_res - - if self.use_zero: - w0 = m0 * w0 - w1 = m1 * w1 - w2 = m2 * w2 - w3 = m3 * w3 - - w0, w1, w2, w3 = [tf.reshape(i, [*i.get_shape()[:3], self.num_deform_groups, *self.kernel_size, 1]) - for i in [w0, w1, w2, w3]] - # reshape of px is done in gather process. The next two lines will be removed in the next comment - # p0, p1, p2, p3 = [tf.reshape(i, [*i.get_shape()[:3], self.num_deform_groups, *self.kernel_size,-1]) - # for i in [p0, p1, p2, p3]] - - # bilinear interpolation - pixels = tf.add_n([w0 * p0, w1 * p1, w2 * p2, w3 * p3]) - - if mask is not None: - pixels = tf.reshape(mask, [*mask.get_shape()[:3], self.num_deform_groups, *self.kernel_size, 1]) * pixels - - # reshape the "big" feature map - # pixels = tf.reshape(pixels, [bs, out_h, out_w, *self.kernel_size, -1]) - pixels = tf.transpose(pixels, [0, 1, 4, 2, 5, 3, 6]) - pixels = tf.reshape(pixels, [bs, out_h * self.kernel_size[0], out_w * self.kernel_size[1], -1]) - - # conv - # TODO abstract a group_conv class? 
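The TODO above asks about factoring out a grouped convolution; the deleted code that follows emulates one by reshaping the kernel, slicing the input channels per group, running tf.nn.conv2d on each slice, and concatenating the results. A minimal standalone sketch of that idea, assuming NHWC tensors and a kernel stored as [kh, kw, Cin // groups, Cout]; the function name is illustrative:

import tensorflow as tf

def group_conv2d(x, kernel, num_groups):
    # x: [N, H, W, Cin], kernel: [kh, kw, Cin // num_groups, Cout],
    # with Cin and Cout both divisible by num_groups.
    x_groups = tf.split(x, num_groups, axis=-1)
    k_groups = tf.split(kernel, num_groups, axis=-1)
    outs = [tf.nn.conv2d(xg, kg, strides=[1, 1, 1, 1], padding='VALID')
            for xg, kg in zip(x_groups, k_groups)]
    return tf.concat(outs, axis=-1)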
- kernel_reshaped = tf.reshape(self.kernel, self.kernel_intermediate_shape) - ich = pixels.shape[-1] // self.num_groups - out = tf.concat([tf.nn.conv2d( - pixels[:, :, :, i * ich:(i + 1) * ich], - kernel_reshaped[:, :, :, :, i], - strides=self.kernel_size, - padding='VALID', - ) - for i in range(self.num_groups)], axis=-1) - if self.use_bias: - out = tf.nn.bias_add(out, self.bias) - - if self.debug: - return out, w0, w1, w2, w3, p0, p1, p2, p3, pixels, offset, x, y, x0, x1, y0, y1 - else: - return out - - def _pad_input(self, x): - """Get padding input.""" - if self.padding == 'same': - size = list(map(int, x.shape))[1:3] - pad = [] - for i in range(2): - dilated_filter_size = 1 + self.dilations[i] * (self.kernel_size[i] - 1) - same_output = (size[i] + self.strides[i] - 1) // self.strides[i] - valid_output = (size[i] - dilated_filter_size + self.strides[i]) // self.strides[i] - if same_output > valid_output: - p0 = (dilated_filter_size - 1) // 2 - pad.append([p0, dilated_filter_size - 1 - p0]) - else: - pad.append([0, 0]) - - if sum([sum(p) for p in pad]) != 0: - x = tf.pad(x, [[0, 0]] + pad + [[0, 0]]) - - return x - - def _get_conv_indices(self, feat_h, feat_w): - """Get the x, y coordinates in the window when a filter sliding on the feature map.""" - x, y = tf.meshgrid(tf.range(feat_w), tf.range(feat_h)) - x, y = [tf.reshape(i, [1, *i.get_shape(), 1]) for i in [x, y]] # shape [1, h, w, 1] - x, y = [tf.image.extract_image_patches(i, - [1, *self.kernel_size, 1], - [1, *self.strides, 1], - [1, *self.dilations, 1], - 'VALID') - for i in [x, y]] # shape [1, out_h, out_w, filter_h * filter_w] - return y, x - - def _get_pixel_values_at_point(self, inputs, indices): - """Get pixel values at specific point.""" - y, x = indices - bs, h, w, c = y.get_shape().as_list()[0: 4] - - if c % self.num_deform_groups != 0 or inputs.shape[-1] % self.num_deform_groups != 0: - raise ValueError - - per_group_offset_ch = c // self.num_deform_groups # kh*kw - per_group_input_ch = inputs.shape[-1] // self.num_deform_groups - batch_idx = tf.reshape(tf.range(0, bs), (bs, 1, 1, 1)) - b = tf.tile(batch_idx, (1, h, w, per_group_offset_ch)) - - outs = [] - for j in range(self.num_deform_groups): - pixel_idx = tf.stack([b, y[:, :, :, j * per_group_offset_ch:(j + 1) * per_group_offset_ch], - x[:, :, :, j * per_group_offset_ch:(j + 1) * per_group_offset_ch]], - axis=-1) # [bs, h, w, per_group_offset_ch, 3] - outs.append(tf.gather_nd(inputs[:, :, :, j * per_group_input_ch:(j + 1) * per_group_input_ch], pixel_idx)) - outs = tf.concat(outs, axis=-1) # [bs, h, w, per_group_offset_ch, cin] - - # reshape and transpose the outputs in order to align with the outer axis order - outs = tf.reshape(outs, [*outs.shape[:3], *self.kernel_size, self.num_deform_groups, -1]) - return tf.transpose(outs, [0, 1, 2, 5, 3, 4, 6]) - - -def DCNPack(x, extra_feat, out_channels, kernel_size=(3, 3), strides=(1, 1), padding='same', dilations=(1, 1), - use_bias=True, num_groups=1, num_deform_groups=1, trainable=True, dcn_version='v2', name='DCN'): - """Deformable convolution encapsulation that acts as normal convolution layers.""" - with tf.variable_scope(name): - x = tf.cast(x, tf.float32) - if dcn_version == 'v1': - offset = Conv2D(extra_feat, num_deform_groups * 2 * kernel_size[0] * kernel_size[1], - kernel_size=kernel_size, strides=strides, padding=padding, dilations=dilations, - use_bias=use_bias, trainable=trainable, name='conv_offset') - offset = tf.cast(offset, tf.float32) - mask = None - elif dcn_version == 'v2': - conv_offset = 
Conv2D(extra_feat, num_deform_groups * 3 * kernel_size[0] * kernel_size[1], - kernel_size=kernel_size, strides=strides, padding=padding, dilations=dilations, - use_bias=use_bias, trainable=trainable, name='conv_offset') - conv_offset = tf.cast(conv_offset, tf.float32) - offset = conv_offset[:, :, :, :num_deform_groups * 2 * kernel_size[0] * kernel_size[1]] - mask = conv_offset[:, :, :, num_deform_groups * 2 * kernel_size[0] * kernel_size[1]:] - mask = tf.nn.sigmoid(mask) - else: - raise NotImplementedError - - out = DeformableConvLayer( - in_channels=int(x.shape[-1]), out_channels=out_channels, - kernel_size=kernel_size, strides=strides, padding=padding, dilations=dilations, - use_bias=use_bias, num_groups=num_groups, num_deform_groups=num_deform_groups, - trainable=trainable)(x, offset, mask) - - return out diff --git a/vega/networks/tensorflow/customs/edvr/edvr.py b/vega/networks/tensorflow/customs/edvr/edvr.py deleted file mode 100644 index a5385d4..0000000 --- a/vega/networks/tensorflow/customs/edvr/edvr.py +++ /dev/null @@ -1,160 +0,0 @@ -# -*- coding: utf-8 -*- - -# Copyright (C) 2020. Huawei Technologies Co., Ltd. All rights reserved. -# This program is free software; you can redistribute it and/or modify -# it under the terms of the MIT License. -# This program is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# MIT License for more details. - -"""EDVR network.""" -import tensorflow as tf -from vega.common import ClassType, ClassFactory -from .edvr_util import PCDAlignment, TSAFusion, LAAlignment, SeparateNonLocal -from .arch_util import Conv2D, ActLayer, ConvModule, depth_to_space, resize, tf_split, ResBlockNoBN, ResBlockChnAtten - - -@ClassFactory.register(ClassType.NETWORK) -class EDVR(object): - """EDVR network.""" - - def __init__(self, num_in_ch=3, num_out_ch=3, num_feat=64, num_frame=5, deformable_groups=8, num_extract_block=5, - num_reconstruct_block=10, center_frame_idx=2, hr_in=False, with_predeblur=False, with_tsa=True, - align_type='pcd', align_step=False, with_snl=False, with_chn_atten=False): - self.with_tsa = with_tsa - self.mid_channels = num_feat - self.num_deform_groups = deformable_groups - self.num_blocks_extraction = num_extract_block - self.num_blocks_reconstruction = num_reconstruct_block - self.center_frame_idx = center_frame_idx if center_frame_idx is not None else num_frame // 2 - self.num_frames = num_frame - self.pcd_align = PCDAlignment(self.mid_channels, self.num_deform_groups) - self.tsa_fusion = TSAFusion(self.mid_channels, self.num_frames, self.center_frame_idx) - self.la_align = LAAlignment(num_feat=num_feat, radius=3, normalize=False) - self.patial_align = self.pcd_align if align_type == 'pcd' else self.la_align - self.align_step = align_step - self.upsample_mode = 'bilinear' - self.with_snl = with_snl - self.separate_no_local = SeparateNonLocal(self.mid_channels) - self.with_chn_atten = with_chn_atten - - def feature_extraction(self, x, act_cfg=dict(type='LeakyRelu', alpha=0.1)): - """Feature extraction part of EDVR.""" - # extract LR features - with tf.variable_scope('extraction'): - # L1 - l1_feat = tf.reshape(x, [-1, x.shape[2], x.shape[3], x.shape[4]]) - l1_feat = Conv2D(l1_feat, self.mid_channels, name='conv_first') - l1_feat = ActLayer(act_cfg)(l1_feat) - if self.with_chn_atten: - l1_feat = ResBlockChnAtten(num_blocks=self.num_blocks_extraction, - mid_channels=self.mid_channels)(l1_feat) - else: - l1_feat = 
ResBlockNoBN(num_blocks=self.num_blocks_extraction, mid_channels=self.mid_channels)(l1_feat) - # l1_feat = ResBlockNoBN(num_blocks=self.num_blocks_extraction, mid_channels=self.mid_channels)(l1_feat) - # L2 - l2_feat = ConvModule(l1_feat, self.mid_channels, strides=[2, 2], act_cfg=act_cfg, name='feat_l2_conv1') - l2_feat = ConvModule(l2_feat, self.mid_channels, act_cfg=act_cfg, name='feat_l2_conv2') - # L3 - l3_feat = ConvModule(l2_feat, self.mid_channels, strides=[2, 2], act_cfg=act_cfg, name='feat_l3_conv1') - l3_feat = ConvModule(l3_feat, self.mid_channels, act_cfg=act_cfg, name='feat_l3_conv2') - - l1_feat = tf.reshape(l1_feat, - [int(l1_feat.shape[0]) // self.num_frames, self.num_frames, - int(l1_feat.shape[1]), int(l1_feat.shape[2]), -1]) - l2_feat = tf.reshape(l2_feat, - [int(l2_feat.shape[0]) // self.num_frames, self.num_frames, - int(l2_feat.shape[1]), int(l2_feat.shape[2]), -1]) - l3_feat = tf.reshape(l3_feat, - [int(l3_feat.shape[0]) // self.num_frames, self.num_frames, - int(l3_feat.shape[1]), int(l3_feat.shape[2]), -1]) - - return l1_feat, l2_feat, l3_feat - - def reconstruction(self, feat, x_center, act_cfg=dict(type='LeakyRelu', alpha=0.1)): - """Reconstruction part of EDVR.""" - # reconstruction - with tf.variable_scope('reconstruction'): - if self.with_chn_atten: - out = ResBlockChnAtten(num_blocks=self.num_blocks_reconstruction, mid_channels=self.mid_channels)(feat) - else: - out = ResBlockNoBN(num_blocks=self.num_blocks_reconstruction, mid_channels=self.mid_channels)(feat) - out = Conv2D(out, self.mid_channels * 2 ** 2, name='upsample1') - out = depth_to_space(out, 2) - out = Conv2D(out, self.mid_channels * 2 ** 2, name='upsample2') - out = depth_to_space(out, 2) - out = Conv2D(out, self.mid_channels, name='conv_hr') - out = ActLayer(act_cfg)(out) - out = Conv2D(out, 3, name='conv_last') - - base = resize( - x_center, size=[x_center.shape[1] * 4, x_center.shape[2] * 4], align_corners=False, - name='img_upsample', method=self.upsample_mode) - base = tf.cast(base, tf.float32) - out = tf.cast(out, tf.float32) - out += base - - return out - - def __call__(self, x): - """Forward function of EDVR.""" - # shape of x: [B,T_in,H,W,C] - with tf.variable_scope('G'): - x = tf.transpose(x, [0, 1, 3, 4, 2]) - x_list = tf.split(x, self.num_frames, axis=1) - x_center = tf.squeeze(x_list[self.num_frames // 2], axis=1) - - # extract LR features - l1_feat, l2_feat, l3_feat = self.feature_extraction(x) - - l1_feat_list = tf_split(l1_feat, self.num_frames, 1, keep_dims=False) - l2_feat_list = tf_split(l2_feat, self.num_frames, 1, keep_dims=False) - l3_feat_list = tf_split(l3_feat, self.num_frames, 1, keep_dims=False) - - ref_feats = [ - l1_feat_list[self.num_frames // 2], - l2_feat_list[self.num_frames // 2], - l3_feat_list[self.num_frames // 2] - ] - aligned_feat = [] - - if self.align_step: - act_cfg = dict(type='LeakyRelu', alpha=0.1) - for i in range(self.num_frames): - neighbor_feats = [l1_feat_list[i], l2_feat_list[i], l3_feat_list[i]] - if i == 0 or i == self.num_frames - 1: - next = 1 if i == 0 else -1 - temp_ref_feats = [l1_feat_list[i + next], l2_feat_list[i + next], l3_feat_list[i + next]] - l1_aligned_feat = self.patial_align(neighbor_feats, temp_ref_feats) - l2_aligned_feat = ConvModule(l1_aligned_feat, self.mid_channels, strides=[2, 2], - act_cfg=act_cfg, name='l2_aligned_{}'.format(i)) - l3_aligned_feat = ConvModule(l2_aligned_feat, self.mid_channels, strides=[2, 2], - act_cfg=act_cfg, name='l3_aligned_{}'.format(i)) - neighbor_feats = [l1_aligned_feat, l2_aligned_feat, 
l3_aligned_feat] - aligned_feat.append(self.patial_align(neighbor_feats, ref_feats)) - else: - for i in range(self.num_frames): - neighbor_feats = [ - l1_feat_list[i], - l2_feat_list[i], - l3_feat_list[i] - ] - aligned_feat.append(self.patial_align(neighbor_feats, ref_feats)) - - aligned_feat = tf.stack(aligned_feat, axis=1) # (n, t, h, w, c) - - if self.with_snl: - aligned_feat = self.separate_no_local(aligned_feat) - if self.with_tsa: - feat = self.tsa_fusion(aligned_feat) - else: - aligned_feat = tf.transpose(aligned_feat, [0, 2, 3, 1, 4]) - aligned_feat = tf.reshape(aligned_feat, - [aligned_feat.shape[0], aligned_feat.shape[1], aligned_feat.shape[2], -1]) - feat = Conv2D(aligned_feat, self.mid_channels, kernel_size=[1, 1], name='fusion') - - # reconstruction - out = self.reconstruction(feat, x_center) - out = tf.transpose(out, [0, 3, 1, 2]) - return out diff --git a/vega/networks/tensorflow/customs/edvr/edvr_util.py b/vega/networks/tensorflow/customs/edvr/edvr_util.py deleted file mode 100644 index 0b6d8f9..0000000 --- a/vega/networks/tensorflow/customs/edvr/edvr_util.py +++ /dev/null @@ -1,273 +0,0 @@ -# -*- coding: utf-8 -*- - -# Copyright (C) 2020. Huawei Technologies Co., Ltd. All rights reserved. -# This program is free software; you can redistribute it and/or modify -# it under the terms of the MIT License. -# This program is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# MIT License for more details. - -"""EDVR util modules.""" -import tensorflow as tf -from .arch_util import Conv2D, Conv3D, ActLayer, ConvModule, resize, tf_split -from .dcn import DCNPack - - -class PCDAlignment(object): - """Module of yramid, cascading and deformable alignment.""" - - def __init__(self, num_feat=64, deformable_groups=8, dcn_version='v2'): - self.mid_channels = num_feat - self.num_deform_groups = deformable_groups - self.dcn_version = dcn_version - self.num_groups = 1 - self.upsample_mode = 'bilinear' - - def __call__(self, neighbor_feats, ref_feats, act_cfg=dict(type='LeakyRelu', alpha=0.1), - name='pcd_align', reuse=tf.AUTO_REUSE): - """Forward function of PCDAlignment.""" - with tf.variable_scope(name, reuse=reuse): - # The number of pyramid levels is 3. 
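The PCD alignment below runs coarse to fine over the three pyramid levels: offsets are predicted from the concatenated neighbor and reference features, refined with the offsets upsampled from the coarser level, used to deform the neighbor features, and then both offsets and features are upsampled to the next finer level (the final cascading refinement is omitted here). A compressed sketch of that control flow, with hypothetical conv(), dcn() and up2x() helpers standing in for ConvModule, DCNPack and resize:

import tensorflow as tf

def pcd_align_sketch(neighbor, ref, conv, dcn, up2x):
    # neighbor, ref: lists of 3 feature maps, index 0 = finest level.
    up_off, up_feat = None, None
    for lvl in (3, 2, 1):
        off = conv(tf.concat([neighbor[lvl - 1], ref[lvl - 1]], axis=-1))
        if up_off is not None:
            off = conv(tf.concat([off, up_off], axis=-1))
        feat = dcn(neighbor[lvl - 1], off)
        if up_feat is not None:
            feat = conv(tf.concat([feat, up_feat], axis=-1))
        if lvl > 1:
            up_off = up2x(off) * 2   # offsets are in pixels, so double them with the resolution
            up_feat = up2x(feat)
    return feat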
- if len(neighbor_feats) != 3 or len(ref_feats) != 3: - raise Exception('The length of neighbor_feats and ref_feats must be both 3, ' - 'but got {} and {}'.format(len(neighbor_feats), len(ref_feats))) - - # Pyramids - upsampled_offset, upsampled_feat = None, None - for i in range(3, 0, -1): - with tf.variable_scope('level{}'.format(i)): - offset = tf.concat([neighbor_feats[i - 1], ref_feats[i - 1]], axis=-1) - offset = ConvModule(offset, self.mid_channels, act_cfg=act_cfg, name='offset_conv1') - if i == 3: - offset = ConvModule(offset, self.mid_channels, act_cfg=act_cfg, name='offset_conv2') - else: - offset = tf.concat([offset, upsampled_offset], axis=-1) - offset = ConvModule(offset, self.mid_channels, act_cfg=act_cfg, name='offset_conv2') - offset = ConvModule(offset, self.mid_channels, act_cfg=act_cfg, name='offset_conv3') - - feat = DCNPack(neighbor_feats[i - 1], offset, self.mid_channels, kernel_size=[3, 3], padding='same', - num_deform_groups=self.num_deform_groups, num_groups=self.num_groups, - name='dcn_l{}'.format(i), dcn_version=self.dcn_version) - if i == 3: - feat = ActLayer(act_cfg)(feat) - else: - feat = tf.concat([feat, upsampled_feat], axis=-1) - feat = ConvModule(feat, self.mid_channels, act_cfg=act_cfg if i == 2 else None, - name='feat_conv') - - if i > 1: - # upsample offset and features - # upsampled_offset = tf.image.resize_bilinear( - upsampled_offset = resize( - offset, size=[offset.shape[1] * 2, offset.shape[2] * 2], align_corners=False, - name='upsample_offset{}'.format(i), method=self.upsample_mode) - upsampled_offset = upsampled_offset * 2 - # upsampled_feat = tf.image.resize_bilinear( - upsampled_feat = resize( - feat, size=[feat.shape[1] * 2, feat.shape[2] * 2], align_corners=False, - name='upsample_feat{}'.format(i), method=self.upsample_mode) - - # Cascading - offset = tf.concat([feat, ref_feats[0]], axis=-1) - offset = ConvModule(offset, self.mid_channels, act_cfg=act_cfg, name='cas_offset_conv1') - offset = ConvModule(offset, self.mid_channels, act_cfg=act_cfg, name='cas_offset_conv2') - feat = DCNPack(feat, offset, self.mid_channels, kernel_size=[3, 3], padding='same', - num_deform_groups=self.num_deform_groups, name='dcn_cas', dcn_version=self.dcn_version) - feat = ActLayer(act_cfg)(feat) - - return feat - - -class TSAFusion(object): - """Module of fusion with temporal and spatial attention.""" - - def __init__(self, num_feat=64, num_frame=5, center_frame_idx=2): - self.mid_channels = num_feat - self.num_frames = num_frame - self.center_frame_idx = center_frame_idx - self.upsample_mode = 'bilinear' - - def __call__(self, aligned_feat, act_cfg=dict(type='LeakyRelu', alpha=0.1)): - """Forward function of TSAFusion.""" - with tf.variable_scope('tsa_fusion'): - n, t, h, w, c = list(map(int, aligned_feat.shape)) - # temporal attention - aligned_feat_list = tf.split(aligned_feat, self.num_frames, axis=1) - # aligned_feat_list = tf_split(aligned_feat, self.num_frames, axis=1) - embedding_ref = Conv2D( - tf.squeeze(aligned_feat_list[self.num_frames // 2], axis=1), - # aligned_feat_list[self.num_frames // 2], - self.mid_channels, - name='temporal_attn1') - emb = Conv2D(tf.reshape(aligned_feat, [-1, h, w, c]), self.mid_channels, name='temporal_attn2') - emb = tf.reshape(emb, [n, t, h, w, -1]) - emb = tf.cast(emb, tf.float32) - emb_list = tf_split(emb, self.num_frames, axis=1, keep_dims=False) - - corr_l = [] # correlation list - for i in range(t): - emb_neighbor = emb_list[i] - corr = tf.reduce_sum(emb_neighbor * embedding_ref, axis=-1, keep_dims=True) # (n, h, w, 
1) - # corr_prob = tf.nn.sigmoid(corr) - # corr_l.append(corr_prob * aligned_feat_list[i]) - # aligned_feat = tf.concat(corr_l, axis=-1) - corr_l.append(corr) - corr_prob = tf.nn.sigmoid(tf.stack(corr_l, axis=1)) # (n, t, h, w, 1) - aligned_feat = corr_prob * aligned_feat - - # fusion - aligned_feat = tf.transpose(aligned_feat, [0, 2, 3, 1, 4]) - aligned_feat = tf.reshape(aligned_feat, [n, h, w, -1]) - feat = ConvModule(aligned_feat, self.mid_channels, kernel_size=(1, 1), act_cfg=act_cfg, name='feat_fusion') - - # spatial attention - attn = ConvModule(aligned_feat, self.mid_channels, kernel_size=(1, 1), act_cfg=act_cfg, - name='spatial_attn1') - attn_max = tf.nn.max_pool2d(attn, 3, 2, 'SAME') - attn_avg = tf.nn.avg_pool(attn, 3, 2, 'SAME') - attn = ConvModule(tf.concat([attn_max, attn_avg], axis=-1), self.mid_channels, kernel_size=(1, 1), - act_cfg=act_cfg, name='spatial_attn2') - # pyramid levels - attn_level = ConvModule(attn, self.mid_channels, kernel_size=(1, 1), act_cfg=act_cfg, - name='spatial_attn_l1') - attn_max = tf.nn.max_pool2d(attn_level, 3, 2, 'SAME') - attn_avg = tf.nn.avg_pool(attn_level, 3, 2, 'SAME') - attn_level = ConvModule(tf.concat([attn_max, attn_avg], axis=-1), self.mid_channels, act_cfg=act_cfg, - name='spatial_attn_l2') - attn_level = ConvModule(attn_level, self.mid_channels, act_cfg=act_cfg, name='spatial_attn_l3') - # attn_level = tf.image.resize_bilinear( - attn_level = resize( - attn_level, size=[attn_level.shape[1] * 2, attn_level.shape[2] * 2], align_corners=False, - name='upsample1', method=self.upsample_mode) - - attn = ConvModule(attn, self.mid_channels, act_cfg=act_cfg, name='spatial_attn3') + attn_level - attn = ConvModule(attn, self.mid_channels, kernel_size=(1, 1), act_cfg=act_cfg, name='spatial_attn4') - # attn = tf.image.resize_bilinear( - attn = resize( - attn, size=[attn.shape[1] * 2, attn.shape[2] * 2], align_corners=False, - name='upsample2', method=self.upsample_mode) - attn = Conv2D(attn, self.mid_channels, name='spatial_attn5') - attn = ConvModule(attn, self.mid_channels, kernel_size=(1, 1), act_cfg=act_cfg, name='spatial_attn_add1') - attn_add = Conv2D(attn, self.mid_channels, kernel_size=(1, 1), name='spatial_attn_add2') - - attn = tf.cast(attn, tf.float32) - attn = tf.nn.sigmoid(attn) - - feat = tf.cast(feat, tf.float32) - attn_add = tf.cast(attn_add, tf.float32) - - # after initialization, * 2 makes (attn * 2) to be close to 1. 
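The comment above is the initialization argument for the spatial attention: with near-zero attention logits, the sigmoid gives roughly 0.5, so feat * attn * 2 + attn_add starts out close to an identity mapping of the fused features. A small numeric check of that claim, assuming zero logits and a zero additive branch:

import numpy as np

feat = np.random.rand(2, 4, 4, 8).astype(np.float32)
attn = 1.0 / (1.0 + np.exp(-np.zeros_like(feat)))   # sigmoid(0) = 0.5
attn_add = np.zeros_like(feat)
out = feat * attn * 2 + attn_add
assert np.allclose(out, feat)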
- feat = feat * attn * 2 + attn_add - return feat - - -class SeparateNonLocal(object): - """Module of separated non-local.""" - - def __init__(self, num_feat=64): - self.num_feat = num_feat - - def __call__(self, x): - """Forward function of separated no-local.""" - with tf.variable_scope('NonLocal'): - B, T, H, W, C = x.get_shape().as_list() - - x1 = tf.cast(Conv3D(x, self.num_feat, name='spatial_1'), tf.float16) - x2 = tf.cast(Conv3D(x, self.num_feat, name='spatial_2'), tf.float16) - x3 = tf.cast(Conv3D(x, self.num_feat, name='spatial_3'), tf.float16) - x1 = tf.reshape(tf.transpose(x1, [0, 2, 3, 1, 4]), [-1, H * W, T * C]) - x2 = tf.reshape(tf.transpose(x2, [0, 1, 4, 2, 3]), [-1, T * C, H * W]) - f = tf.nn.softmax(tf.matmul(x1, x2)) # B * (H*W) * (H*W) - x3 = tf.reshape(tf.transpose(x3, [0, 2, 3, 1, 4]), [-1, H * W, T * C]) - y1 = tf.reshape(tf.matmul(f, x3), [-1, H, W, T, C]) - y1 = tf.cast(tf.transpose(y1, [0, 3, 1, 2, 4]), tf.float32) - - x1 = tf.cast(Conv3D(x, self.num_feat, name='channel_1'), tf.float16) - x2 = tf.cast(Conv3D(x, self.num_feat, name='channel_2'), tf.float16) - x3 = tf.cast(Conv3D(x, self.num_feat, name='channel_3'), tf.float16) - x1 = tf.reshape(tf.transpose(x1, [0, 4, 2, 3, 1]), [-1, C, H * W * T]) - x2 = tf.reshape(x2, [-1, T * H * W, C]) - f = tf.nn.softmax(tf.matmul(x1, x2)) # B * C * C - x3 = tf.reshape(tf.transpose(x3, [0, 4, 2, 3, 1]), [-1, C, H * W * T]) - y2 = tf.reshape(tf.matmul(f, x3), [-1, C, H, W, T]) - y2 = tf.cast(tf.transpose(y2, [0, 4, 2, 3, 1]), tf.float32) - - x1 = tf.cast(Conv3D(x, self.num_feat, name='temporal_1'), tf.float16) - x2 = tf.cast(Conv3D(x, self.num_feat, name='temporal_2'), tf.float16) - x3 = tf.cast(Conv3D(x, self.num_feat, name='temporal_3'), tf.float16) - x1 = tf.reshape(x1, [-1, T, H * W * C]) - x2 = tf.reshape(tf.transpose(x2, [0, 2, 3, 4, 1]), [-1, H * W * C, T]) - f = tf.nn.softmax(tf.matmul(x1, x2)) # B * T * T - x3 = tf.reshape(x3, [-1, T, H * W * C]) - y3 = tf.cast(tf.reshape(tf.matmul(f, x3), [-1, T, H, W, C]), tf.float32) - - return y1 + y2 + y3 + x - - -class LAAlignment(object): - """Module of local aggregator.""" - - def __init__(self, num_feat=64, radius=3, normalize=False): - self.num_feat = num_feat - self.upsample_mode = 'bilinear' - self.local_agg = LocalAggregator(radius=radius, nf=num_feat, normalize=normalize) - - def __call__(self, neighbor_feats, ref_feats): - """Forward function of local aggregator alignment.""" - with tf.variable_scope('LA_Alignment', reuse=tf.AUTO_REUSE): - aligned_feats = [] - for i in range(3, 0, -1): - neighbor_feat = neighbor_feats[i - 1] - ref_feat = ref_feats[i - 1] - aligned_feat = self.local_agg(ref_feat, neighbor_feat, name='local_agg_{}'.format(i)) - while i > 1: - aligned_feat = resize(aligned_feat, size=[aligned_feat.shape[1] * 2, aligned_feat.shape[2] * 2], - align_corners=False, method=self.upsample_mode) - i -= 1 - aligned_feats.append(aligned_feat) - feat = tf.concat(aligned_feats, axis=-1) - feat = ConvModule(feat, self.num_feat, act_cfg=dict(type='LeakyRelu', alpha=0.1), name='la') - return feat - - -class LocalAggregator(object): - """Local Aggregator.""" - - def __init__(self, radius, nf, normalize=False): - self.normalize = normalize - self.radius = radius - self.num_feat = nf - self.offsets = [(i, j) for i in range(-radius, radius + 1) for j in range(-radius, radius + 1)] - self.act_cfg = dict(type='LeakyRelu', alpha=0.1) - - def __call__(self, ref_feat, feat, name='local_agg'): - """Forward function of local aggregator.""" - with tf.variable_scope(name): - B, H, W, C 
= list(map(int, ref_feat.shape)) - ref_feat = ConvModule(ref_feat, self.num_feat // 2, act_cfg=self.act_cfg, name='conv_ref') - pad_feat = tf.keras.layers.ZeroPadding2D(padding=self.radius)(feat) - feat = ConvModule(pad_feat, self.num_feat // 2, act_cfg=self.act_cfg, name='conv_feat') - if self.normalize: - ref_feat = tf.nn.l2_normalize(ref_feat, dim=-1) - feat = tf.nn.l2_normalize(feat, dim=-1) - correlations = [] - shiftedFeats = [] - for i, j in self.offsets: - shiftedFeat = tf.image.crop_to_bounding_box(pad_feat, self.radius + i, self.radius + j, H, W) - shiftedFeats.append(shiftedFeat) - - shiftedFeat1 = tf.image.crop_to_bounding_box(feat, self.radius + i, self.radius + j, H, W) - correlation = tf.reduce_sum(tf.math.multiply(ref_feat, shiftedFeat1), axis=-1, keepdims=False) - correlations.append(correlation) - correlations = tf.stack(correlations, axis=1) - correlations = tf.reshape(correlations, (-1, len(self.offsets))) - correlations = tf.nn.softmax(correlations, axis=1) - correlations = tf.reshape(correlations, (B, -1, H, W)) - indices = [i for i in range(B) for _ in range(C)] - correlations = tf.gather(correlations, indices) - shiftedFeats = tf.stack(shiftedFeats, axis=1) - shiftedFeats = tf.reshape(shiftedFeats, (-1, len(self.offsets), H, W)) - result = tf.reduce_sum(tf.math.multiply(shiftedFeats, correlations), axis=1, keepdims=False) - result = tf.reshape(result, (B, H, W, C)) - return result diff --git a/vega/networks/tensorflow/customs/gcn_regressor.py b/vega/networks/tensorflow/customs/gcn_regressor.py deleted file mode 100644 index f40b2e8..0000000 --- a/vega/networks/tensorflow/customs/gcn_regressor.py +++ /dev/null @@ -1,86 +0,0 @@ -# -*- coding:utf-8 -*- - -# Copyright (C) 2020. Huawei Technologies Co., Ltd. All rights reserved. -# This program is free software; you can redistribute it and/or modify -# it under the terms of the MIT License. -# This program is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# MIT License for more details. - -"""The Graph Convolution Network model.""" -import logging -import math -import tensorflow as tf -from vega.common import ClassType, ClassFactory - - -logger = logging.getLogger(__name__) - - -class GraphConvolution(object): - """Graph Convolution Layer.""" - - def __init__(self, in_features, out_features, bias=True, initializer=None, name='GC'): - self.in_features = in_features - self.out_features = out_features - self.if_bias = bias - self.name = name - self.reset_parameters(initializer) - - def reset_parameters(self, initializer=None): - """Reset parameters of layer.""" - stdv = 1. 
/ math.sqrt(self.out_features) - if initializer is None: - initializer = tf.random_uniform_initializer(-stdv, stdv) - self.weight = tf.get_variable(self.name + '/W', [self.in_features, self.out_features], - initializer=initializer, trainable=True) - self.bias = None - if self.if_bias: - self.bias = tf.get_variable(self.name + '/B', [self.out_features], - initializer=initializer, trainable=True) - - def __call__(self, input, adj): - """Forward function of graph convolution layer.""" - with tf.variable_scope(self.name): - support = tf.matmul(input, self.weight) - output = tf.matmul(adj, support) - if self.bias is not None: - return output + self.bias - else: - return output - - -@ClassFactory.register(ClassType.NETWORK) -class GCNRegressor(object): - """Graph Convolution Network for regression.""" - - def __init__(self, nfeat, ifsigmoid, layer_size=64): - self.nfeat = nfeat - self.ifsigmoid = ifsigmoid - self.size = layer_size - self.gc_initializer = tf.random_uniform_initializer(-0.05, 0.05) - - def _gc_bn_act(self, feat, adj, idx): - nfeat = feat.get_shape().as_list()[-1] - feat = GraphConvolution(nfeat, self.size, True, self.gc_initializer, 'GC_{}'.format(idx))(feat, adj) - feat = tf.nn.relu(tf.layers.BatchNormalization()(tf.transpose(feat, [0, 2, 1]))) - feat = tf.transpose(feat, [0, 2, 1]) - return feat - - def __call__(self, input): - """Forward function of GCN.""" - with tf.variable_scope('GCNRegressor'): - shape = input.get_shape().as_list() - adj, feat = tf.split(input, [shape[1], shape[2] - shape[1]], axis=2) - n = 4 - for i in range(n): - feat = self._gc_bn_act(feat, adj, i) - feat_list = tf.split(feat, feat.shape[1], axis=1) - embeddings = tf.squeeze(feat_list[-1], axis=1) - y = tf.layers.Dense(1)(embeddings) - y = tf.squeeze(y, axis=1) - if self.ifsigmoid: - return tf.math.sigmoid(y) - else: - return y diff --git a/vega/networks/tensorflow/detectors/__init__.py b/vega/networks/tensorflow/detectors/__init__.py deleted file mode 100644 index 7dc452a..0000000 --- a/vega/networks/tensorflow/detectors/__init__.py +++ /dev/null @@ -1,2 +0,0 @@ -from .faster_rcnn import FasterRCNN -from .faster_rcnn_trainer_callback import FasterRCNNTrainerCallback diff --git a/vega/networks/tensorflow/detectors/faster_rcnn.py b/vega/networks/tensorflow/detectors/faster_rcnn.py deleted file mode 100644 index 9ffd32f..0000000 --- a/vega/networks/tensorflow/detectors/faster_rcnn.py +++ /dev/null @@ -1,240 +0,0 @@ -# -*- coding: utf-8 -*- - -# Copyright (C) 2020. Huawei Technologies Co., Ltd. All rights reserved. -# This program is free software; you can redistribute it and/or modify -# it under the terms of the MIT License. -# This program is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# MIT License for more details. 
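The GraphConvolution layer removed above uses the standard two-matmul form of a graph convolution: features are first projected by the learned weight (support = X W) and then aggregated over the graph with the adjacency matrix (output = A support, plus an optional bias). A minimal numpy sketch of that computation, assuming a pre-normalized adjacency matrix:

import numpy as np

def graph_conv(x, adj, weight, bias=None):
    # x: [num_nodes, in_features], adj: [num_nodes, num_nodes] (normalized),
    # weight: [in_features, out_features].
    support = x @ weight
    out = adj @ support
    return out + bias if bias is not None else out

x = np.random.rand(5, 8).astype(np.float32)
adj = np.eye(5, dtype=np.float32)            # identity adjacency: no mixing between nodes
w = np.random.rand(8, 4).astype(np.float32)
assert np.allclose(graph_conv(x, adj, w), x @ w)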
- -"""Defined faster rcnn detector.""" -import functools -import tensorflow as tf - -from object_detection.core import balanced_positive_negative_sampler as sampler -from object_detection.core import losses -from object_detection.core import post_processing -from object_detection.core import standard_fields as fields -from object_detection.core import target_assigner -from object_detection.utils import spatial_transform_ops as spatial_ops -from object_detection.meta_architectures import faster_rcnn_meta_arch - -from vega.common import ClassType, ClassFactory -from vega.networks.network_desc import NetworkDesc -from vega.networks.tensorflow.utils.hyperparams import scope_generator -from vega.networks.tensorflow.utils.image_resizer import image_resizer_util -from vega.networks.tensorflow.utils.post_processing import post_processing_util - - -@ClassFactory.register(ClassType.NETWORK) -class FasterRCNN(object): - """Faster RCNN.""" - - def __init__(self, desc): - """Init faster rcnn. - - :param desc: config dict - """ - super(FasterRCNN, self).__init__() - - self.num_classes = int(desc.num_classes) - self.number_of_stages = int(desc.number_of_stages) - - # Backbone for feature extractor - self.feature_extractor = NetworkDesc(desc.backbone).to_model() - - # First stage anchor generator - self.first_stage_anchor_generator = NetworkDesc( - desc["first_stage_anchor_generator"]).to_model() - - # First stage target assigner - self.use_matmul_gather_in_matcher = False # Default - self.first_stage_target_assigner = target_assigner.create_target_assigner( - 'FasterRCNN', 'proposal', use_matmul_gather=self.use_matmul_gather_in_matcher) - - # First stage box predictor - self.first_stage_box_predictor_arg_scope_fn = scope_generator.get_hyper_params_scope( - desc.first_stage_box_predictor_conv_hyperparams) - self.first_stage_atrous_rate = 1 # Default: 1 - self.first_stage_box_predictor_kernel_size = 3 # Default - self.first_stage_box_predictor_depth = 512 # Default - self.first_stage_minibatch_size = 256 # Default - - # First stage sampler - self.first_stage_positive_balance_fraction = 0.5 # Default - self.use_static_balanced_label_sampler = False # Default - self.use_static_shapes = False # Default - self.first_stage_sampler = sampler.BalancedPositiveNegativeSampler( - positive_fraction=self.first_stage_positive_balance_fraction, - is_static=(self.use_static_balanced_label_sampler and self.use_static_shapes)) - - # First stage NMS - self.first_stage_nms_score_threshold = 0.0 - self.first_stage_nms_iou_threshold = 0.7 - self.first_stage_max_proposals = 300 - self.use_partitioned_nms_in_first_stage = True # Default - self.use_combined_nms_in_first_stage = False # Default - self.first_stage_non_max_suppression_fn = functools.partial( - post_processing.batch_multiclass_non_max_suppression, - score_thresh=self.first_stage_nms_score_threshold, - iou_thresh=self.first_stage_nms_iou_threshold, - max_size_per_class=self.first_stage_max_proposals, - max_total_size=self.first_stage_max_proposals, - use_static_shapes=self.use_static_shapes, - use_partitioned_nms=self.use_partitioned_nms_in_first_stage, - use_combined_nms=self.use_combined_nms_in_first_stage) - - # First stage localization loss weight - self.first_stage_localization_loss_weight = 2.0 - - # First stage objectness loss weight - self.first_stage_objectness_loss_weight = 1.0 - - # Second stage target assigner - self.second_stage_target_assigner = target_assigner.create_target_assigner( - 'FasterRCNN', 'detection', 
use_matmul_gather=self.use_matmul_gather_in_matcher) - - # Second stage sampler - self.second_stage_batch_size = 64 # Default - self.second_stage_balance_fraction = 0.25 # Default - self.second_stage_sampler = sampler.BalancedPositiveNegativeSampler( - positive_fraction=self.second_stage_balance_fraction, - is_static=(self.use_static_balanced_label_sampler and self.use_static_shapes)) - - # Second stage box predictor - self.second_stage_box_predictor = NetworkDesc( - desc.mask_rcnn_box).to_model() - - # Second stage NMS function - self.second_stage_non_max_suppression_fn, self.second_stage_score_conversion_fn = \ - post_processing_util.get_post_processing_fn(desc.second_stage_post_processing) - - # Second stage mask prediction loss weight - self.second_stage_mask_prediction_loss_weight = 1.0 # default - - # Second stage localization loss weight - self.second_stage_localization_loss_weight = 2.0 - - # Second stage classification loss weight - self.second_stage_classification_loss_weight = 1.0 - - # Second stage classification loss - self.logit_scale = 1.0 # Default - self.second_stage_classification_loss = losses.WeightedSoftmaxClassificationLoss( - logit_scale=self.logit_scale) - - self.hard_example_miner = None - self.add_summaries = True - - # Crop and resize function - self.use_matmul_crop_and_resize = False # Default - self.crop_and_resize_fn = ( - spatial_ops.multilevel_matmul_crop_and_resize - if self.use_matmul_crop_and_resize - else spatial_ops.native_crop_and_resize) - - self.clip_anchors_to_image = False # Default - self.resize_masks = True # Default - self.return_raw_detections_during_predict = False # Default - self.output_final_box_features = False # Default - - # Image resizer function - self.image_resizer_fn = image_resizer_util.get_image_resizer( - desc.image_resizer) - - self.initial_crop_size = 14 - self.maxpool_kernel_size = 2 - self.maxpool_stride = 2 - - # Real model to be called - self.model = None - - def _init_model(self, training): - - # Init FasterRCNNMetaArch - common_kwargs = { - 'is_training': training, - 'num_classes': self.num_classes, - 'image_resizer_fn': self.image_resizer_fn, - 'feature_extractor': self.feature_extractor.get_real_model(training), - 'number_of_stages': self.number_of_stages, - 'first_stage_anchor_generator': self.first_stage_anchor_generator.get_real_model(training), - 'first_stage_target_assigner': self.first_stage_target_assigner, - 'first_stage_atrous_rate': self.first_stage_atrous_rate, - 'first_stage_box_predictor_arg_scope_fn': self.first_stage_box_predictor_arg_scope_fn, - 'first_stage_box_predictor_kernel_size': self.first_stage_box_predictor_kernel_size, - 'first_stage_box_predictor_depth': self.first_stage_box_predictor_depth, - 'first_stage_minibatch_size': self.first_stage_minibatch_size, - 'first_stage_sampler': self.first_stage_sampler, - 'first_stage_non_max_suppression_fn': self.first_stage_non_max_suppression_fn, - 'first_stage_max_proposals': self.first_stage_max_proposals, - 'first_stage_localization_loss_weight': self.first_stage_localization_loss_weight, - 'first_stage_objectness_loss_weight': self.first_stage_objectness_loss_weight, - 'second_stage_target_assigner': self.second_stage_target_assigner, - 'second_stage_batch_size': self.second_stage_batch_size, - 'second_stage_sampler': self.second_stage_sampler, - 'second_stage_non_max_suppression_fn': self.second_stage_non_max_suppression_fn, - 'second_stage_score_conversion_fn': self.second_stage_score_conversion_fn, - 'second_stage_localization_loss_weight': 
self.second_stage_localization_loss_weight, - 'second_stage_classification_loss': self.second_stage_classification_loss, - 'second_stage_classification_loss_weight': self.second_stage_classification_loss_weight, - 'hard_example_miner': self.hard_example_miner, - 'add_summaries': self.add_summaries, - 'crop_and_resize_fn': self.crop_and_resize_fn, - 'clip_anchors_to_image': self.clip_anchors_to_image, - 'use_static_shapes': self.use_static_shapes, - 'resize_masks': self.resize_masks, - 'return_raw_detections_during_predict': self.return_raw_detections_during_predict, - 'output_final_box_features': self.output_final_box_features - } - - self.model = faster_rcnn_meta_arch.FasterRCNNMetaArch( - initial_crop_size=self.initial_crop_size, - maxpool_kernel_size=self.maxpool_kernel_size, - maxpool_stride=self.maxpool_stride, - second_stage_mask_rcnn_box_predictor=self.second_stage_box_predictor.get_real_model( - training), - second_stage_mask_prediction_loss_weight=( - self.second_stage_mask_prediction_loss_weight), - **common_kwargs) - - def get_real_model(self, training): - """Get or init real model.""" - if self.model: - return self.model - else: - self._init_model(training) - return self.model - - def __call__(self, features, labels, training): - """Forward function of faster-rcnn.""" - if training: - self.get_real_model(training).provide_groundtruth( - groundtruth_boxes_list=tf.unstack( - labels[fields.InputDataFields.groundtruth_boxes]), - groundtruth_classes_list=tf.unstack( - labels[fields.InputDataFields.groundtruth_classes]), - groundtruth_weights_list=tf.unstack(labels[fields.InputDataFields.groundtruth_weights])) - - predict_results = self.get_real_model(training).predict(features[fields.InputDataFields.image], - features[fields.InputDataFields.true_image_shape]) - return predict_results - - def loss(self, predict_results, true_image_shapes): - """Get loss function of faster-rcnn.""" - return self.get_real_model(True).loss(predict_results, true_image_shapes) - - def updates(self): - """Update faster-rcnn model.""" - return self.get_real_model(True).updates() - - def regularization_losses(self): - """Get regularization loss of faster-rcnn.""" - return self.get_real_model(True).regularization_losses() - - def restore_map(self, fine_tune_checkpoint_type, load_all_detection_checkpoint_vars): - """Restore map of faster-rcnn.""" - return self.get_real_model(True).restore_map( - fine_tune_checkpoint_type=fine_tune_checkpoint_type, - load_all_detection_checkpoint_vars=(load_all_detection_checkpoint_vars)) diff --git a/vega/networks/tensorflow/detectors/faster_rcnn_trainer_callback.py b/vega/networks/tensorflow/detectors/faster_rcnn_trainer_callback.py deleted file mode 100644 index 15c2d85..0000000 --- a/vega/networks/tensorflow/detectors/faster_rcnn_trainer_callback.py +++ /dev/null @@ -1,83 +0,0 @@ -# -*- coding: utf-8 -*- - -# Copyright (C) 2020. Huawei Technologies Co., Ltd. All rights reserved. -# This program is free software; you can redistribute it and/or modify -# it under the terms of the MIT License. -# This program is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# MIT License for more details. 
- -"""CARS trainer.""" -import logging - -import tensorflow as tf -import tf_slim as slim -from object_detection.core import standard_fields as fields -from object_detection.utils import variables_helper - -from vega.common import ClassFactory, ClassType -from vega.trainer.callbacks import Callback -from .tf_optimizer import TFOptimizer - - -@ClassFactory.register(ClassType.CALLBACK) -class FasterRCNNTrainerCallback(Callback): - """A special callback for FasterRCNNTrainer.""" - - disable_callbacks = ["ModelStatistics"] - - def model_fn(self, features, labels, mode): - """Define Faster R-CNN model_fn used by TensorFlow Estimator.""" - logging.info('Faster R-CNN model function action') - self.model = self.trainer.model - self.config = self.trainer.config - predict_result_dict = self.model( - features, labels, mode == tf.estimator.ModeKeys.TRAIN) - - self.fine_tune_checkpoint_type = self.config.fine_tune_checkpoint_type - self.load_all_detection_checkpoint_vars = True - asg_map = self.model.restore_map( - fine_tune_checkpoint_type=self.fine_tune_checkpoint_type, - load_all_detection_checkpoint_vars=( - self.load_all_detection_checkpoint_vars)) - - self.fine_tune_checkpoint = self.config.fine_tune_checkpoint - available_var_map = ( - variables_helper.get_variables_available_in_checkpoint( - asg_map, - self.fine_tune_checkpoint, - include_global_step=False)) - tf.train.init_from_checkpoint(self.fine_tune_checkpoint, - available_var_map) - - losses_dict = self.model.loss( - predict_result_dict, features[fields.InputDataFields.true_image_shape]) - losses = [loss_tensor for loss_tensor in losses_dict.values()] - total_loss = tf.add_n(losses, name='total_loss') - train_op = None - if mode == tf.estimator.ModeKeys.TRAIN: - global_step = tf.train.get_or_create_global_step() - self.optimizer, self.optimizer_summary_vars = TFOptimizer( - self.config.optimizer).get_real_optimizer(global_step) - trainable_variables = None - trainable_variables = slim.filter_variables( - tf.trainable_variables()) - clip_gradients_value = None - summaries = None - train_op = slim.optimizers.optimize_loss( - loss=total_loss, - global_step=global_step, - learning_rate=None, - clip_gradients=clip_gradients_value, - optimizer=self.optimizer, - update_ops=self.model.updates(), - variables=trainable_variables, - summaries=summaries, - name='') # Preventing scope prefix on all variables. - - eval_metric_ops = None - if mode == tf.estimator.ModeKeys.EVAL: - eval_metric_ops = self.valid_metrics(predict_result_dict, labels) - return tf.estimator.EstimatorSpec(mode=mode, loss=total_loss, train_op=train_op, - eval_metric_ops=eval_metric_ops) diff --git a/vega/networks/tensorflow/detectors/tf_optimizer.py b/vega/networks/tensorflow/detectors/tf_optimizer.py deleted file mode 100644 index 264c132..0000000 --- a/vega/networks/tensorflow/detectors/tf_optimizer.py +++ /dev/null @@ -1,137 +0,0 @@ -# -*- coding: utf-8 -*- - -# Copyright (C) 2020. Huawei Technologies Co., Ltd. All rights reserved. -# This program is free software; you can redistribute it and/or modify -# it under the terms of the MIT License. -# This program is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# MIT License for more details. 
- -"""Defined faster rcnn detector.""" -import tensorflow as tf -from tensorflow.contrib import opt as tf_opt -from object_detection.utils import learning_schedules - - -class TFOptimizer(object): - """TFOptimizer.""" - - def __init__(self, desc): - """Init TFOptimizer. - - :param desc: config dict - """ - self.type = desc.type - self.lr = desc.lr - self.momentum = desc.momentum - self.weight_decay = desc.weight_decay if hasattr( - desc, 'weight_decay') else 0 - self.epsilon = desc.epsilon if hasattr(desc, 'epsilon') else 0 - self.use_moving_average = desc.use_moving_average if hasattr( - desc, 'use_moving_average') else True - self.moving_average_decay = desc.moving_average_decay if hasattr( - desc, 'moving_average_decay') else 0.9999 - self.warmup = desc.warmup if hasattr(desc, 'warmup') else False - self.optimizer = None - self.summary_vars = [] - - def _create_learning_rate(self, learning_rate_config, global_step=None): - """Create optimizer learning rate based on config. - - Args: - learning_rate_config: A LearningRate proto message. - global_step: A variable representing the current step. - If None, defaults to tf.train.get_or_create_global_step() - Returns: - A learning rate. - Raises: - ValueError: when using an unsupported input data type. - """ - if global_step is None: - global_step = tf.train.get_or_create_global_step() - learning_rate = None - learning_rate_type = learning_rate_config.type - if learning_rate_type == 'constant_learning_rate': - config = learning_rate_config.constant_learning_rate - learning_rate = tf.constant(config.learning_rate, dtype=tf.float32, - name='learning_rate') - - if learning_rate_type == 'exponential_decay_learning_rate': - config = learning_rate_config.exponential_decay_learning_rate - learning_rate = learning_schedules.exponential_decay_with_burnin( - global_step, - config.initial_learning_rate, - config.decay_steps, - config.decay_factor, - burnin_learning_rate=config.burnin_learning_rate, - burnin_steps=config.burnin_steps, - min_learning_rate=config.min_learning_rate, - staircase=config.staircase) - - if learning_rate_type == 'manual_step_learning_rate': - config = learning_rate_config - if not config.schedule: - raise ValueError('Empty learning rate schedule.') - learning_rate_step_boundaries = [x['step'] for x in config.schedule] - learning_rate_sequence = [config.initial_learning_rate] - learning_rate_sequence += [x['learning_rate'] - for x in config.schedule] - learning_rate = learning_schedules.manual_stepping( - global_step, learning_rate_step_boundaries, - learning_rate_sequence, self.warmup) - - if learning_rate_type == 'cosine_decay_learning_rate': - config = learning_rate_config.cosine_decay_learning_rate - learning_rate = learning_schedules.cosine_decay_with_warmup( - global_step, - config.learning_rate_base, - config.total_steps, - config.warmup_learning_rate, - config.warmup_steps, - config.hold_base_rate_steps) - - if learning_rate is None: - raise ValueError('Learning_rate %s not supported.' 
% - learning_rate_type) - - return learning_rate - - def get_real_optimizer(self, global_step=None): - """Get real optimizer for faster-rcnn.""" - if self.optimizer: - return self.optimizer, self.summary_vars - else: - if self.type == 'RMSPropOptimizer': - learning_rate = self._create_learning_rate(self.lr, - global_step=global_step) - self.summary_vars.append(learning_rate) - self.optimizer = tf.train.RMSPropOptimizer( - learning_rate, - decay=self.weight_decay, - momentum=self.momentum, - epsilon=self.epsilon) - - if self.type == 'MomentumOptimizer': - learning_rate = self._create_learning_rate(self.lr, - global_step=global_step) - self.summary_vars.append(learning_rate) - self.optimizer = tf.train.MomentumOptimizer( - learning_rate, - momentum=self.momentum) - - if self.type == 'AdamOptimizer': - learning_rate = self._create_learning_rate(self.lr, - global_step=global_step) - self.summary_vars.append(learning_rate) - self.optimizer = tf.train.AdamOptimizer( - learning_rate, epsilon=self.epsilon) - - if self.optimizer is None: - raise ValueError('Optimizer %s not supported.' % self.type) - - if self.use_moving_average: - self.optimizer = tf_opt.MovingAverageOptimizer( - self.optimizer, average_decay=self.moving_average_decay) - - return self.optimizer, self.summary_vars diff --git a/vega/networks/tensorflow/gcn/layers.py b/vega/networks/tensorflow/gcn/layers.py index d95fa8b..2501ee5 100644 --- a/vega/networks/tensorflow/gcn/layers.py +++ b/vega/networks/tensorflow/gcn/layers.py @@ -1,12 +1,18 @@ # -*- coding: utf-8 -*- # Copyright (C) 2020. Huawei Technologies Co., Ltd. All rights reserved. -# This program is free software; you can redistribute it and/or modify -# it under the terms of the MIT License. -# This program is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# MIT License for more details. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. """GCN Layers.""" import numpy as np import tensorflow as tf @@ -22,9 +28,7 @@ def first_approx(W, n): A = W + np.identity(n) d = np.sum(A, axis=1) sinvD = np.sqrt(np.mat(np.diag(d)).I) - # refer to Eq.5 return np.mat(np.identity(n) + sinvD * A * sinvD) - # return np.mat(sinvD * A * sinvD) def scaled_laplacian_tensor(W): @@ -33,11 +37,8 @@ def scaled_laplacian_tensor(W): :param W: tensor, [n_route, n_route], weighted adjacency matrix of G. :return: tensor, [n_route, n_route]. """ - # d -> diagonal degree matrix n = W.get_shape().as_list()[1] D = tf.reduce_sum(W, -1) - - # L -> graph Laplacian L = tf.matrix_diag(D) - W sinvD = tf.matrix_diag(1 / tf.sqrt(D)) @@ -56,12 +57,9 @@ def cheb_poly_approx_tensor(L, ks, n=None): :return: np.ndarray, [n_route, Ks*n_route]. 
""" n = int(L.get_shape()[1] if n is None else n) - # d = tf.expand_dims(tf.eye(n), 0) - # d = tf.eye(n) L0, L1 = tf.eye(n), L L_list = [L0, L1] for i in range(ks - 2): - # L0 = d + L - L Ln = 2 * L * L - L0 L0, L1 = L1, Ln L_list.append(Ln) @@ -80,15 +78,11 @@ def spatial_attention_layer(x): W_3 = tf.get_variable(name='spatial_w_3', shape=[c, 1], dtype=tf.float32) b_s = tf.get_variable(name='spatial_b_s', shape=[1, n, n], dtype=tf.float32) V_s = tf.get_variable(name='spatial_v_s', shape=[n, n], dtype=tf.float32) - # x -> [batch_size, n_route, c_in, time_step] x_tmp = tf.transpose(x, [0, 2, 3, 1]) lhs = tf.tensordot(tf.squeeze(tf.tensordot(x_tmp, W_1, axes=[[3], [0]]), axis=-1), W_2, - axes=[[2], [0]]) # [batch_size, n_route, time_step] - # x : [batch_size, time_step, n_route, c_in] - rhs = tf.squeeze(tf.tensordot(x, W_3, axes=[[3], [0]]), axis=-1) # [batch_size, time_step, n_route] - # s = tf.matmul(tf.nn.sigmoid(tf.matmul(lhs, rhs) + b_s), V_s) # [batch_size, n_route, n_route] - s = tf.tensordot(tf.nn.sigmoid(tf.matmul(lhs, rhs) + b_s), V_s, axes=[[2], [0]]) # [batch_size, n_route, n_route] - # normalization + axes=[[2], [0]]) + rhs = tf.squeeze(tf.tensordot(x, W_3, axes=[[3], [0]]), axis=-1) + s = tf.tensordot(tf.nn.sigmoid(tf.matmul(lhs, rhs) + b_s), V_s, axes=[[2], [0]]) return tf.nn.softmax(s, axis=1) @@ -104,15 +98,13 @@ def temporal_attention_layer(x): W_3 = tf.get_variable(name='temporal_w_3', shape=[c, 1], dtype=tf.float32) b_s = tf.get_variable(name='temporal_b_s', shape=[1, T, T], dtype=tf.float32) V_s = tf.get_variable(name='temporal_v_s', shape=[T, T], dtype=tf.float32) - # x -> [batch_size, time_step, c_in, n_route] x_tmp = tf.transpose(x, [0, 1, 3, 2]) lhs = tf.tensordot(tf.squeeze(tf.tensordot(x_tmp, W_1, axes=[[3], [0]]), axis=-1), W_2, - axes=[[2], [0]]) # [batch_size, time_step, n_route] + axes=[[2], [0]]) rhs = tf.transpose(tf.squeeze(tf.tensordot(x, W_3, axes=[[3], [0]]), axis=-1), - [0, 2, 1]) # [batch_size, n_route, time_step] + [0, 2, 1]) s = tf.tensordot(tf.nn.sigmoid(tf.matmul(lhs, rhs) + b_s), V_s, - axes=[[2], [0]]) # [batch_size, time_step, time_step] - # normalization + axes=[[2], [0]]) s = tf.nn.softmax(s, axis=1) return s @@ -132,8 +124,7 @@ def gconv_layer(x, Ks, number_of_layers, spatial_attention, c_in, c_out, adjacen """ _, T, n, _ = x.get_shape().as_list() - # Currently only support for one kernel when using variable adjacency matrix - kernels = tf.get_collection('graph_kernel') # list of graph kernels defined in main.py + kernels = tf.get_collection('graph_kernel') if adjacency_matrix is not None: L = scaled_laplacian_tensor(adjacency_matrix) Lk = cheb_poly_approx_tensor(L, Ks, n) @@ -141,7 +132,7 @@ def gconv_layer(x, Ks, number_of_layers, spatial_attention, c_in, c_out, adjacen for layer in range(number_of_layers): x_result = [] - x = tf.reshape(x, [-1, n, c_in]) # [batch_size*time_step, n_route, c_in] + x = tf.reshape(x, [-1, n, c_in]) for i, kernel in enumerate(kernels): theta = tf.get_variable( @@ -153,39 +144,24 @@ def gconv_layer(x, Ks, number_of_layers, spatial_attention, c_in, c_out, adjacen name='bs_' + str(layer) + '_' + str(i), initializer=tf.zeros([c_out]), dtype=tf.float32) - - # kernel: graph kernel: tensor, [n_route, Ks*n_route] - # n = tf.shape(kernel)[0] - # x -> [batch_size, c_in, n_route] -> [batch_size*c_in, n_route] x = tf.transpose(x, [0, 2, 1]) x_tmp = tf.reshape(x, [-1, n]) - if (adjacency_matrix is not None) and (i == len(kernels) - 1): # dynamic graph - # x_tmp -> [batch_size*c_in, n_route] - # adjacency_matrix -> 
[batch_size, n_route, Ks*n_route] - # x_mul = tf.tensordot( - # x, adjacency_matrix, axes=[[2], [1]]) # [batch_size, c_in, batch_size, Ks*n_route] - - # for each x,matrix pair in batch_size: - # [batch_Size,c_in, n_route] * [batch_Size, n_route, Ks*n_route] -> [c_in, Ks*n_route] - # [batch_Size, c_in, Ks*n_route] + if (adjacency_matrix is not None) and (i == len(kernels) - 1): x_mul = tf.matmul(x, kernel) x_mul = tf.reshape(x_mul, [-1, c_in, Ks, n]) else: - # x_mul = x_tmp * ker -> [batch_size*c_in, Ks*n_route] -> [batch_size, c_in, Ks, n_route] x_mul = tf.reshape(tf.matmul(x_tmp, kernel), [-1, c_in, Ks, n]) - # x_ker -> [batch_size, n_route, c_in, K_s] -> [batch_size*n_route, c_in*Ks] x_ker = tf.reshape(tf.transpose(x_mul, [0, 3, 1, 2]), [-1, c_in * Ks]) - # x_gconv -> [batch_size*n_route, c_out] -> [batch_size, n_route, c_out] x_gconv = tf.reshape(tf.matmul(x_ker, theta), [-1, n, c_out]) + bs x_gc = tf.reshape(x_gconv, [-1, T, n, c_out]) - x_output = tf.nn.relu(x_gc[:, :, :, 0:c_out]) # activation + x_output = tf.nn.relu(x_gc[:, :, :, 0:c_out]) x_result.append(x_output) - x = tf.add_n(x_result) # add the result from multi-graph + x = tf.add_n(x_result) c_in = c_out return x @@ -211,15 +187,13 @@ def GCN_GRU(x, Ks, channels, gru_layers, gcn_layers, keep_prob, temporal_attenti with tf.variable_scope('gcn_gru'): if temporal_attention: _, T, n, c = x.get_shape().as_list() - s_t = temporal_attention_layer(x) # [batch_size, time_step, time_step ] - x_tmp = tf.reshape(tf.transpose(x, [0, 2, 3, 1]), [-1, n * c, T]) # [batch_size, n_route*c_out, time_step] + s_t = temporal_attention_layer(x) + x_tmp = tf.reshape(tf.transpose(x, [0, 2, 3, 1]), [-1, n * c, T]) x = tf.transpose(tf.reshape(tf.matmul(x_tmp, s_t), [-1, n, c, T]), - [0, 3, 1, 2]) # [batch_size, time_step,n_route,c_out] - # first GCN then GRU + [0, 3, 1, 2]) with tf.variable_scope('gcn_gru'): - x_s = gconv_layer(x, Ks, gcn_layers, spatial_attention, 1, c_gcn) # [batch_size, time_step, n_route, c_out] + x_s = gconv_layer(x, Ks, gcn_layers, spatial_attention, 1, c_gcn) x_s = gru_layer(x_s, gru_layers, c_gru, keep_prob) - # x_ln = layer_norm(x_t, 'layer_norm_'+scope) return tf.nn.dropout(x_s, keep_prob) @@ -233,17 +207,14 @@ def gru_layer(x, number_of_layers, c_out, keep_prob): :return: tensor, [batch_size, 1, n_route, c_out] """ dim = x.get_shape().as_list() - x = tf.reshape(tf.transpose(x, [0, 2, 1, 3]), [-1, dim[1], dim[3]]) # [batch_size*n_route, time_step, c_in] + x = tf.reshape(tf.transpose(x, [0, 2, 1, 3]), [-1, dim[1], dim[3]]) cell = get_a_cell(c_out, keep_prob) if number_of_layers > 1: cell = tf.nn.rnn_cell.MultiRNNCell([get_a_cell(c_out, keep_prob) for _ in range(number_of_layers)]) - - # _, last_states = tf.nn.dynamic_rnn(cell=cell, inputs=x, dtype=tf.float32) #last_states: [batch_size*n_route,c_out] - input_x = tf.unstack(x, num=dim[1], axis=1) # needed if use tf.contrib.rnn.static_rnn + input_x = tf.unstack(x, num=dim[1], axis=1) outputs, last_states = tf.contrib.rnn.static_rnn(cell=cell, inputs=input_x, dtype=tf.float32) - # print(outputs[-1],last_states[-1]) # these two are the same - return tf.expand_dims(tf.reshape(outputs[-1], [-1, dim[2], c_out]), 1) # [batch_size, 1, n_route, c_out] + return tf.expand_dims(tf.reshape(outputs[-1], [-1, dim[2], c_out]), 1) def get_a_cell(hidden_size, keep_prob): diff --git a/vega/networks/tensorflow/losses/charbonnier.py b/vega/networks/tensorflow/losses/charbonnier.py index 99da70d..c2b8e6d 100644 --- a/vega/networks/tensorflow/losses/charbonnier.py +++ 
b/vega/networks/tensorflow/losses/charbonnier.py @@ -1,12 +1,18 @@ # -*- coding: utf-8 -*- # Copyright (C) 2020. Huawei Technologies Co., Ltd. All rights reserved. -# This program is free software; you can redistribute it and/or modify -# it under the terms of the MIT License. -# This program is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# MIT License for more details. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. """Charbonnier Loss class.""" import functools import tensorflow as tf diff --git a/vega/networks/tensorflow/losses/cross_entropy_loss.py b/vega/networks/tensorflow/losses/cross_entropy_loss.py index bbd6152..317b25d 100644 --- a/vega/networks/tensorflow/losses/cross_entropy_loss.py +++ b/vega/networks/tensorflow/losses/cross_entropy_loss.py @@ -1,12 +1,18 @@ # -*- coding: utf-8 -*- # Copyright (C) 2020. Huawei Technologies Co., Ltd. All rights reserved. -# This program is free software; you can redistribute it and/or modify -# it under the terms of the MIT License. -# This program is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# MIT License for more details. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. """cross Entropy Weight Decay Loss.""" diff --git a/vega/networks/tensorflow/losses/mix_auxiliary_loss.py b/vega/networks/tensorflow/losses/mix_auxiliary_loss.py index ff168ed..f4f9af9 100644 --- a/vega/networks/tensorflow/losses/mix_auxiliary_loss.py +++ b/vega/networks/tensorflow/losses/mix_auxiliary_loss.py @@ -1,10 +1,16 @@ # Copyright (C) 2020. Huawei Technologies Co., Ltd. All rights reserved. -# This program is free software; you can redistribute it and/or modify -# it under the terms of the MIT License. -# This program is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# MIT License for more details. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. """Mix Auxiliary Loss.""" import importlib diff --git a/vega/networks/tensorflow/necks/mask_rcnn_box.py b/vega/networks/tensorflow/necks/mask_rcnn_box.py index a626431..b4a9be8 100644 --- a/vega/networks/tensorflow/necks/mask_rcnn_box.py +++ b/vega/networks/tensorflow/necks/mask_rcnn_box.py @@ -1,12 +1,18 @@ # -*- coding: utf-8 -*- # Copyright (C) 2020. Huawei Technologies Co., Ltd. All rights reserved. -# This program is free software; you can redistribute it and/or modify -# it under the terms of the MIT License. -# This program is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# MIT License for more details. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. """Defined faster rcnn detector.""" diff --git a/vega/networks/tensorflow/network.py b/vega/networks/tensorflow/network.py index e4426e8..567a2a4 100644 --- a/vega/networks/tensorflow/network.py +++ b/vega/networks/tensorflow/network.py @@ -1,12 +1,18 @@ # -*- coding: utf-8 -*- # Copyright (C) 2020. Huawei Technologies Co., Ltd. All rights reserved. -# This program is free software; you can redistribute it and/or modify -# it under the terms of the MIT License. -# This program is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# MIT License for more details. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. """Defined TensorFlow sequential network.""" import tensorflow as tf diff --git a/vega/networks/tensorflow/resnet_tf.py b/vega/networks/tensorflow/resnet_tf.py index 9ee7241..7284499 100644 --- a/vega/networks/tensorflow/resnet_tf.py +++ b/vega/networks/tensorflow/resnet_tf.py @@ -1,12 +1,18 @@ # -*- coding: utf-8 -*- # Copyright (C) 2020. Huawei Technologies Co., Ltd. All rights reserved. -# This program is free software; you can redistribute it and/or modify -# it under the terms of the MIT License. 
-# This program is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# MIT License for more details. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. """Defined TensorFlow sequential network.""" from official.r1.resnet import resnet_model diff --git a/vega/networks/tensorflow/utils/anchor_utils/anchor_generator.py b/vega/networks/tensorflow/utils/anchor_utils/anchor_generator.py index f1109c1..796d6fc 100644 --- a/vega/networks/tensorflow/utils/anchor_utils/anchor_generator.py +++ b/vega/networks/tensorflow/utils/anchor_utils/anchor_generator.py @@ -1,12 +1,18 @@ # -*- coding: utf-8 -*- # Copyright (C) 2020. Huawei Technologies Co., Ltd. All rights reserved. -# This program is free software; you can redistribute it and/or modify -# it under the terms of the MIT License. -# This program is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# MIT License for more details. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. """Defined faster rcnn detector.""" diff --git a/vega/networks/tensorflow/utils/hyperparams/initializer.py b/vega/networks/tensorflow/utils/hyperparams/initializer.py index 0fcf808..f6c9e3f 100644 --- a/vega/networks/tensorflow/utils/hyperparams/initializer.py +++ b/vega/networks/tensorflow/utils/hyperparams/initializer.py @@ -1,12 +1,18 @@ # -*- coding: utf-8 -*- # Copyright (C) 2020. Huawei Technologies Co., Ltd. All rights reserved. -# This program is free software; you can redistribute it and/or modify -# it under the terms of the MIT License. -# This program is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# MIT License for more details. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
"""Defined initializer for tf backend.""" import tensorflow as tf diff --git a/vega/networks/tensorflow/utils/hyperparams/regularizer.py b/vega/networks/tensorflow/utils/hyperparams/regularizer.py index 02a2698..6d6236b 100644 --- a/vega/networks/tensorflow/utils/hyperparams/regularizer.py +++ b/vega/networks/tensorflow/utils/hyperparams/regularizer.py @@ -1,12 +1,18 @@ # -*- coding: utf-8 -*- # Copyright (C) 2020. Huawei Technologies Co., Ltd. All rights reserved. -# This program is free software; you can redistribute it and/or modify -# it under the terms of the MIT License. -# This program is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# MIT License for more details. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. """Defined faster rcnn detector.""" import tf_slim as slim diff --git a/vega/networks/tensorflow/utils/hyperparams/scope_generator.py b/vega/networks/tensorflow/utils/hyperparams/scope_generator.py index d1a1eb1..c035272 100644 --- a/vega/networks/tensorflow/utils/hyperparams/scope_generator.py +++ b/vega/networks/tensorflow/utils/hyperparams/scope_generator.py @@ -1,12 +1,18 @@ # -*- coding: utf-8 -*- # Copyright (C) 2020. Huawei Technologies Co., Ltd. All rights reserved. -# This program is free software; you can redistribute it and/or modify -# it under the terms of the MIT License. -# This program is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# MIT License for more details. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. """Defined faster rcnn detector.""" import tensorflow as tf diff --git a/vega/networks/tensorflow/utils/image_resizer/__init__.py b/vega/networks/tensorflow/utils/image_resizer/__init__.py deleted file mode 100644 index e69de29..0000000 diff --git a/vega/networks/tensorflow/utils/image_resizer/image_resizer_util.py b/vega/networks/tensorflow/utils/image_resizer/image_resizer_util.py deleted file mode 100644 index 71d63f8..0000000 --- a/vega/networks/tensorflow/utils/image_resizer/image_resizer_util.py +++ /dev/null @@ -1,40 +0,0 @@ -# -*- coding: utf-8 -*- - -# Copyright (C) 2020. Huawei Technologies Co., Ltd. All rights reserved. -# This program is free software; you can redistribute it and/or modify -# it under the terms of the MIT License. 
-# This program is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# MIT License for more details. - -"""Defined faster rcnn detector.""" -import functools -import tensorflow as tf -from object_detection.core import preprocessor - - -def get_image_resizer(desc): - """Get image resizer function.""" - image_resizer_type = desc.type - min_dimension = desc.min_dimension - max_dimension = desc.max_dimension - pad_to_max_dimension = desc.pad_to_max_dimension if 'pad_to_max_dimension' in desc else False - resize_method = desc.resize_method if 'resize_method' in desc else tf.image.ResizeMethod.BILINEAR - if image_resizer_type == 'keep_aspect_ratio_resizer': - if not (min_dimension <= max_dimension): - raise ValueError('min_dimension > max_dimension') - per_channel_pad_value = (0, 0, 0) - if 'per_channel_pad_value' in desc and desc.per_channel_pad_value: - per_channel_pad_value = tuple(desc.per_channel_pad_value) - image_resizer_fn = functools.partial( - preprocessor.resize_to_range, - min_dimension=min_dimension, - max_dimension=max_dimension, - method=resize_method, - pad_to_max_dimension=pad_to_max_dimension, - per_channel_pad_value=per_channel_pad_value) - return image_resizer_fn - else: - raise ValueError( - 'Invalid image resizer option: \'%s\'.' % image_resizer_type) diff --git a/vega/networks/tensorflow/utils/post_processing/__init__.py b/vega/networks/tensorflow/utils/post_processing/__init__.py deleted file mode 100644 index e69de29..0000000 diff --git a/vega/networks/tensorflow/utils/post_processing/post_processing_util.py b/vega/networks/tensorflow/utils/post_processing/post_processing_util.py deleted file mode 100644 index 546e20c..0000000 --- a/vega/networks/tensorflow/utils/post_processing/post_processing_util.py +++ /dev/null @@ -1,82 +0,0 @@ -# -*- coding: utf-8 -*- - -# Copyright (C) 2020. Huawei Technologies Co., Ltd. All rights reserved. -# This program is free software; you can redistribute it and/or modify -# it under the terms of the MIT License. -# This program is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# MIT License for more details. 
- -"""Defined faster rcnn detector.""" -import functools -import tensorflow as tf -from object_detection.core import post_processing - - -def _score_converter_fn_with_logit_scale(tf_score_converter_fn, logit_scale=1.0): - """Create a function to scale logits then apply a Tensorflow function.""" - def score_converter_fn(logits): - scaled_logits = tf.multiply( - logits, 1.0 / logit_scale, name='scale_logits') - return tf_score_converter_fn(scaled_logits, name='convert_scores') - score_converter_fn.__name__ = '%s_with_logit_scale' % ( - tf_score_converter_fn.__name__) - return score_converter_fn - - -def _get_score_converter_fn(score_converter_type): - if score_converter_type == 'IDENTITY': - return _score_converter_fn_with_logit_scale(tf.identity) - if score_converter_type == 'SIGMOID': - return _score_converter_fn_with_logit_scale(tf.sigmoid) - if score_converter_type == 'SOFTMAX': - return _score_converter_fn_with_logit_scale(tf.nn.softmax) - raise ValueError('Unknown score converter.') - - -def _get_non_max_suppressor_fn(desc): - score_threshold = desc.score_threshold if 'score_threshold' in desc else 0.0 - iou_threshold = desc.iou_threshold if 'iou_threshold' in desc else 0.6 - max_detections_per_class = desc.max_detections_per_class if 'max_detections_per_class' in desc else 100 - max_total_detections = desc.max_total_detections if 'max_total_detections' in desc else 100 - use_static_shapes = desc.use_static_shapes if 'use_static_shapes' in desc else False - use_class_agnostic_nms = desc.use_class_agnostic_nms if 'use_class_agnostic_nms' in desc else False - max_classes_per_detection = desc.max_classes_per_detection if 'max_classes_per_detection' in desc else 1 - soft_nms_sigma = desc.soft_nms_sigma if 'soft_nms_sigma' in desc else 0.0 - use_partitioned_nms = desc.use_partitioned_nms if 'use_partitioned_nms' in desc else False - use_combined_nms = desc.use_combined_nms if 'use_combined_nms' in desc else False - change_coordinate_frame = desc.change_coordinate_frame if 'change_coordinate_frame' in desc else True - if iou_threshold < 0 or iou_threshold > 1.0: - raise ValueError('iou_threshold not in [0, 1.0].') - if max_detections_per_class > max_total_detections: - raise ValueError('max_detections_per_class should be no greater than ' - 'max_total_detections.') - if soft_nms_sigma < 0.0: - raise ValueError('soft_nms_sigma should be non-negative.') - if use_combined_nms and use_class_agnostic_nms: - raise ValueError('combined_nms does not support class_agnostic_nms.') - - non_max_suppressor_fn = functools.partial( - post_processing.batch_multiclass_non_max_suppression, - score_thresh=score_threshold, - iou_thresh=iou_threshold, - max_size_per_class=max_detections_per_class, - max_total_size=max_total_detections, - use_static_shapes=use_static_shapes, - use_class_agnostic_nms=use_class_agnostic_nms, - max_classes_per_detection=max_classes_per_detection, - soft_nms_sigma=soft_nms_sigma, - use_partitioned_nms=use_partitioned_nms, - use_combined_nms=use_combined_nms, - change_coordinate_frame=change_coordinate_frame) - return non_max_suppressor_fn - - -def get_post_processing_fn(desc): - """Get post processing function.""" - nms_config = desc.batch_non_max_suppression - score_converter_type = desc.score_converter - non_max_suppressor_fn = _get_non_max_suppressor_fn(nms_config) - score_converter_fn = _get_score_converter_fn(score_converter_type) - return non_max_suppressor_fn, score_converter_fn diff --git a/vega/networks/text_cnn.py b/vega/networks/text_cnn.py index 1cd6369..ca53ddd 100644 
--- a/vega/networks/text_cnn.py +++ b/vega/networks/text_cnn.py @@ -1,12 +1,18 @@ # -*- coding:utf-8 -*- # Copyright (C) 2020. Huawei Technologies Co., Ltd. All rights reserved. -# This program is free software; you can redistribute it and/or modify -# it under the terms of the MIT License. -# This program is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# MIT License for more details. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. """This is TextCNN network.""" from vega.common import ClassFactory, ClassType diff --git a/vega/networks/unet.py b/vega/networks/unet.py index 6518bec..9d195b2 100644 --- a/vega/networks/unet.py +++ b/vega/networks/unet.py @@ -1,12 +1,18 @@ # -*- coding:utf-8 -*- # Copyright (C) 2020. Huawei Technologies Co., Ltd. All rights reserved. -# This program is free software; you can redistribute it and/or modify -# it under the terms of the MIT License. -# This program is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# MIT License for more details. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. """This is SearchSpace for network.""" from vega.common import ClassFactory, ClassType diff --git a/vega/networks/vit.py b/vega/networks/vit.py new file mode 100644 index 0000000..11e740f --- /dev/null +++ b/vega/networks/vit.py @@ -0,0 +1,230 @@ +# -*- coding:utf-8 -*- + +# Copyright (C) 2020. Huawei Technologies Co., Ltd. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
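For orientation, a minimal usage sketch of the VisionTransformer module added below. This is a sketch only: it assumes the PyTorch backend is selected through vega.set_backend and that a registered vega Module instance can be called directly on a batched image tensor; neither assumption is established by this patch.

import torch
import vega

vega.set_backend("pytorch")  # assumption: makes vega.modules.operators.ops resolve to torch ops
from vega.networks.vit import VisionTransformer  # path added by this patch

# Build the default ViT-Base/16 configuration and run one dummy 224x224 image through it.
model = VisionTransformer(img_size=224, patch_size=16, embed_dim=768, depth=12, num_heads=12)
images = torch.zeros(1, 3, 224, 224)  # (batch, in_chans, img_size, img_size)
logits = model(images)               # expected shape (1, 1000) with the default num_classes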
+
+"""Implementation of Vision Transformer (ViT)."""
+import logging
+from functools import partial
+from collections import OrderedDict
+import numpy as np
+from vega.modules.operators import ops
+from vega.common.class_factory import ClassFactory, ClassType
+from vega.modules.module import Module
+from vega.modules.connections import Sequential
+
+_logger = logging.getLogger(__name__)
+
+
+class Mlp(Module):
+    """Mlp layer in Transformer."""
+
+    def __init__(self, in_features, hidden_features=None, out_features=None, act_layer=ops.gelu, drop=0.):
+        super().__init__()
+        out_features = out_features or in_features
+        hidden_features = hidden_features or in_features
+        self.fc1 = ops.Linear(in_features, hidden_features)
+        self.act = act_layer
+        self.fc2 = ops.Linear(hidden_features, out_features)
+        self.drop = ops.Dropout(drop)
+
+    def call(self, x):
+        """Forward mlp layer."""
+        x = self.fc1(x)
+        x = self.act(x)
+        x = self.drop(x)
+        x = self.fc2(x)
+        x = self.drop(x)
+        return x
+
+
+class Attention(Module):
+    """Attention layer in Transformer."""
+
+    def __init__(self, dim, num_heads=8, qkv_bias=False, qk_scale=None, attn_drop=0., proj_drop=0.):
+        super().__init__()
+        self.num_heads = num_heads
+        head_dim = dim // num_heads
+        self.scale = qk_scale or head_dim ** -0.5
+
+        self.qkv = ops.Linear(dim, dim * 3, qkv_bias)
+        self.attn_drop = ops.Dropout(attn_drop)
+        self.proj = ops.Linear(dim, dim)
+        self.proj_drop = ops.Dropout(proj_drop)
+
+    def call(self, x):
+        """Forward Attention layer."""
+        B, N, C = x.shape
+        qkv = ops.View((B, N, 3, self.num_heads, C // self.num_heads))(self.qkv(x))
+        qkv = ops.Permute((2, 0, 3, 1, 4))(qkv)
+        q = qkv[0:1]
+        k = qkv[1:2]
+        v = qkv[2:3]
+        q = ops.Squeeze(0)(q)
+        k = ops.Squeeze(0)(k)
+        v = ops.Squeeze(0)(v)
+        attn = ops.matmul(q, ops.Transpose(2, 3)(k)) * self.scale
+        attn = ops.softmax(attn, -1)
+        attn = self.attn_drop(attn)
+        x = ops.Transpose(1, 2)(ops.matmul(attn, v))
+        x = ops.View((B, N, C))(x)
+        x = self.proj(x)
+        x = self.proj_drop(x)
+        return x
+
+
+class Block(Module):
+    """Block of Transformer, which contains one Attention layer and one MLP layer."""
+
+    def __init__(self, dim, num_heads, mlp_ratio=4., qkv_bias=False, qk_scale=None, drop=0., attn_drop=0.,
+                 drop_path=0., act_layer=ops.gelu, norm_layer=ops.LayerNorm):
+        super().__init__()
+        self.norm1 = norm_layer(dim)
+        self.attn = Attention(
+            dim, num_heads=num_heads, qkv_bias=qkv_bias, qk_scale=qk_scale, attn_drop=attn_drop, proj_drop=drop)
+        # NOTE: drop path for stochastic depth, we shall see if this is better than dropout here
+        self.drop_path = ops.DropPath(drop_path) if drop_path > 0. else ops.Identity()
+        self.norm2 = norm_layer(dim)
+        mlp_hidden_dim = int(dim * mlp_ratio)
+        self.mlp = Mlp(in_features=dim, hidden_features=mlp_hidden_dim, act_layer=act_layer, drop=drop)
+
+    def call(self, x):
+        """Forward block."""
+        x = x + self.drop_path(self.attn(self.norm1(x)))  # x shape is (1, 577, 768)
+        x = x + self.drop_path(self.mlp(self.norm2(x)))  # x shape is (1, 577, 768)
+        return x
+
+
+class PatchEmbed(Module):
+    """Image to Patch Embedding."""
+
+    def __init__(self, img_size=224, patch_size=16, in_chans=3, embed_dim=768, norm_layer=None):
+        super().__init__()
+        img_size = (img_size, img_size)
+        patch_size = (patch_size, patch_size)
+        self.img_size = img_size
+        self.patch_size = patch_size
+        self.patch_grid = (img_size[0] // patch_size[0], img_size[1] // patch_size[1])
+        self.num_patches = self.patch_grid[0] * self.patch_grid[1]
+
+        self.proj = ops.Conv2d(in_chans, embed_dim, kernel_size=patch_size, stride=patch_size)
+        self.norm = norm_layer(embed_dim) if norm_layer else ops.Identity()
+        self.flatten = ops.Flatten(2)
+        self.transpose = ops.Transpose(1, 2)
+
+    def call(self, x):
+        """Forward PatchEmbed."""
+        x = self.transpose(self.flatten(self.proj(x)))
+        x = self.norm(x)
+        return x
+
+
+@ClassFactory.register(ClassType.NETWORK)
+class VisionTransformer(Module):
+    """Vision Transformer: `An Image is Worth 16x16 Words: Transformers for Image Recognition at Scale`.
+
+    :param img_size: input image size
+    :type img_size: int, tuple
+    :param patch_size: patch size
+    :type patch_size: int, tuple
+    :param in_chans: number of input channels
+    :type in_chans: int
+    :param num_classes: number of classes for the classification head
+    :type num_classes: int
+    :param embed_dim: embedding dimension
+    :type embed_dim: int
+    :param depth: depth of transformer
+    :type depth: int
+    :param num_heads: number of attention heads
+    :type num_heads: int
+    :param mlp_ratio: ratio of mlp hidden dim to embedding dim
+    :type mlp_ratio: int
+    :param qkv_bias: enable bias for qkv if True
+    :type qkv_bias: bool
+    :param qk_scale: override default qk scale of head_dim ** -0.5 if set
+    :type qk_scale: float
+    :param representation_size: enable and set representation layer (pre-logits) to this value if set
+    :type representation_size: (Optional[int])
+    :param distilled: model includes a distillation token and head as in DeiT models
+    :type distilled: bool
+    :param drop_rate: dropout rate
+    :type drop_rate: float
+    :param attn_drop_rate: attention dropout rate
+    :type attn_drop_rate: float
+    :param drop_path_rate: stochastic depth rate
+    :type drop_path_rate: float
+    :param embed_layer: patch embedding layer
+    :type embed_layer: nn.Module
+    :param norm_layer: normalization layer
+    :type norm_layer: nn.Module
+    :param weight_init: weight init scheme
+    :type weight_init: str
+    """
+
+    def __init__(self, img_size=224, patch_size=16, in_chans=3, num_classes=1000, embed_dim=768, depth=12,
+                 num_heads=12, mlp_ratio=4., qkv_bias=True, qk_scale=None, representation_size=None, distilled=False,
+                 drop_rate=0., attn_drop_rate=0., drop_path_rate=0., embed_layer=PatchEmbed, norm_layer=None,
+                 act_layer=None, weight_init=''):
+        """Construct the VisionTransformer class."""
+        super().__init__()
+        self.num_classes = num_classes
+        self.num_features = self.embed_dim = embed_dim  # num_features for consistency with other models
+        self.num_tokens = 2 if distilled else 1
+        norm_layer = norm_layer or partial(ops.LayerNorm, eps=1e-6)
+        act_layer = act_layer or ops.gelu
+
+        self.patch_embed = embed_layer(
+            img_size=img_size,
patch_size=patch_size, in_chans=in_chans, embed_dim=embed_dim) + num_patches = self.patch_embed.num_patches + self.pos_embed = ops.Parameter( + ops.Tensor(np.zeros([1, num_patches + self.num_tokens, embed_dim]).astype(np.float32)), name="pos_embed") + + self.cls_token = ops.Parameter(ops.Tensor(np.zeros([1, 1, embed_dim]).astype(np.float32)), name="cls_token") + + self.pos_drop = ops.Dropout(prob=drop_rate) + + dpr = [x.item() for x in np.linspace(0, drop_path_rate, depth)] # stochastic depth decay rule + self.blocks = Sequential(*[ + Block( + dim=embed_dim, num_heads=num_heads, mlp_ratio=mlp_ratio, qkv_bias=qkv_bias, qk_scale=qk_scale, + drop=drop_rate, attn_drop=attn_drop_rate, drop_path=dpr[i], norm_layer=norm_layer, act_layer=act_layer) + for i in range(depth)]) + self.norm = norm_layer(embed_dim) + + # Representation layer + if representation_size and not distilled: + self.num_features = representation_size + self.pre_logits = Sequential(OrderedDict([ + ('fc', ops.Linear(embed_dim, representation_size)), + ('act', ops.Tanh()) + ])) + else: + self.pre_logits = ops.Identity() + + # Classifier head(s) + self.head = ops.Linear(self.num_features, num_classes) if num_classes > 0 else ops.Identity() + + def call(self, x): + """Forward VisionTransformer.""" + x = self.patch_embed(x) + cls_token = ops.expand(self.cls_token, (x.shape[0], 1, 1)) + x = ops.concat((cls_token, x), dim=1) + x = self.pos_drop(x + self.pos_embed) + x = self.blocks(x) + x = self.norm(x) + x = x[:, 0:1, :] + x = ops.Squeeze(1)(x) + x = self.pre_logits(x) + x = self.head(x) + return x diff --git a/vega/op_search/automl_zero.py b/vega/op_search/automl_zero.py new file mode 100644 index 0000000..a407f5e --- /dev/null +++ b/vega/op_search/automl_zero.py @@ -0,0 +1,344 @@ +# -*- coding: utf-8 -*- + +# Copyright (C) 2020. Huawei Technologies Co., Ltd. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
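The AutomlZero class added below is consumed through a search loop: search() proposes a mutated formula DAG (or None when the mutation is rejected), the caller scores it, and update_fitness() feeds the score back into the aging population. A minimal driver sketch, assuming a user-supplied evaluate() that returns a loss-like fitness (lower is better); the scorer here is only a placeholder:

import random
from vega.op_search.automl_zero import AutomlZero  # path added by this patch

def evaluate(sample):
    """Placeholder fitness; a real caller would train or simulate the candidate formula."""
    return random.random()

searcher = AutomlZero(population_num=50, max_sample=20000)
while not searcher.is_finished():
    sample = searcher.search()  # returns None when the mutated DAG violates the filter rules
    if sample is None:
        continue
    fitness = evaluate(sample)
    searcher.update_fitness(searcher.sample_count, sample, fitness)
best_code = searcher.select_elite()  # lowest-fitness formula currently in the population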
+
+"""This is a class for AutomlZero."""
+import random
+import copy
+import logging
+import numpy as np
+import yaml
+import pandas as pd
+from vega.common.dag import DAG
+from .ops import filter_rules, init_dict, unary_ops, binary_ops, constant_nodes, MAX_LEN_OF_FORMULA
+from .utils import get_upstreams, dag2compute
+
+
+class AutomlZero():
+    """Define AutomlZero."""
+
+    def __init__(self, population_num=50, max_sample=20000, search_space=None):
+        self.max_sample = max_sample
+        self.population_num = population_num
+        self.sample_count = 0
+        self.sieve_columns = ['sample_id', 'code', 'fitness']
+        self.population = pd.DataFrame(columns=self.sieve_columns)
+        self.all_samples = pd.DataFrame(columns=self.sieve_columns)
+        self.oldest_index = 0
+        if search_space is not None:
+            with open(search_space, "r", encoding="utf-8") as f:
+                res = yaml.load(f, Loader=yaml.FullLoader)
+                self.unary_ops = res['unary_ops']
+                self.binary_ops = res['binary_ops']
+        else:
+            self.unary_ops = unary_ops
+            self.binary_ops = binary_ops
+
+    def is_finished(self):
+        """Check if the search is finished."""
+        return self.sample_count >= self.max_sample
+
+    def search(self):
+        """Generate a sample."""
+        if len(self.population) <= self.population_num:
+            sample = self.random_search()
+        else:
+            sample = self.ea_search()
+        if filter_rules(sample) and not self._check_duplication(sample) and self._check_dag_valid(sample):
+            self.sample_count += 1
+            self.all_samples = self.all_samples.append(
+                [{"sample_id": self.sample_count, "code": sample}], ignore_index=True)
+            sample = self._resample(sample)
+            return sample
+        else:
+            return None
+
+    def _resample(self, old_sample):
+        new_sample = copy.deepcopy(old_sample)
+        random_input = np.random.random([1, 3, 224, 224]).astype(np.float32) * 20 - 10
+        dag = DAG()
+        dag.from_dict(new_sample)
+        res = dag2compute(dag, random_input)
+        if isinstance(res, np.ndarray) and res.max() > 1e5:
+            logging.debug("Resample.")
+            last_node = get_upstreams(dag, 'out')[0]
+            const_op = self.add_subscript(new_sample, 'const1')
+            add_op = self.add_subscript(new_sample, 'add')
+            div_op = self.add_subscript(new_sample, 'div')
+            new_sample[const_op] = [add_op, div_op]
+            init_downstream = new_sample[last_node]
+            for i in range(len(init_downstream)):
+                if init_downstream[i] == 'out':
+                    init_downstream[i] = add_op
+                    break
+            new_sample[last_node] = init_downstream
+            new_sample[add_op] = [div_op]
+            new_sample[div_op] = ['out']
+        return new_sample
+
+    def _check_duplication(self, sample):
+        """Check if the code has been sampled."""
+        if len(self.all_samples) > 0 and sample in list(self.all_samples.loc[:, 'code']):
+            return True
+        else:
+            return False
+        return False
+
+    def _check_equivalence(self, fitness):
+        """Check if the sample is equivalent."""
+        if len(self.population) > 0 and np.isclose(fitness, list(self.population.loc[:, 'fitness']), atol=1e-3,
+                                                   rtol=1e-3).any():
+            return True
+
+        else:
+            return False
+        return False
+
+    def _check_dag_valid(self, sample):
+        """Check if the dag is valid or not."""
+        try:
+            dag = DAG()
+            dag.from_dict(sample)
+            if dag.size() <= MAX_LEN_OF_FORMULA:
+                return True
+            else:
+                return False
+        except Exception:
+            logging.debug('The sample {} is not a valid dag.'.format(sample))
+            return False
+
+    def random_search(self):
+        """Generate a sample by random search."""
+        if len(self.all_samples) > 0:
+            index = random.randint(0, len(self.all_samples) - 1)
+            default_ind = self.all_samples.iat[index, 1]
+        else:
+            default_ind = init_dict
+        ind = copy.deepcopy(default_ind)
+        seed =
random.randint(0, 2) + if seed == 0: + ind = self.insert(ind) + + elif seed == 1: + ind = self.remove(ind) + + else: + ind = self.swap(ind) + return ind + + def ea_search(self): + """Generate a sample by ea search.""" + select_ind = self.select_parent() + ind = copy.deepcopy(select_ind) + seed = random.randint(0, 2) + if seed == 0: + ind = self.insert(ind) + + elif seed == 1: + ind = self.remove(ind) + + else: + ind = self.swap(ind) + return ind + + def update_fitness(self, num_id, sample, fitness): + """Update the fitness of the populaiton.""" + if len(self.population) <= self.population_num: + self.population = self.population.append( + [{"sample_id": num_id, "code": sample, "fitness": fitness}], ignore_index=True) + elif fitness < self.population.at[self.oldest_index, 'fitness']: + if fitness < 0.1: + self.population.iat[self.oldest_index, 0] = num_id + self.population.iat[self.oldest_index, 1] = sample + self.population.iat[self.oldest_index, 2] = fitness + self.oldest_index = (self.oldest_index + 1) % self.population_num + + elif random.random() < 0.8: + self.population.iat[self.oldest_index, 0] = num_id + self.population.iat[self.oldest_index, 1] = sample + self.population.iat[self.oldest_index, 2] = fitness + self.oldest_index = (self.oldest_index + 1) % self.population_num + + else: + prob = max(1 / np.power(fitness / self.population.at[self.oldest_index, 'fitness'], 6), 0.2) + if random.random() < prob: + self.population.iat[self.oldest_index, 0] = num_id + self.population.iat[self.oldest_index, 1] = sample + self.population.iat[self.oldest_index, 2] = fitness + self.oldest_index = (self.oldest_index + 1) % self.population_num + + def select_elite(self): + """Select elite from the population.""" + pop = copy.deepcopy(self.population) + pop.sort_values(by="fitness", axis=0, ascending=True, inplace=True) + elite = pop.iloc[0]["code"] + return elite + + def select_parent(self): + """Select parent from the population.""" + if random.random() < 0.2: + return self.select_elite() + + select_num = int(self.population_num * 0.1) + select_index = random.sample(range(0, self.population_num - 1), select_num) + select_pop = self.population.iloc[select_index, :] + select_pop.sort_values(by="fitness", axis=0, ascending=True, inplace=True) + return select_pop.iloc[0]["code"] + + def insert(self, ind): + """Insert an operation or node.""" + init_ind = copy.deepcopy(ind) + logging.debug("Use random insert.") + all_nodes = self._get_nodes(ind) + ind_size = len(all_nodes) + insert_pos = random.randint(0, ind_size - 2) + insert_node = all_nodes[insert_pos] + + candidate_ops = self.unary_ops + self.binary_ops + select_op = candidate_ops[random.randint(0, len(candidate_ops) - 1)] + insert_binary_op = True if select_op in self.binary_ops else False + if select_op in all_nodes: + select_op = self.add_subscript(ind, select_op) + logging.debug("insert op: {}.".format(select_op)) + dag = DAG() + dag.from_dict(ind) + downstreams = dag.next_nodes(node=insert_node) + if len(downstreams) == 0: + return ind + select_pos = random.randint(0, len(downstreams) - 1) + edges = ind[insert_node] + tmp = edges[select_pos] + edges[select_pos] = select_op + ind[insert_node] = edges + ind[select_op] = [tmp] + + if insert_binary_op: + if random.random() < 0.5: + candidate_insert = [node for node in constant_nodes if node != insert_node] + extra_insert = candidate_insert[random.randint(0, len(candidate_insert) - 1)] + else: + candidate_insert = [node for node in all_nodes if (node != insert_node and node != 'out')] + if 
len(candidate_insert) < 1: + return init_ind + index_extra = random.randint(0, len(candidate_insert) - 1) + extra_insert = candidate_insert[index_extra] + if extra_insert in all_nodes: + ind[extra_insert].append(select_op) + else: + ind[extra_insert] = [select_op] + + return ind + + def remove(self, ind): + """Remove an operation or node.""" + logging.debug("Use random remove.") + init_ind = copy.deepcopy(ind) + all_ops = self._get_nodes(ind) + remove_candidate = [node for node in all_ops if + (node != 'in' and not node.startswith('const') and node != 'out')] + if len(remove_candidate) == 0: + logging.debug("There are no nodes can be removed,remove will not apply. ") + return init_ind + remove_pos = random.randint(0, len(remove_candidate) - 1) + remove_node = remove_candidate[remove_pos] + logging.debug("removed node: {}.".format(remove_node)) + logging.debug("the individual before remove is:{}.".format(ind)) + dag = DAG() + dag.from_dict(ind) + upstreams = get_upstreams(dag, node=remove_node) + downstreams = dag.next_nodes(node=remove_node) + if len(upstreams) == 1: + ind = self._remove_node(ind, remove_node, upstreams[0], downstreams) + elif len(upstreams) == 2: + leaf_index = random.randint(0, 1) + reserve_index = 1 - leaf_index + ind = self._remove_node(ind, remove_node, upstreams[reserve_index], downstreams) + ind = self._remove_node(ind, remove_node, upstreams[leaf_index], []) + ind.pop(remove_node) + for downstream in downstreams: + if downstream.split('-')[0] in self.binary_ops: + dag.from_dict(ind) + if len(get_upstreams(dag, downstream)) != 2: + logging.debug( + "The upstreams of binary_op is smaller than 2, is invalid, remove will not apply.") + return init_ind + logging.debug("the individual after remove is:{}.".format(ind)) + return ind + + def _remove_node(self, ind, removed_node, upstream, downstreams): + """Remove a node and process one of the upstream.""" + edges = ind[upstream] + edges.remove(removed_node) + for downstream in downstreams: + edges.append(downstream) + ind[upstream] = edges + return ind + + def swap(self, ind): + """Swap an operation or node.""" + logging.debug("Use random swap.") + init_ind = copy.deepcopy(ind) + all_nodes = self._get_nodes(ind) + swap_candidate = [node for node in all_nodes if + (node.split('-')[0] != 'in' and not node.startswith('const') and node.split('-')[0] != 'out')] + if len(swap_candidate) == 0: + logging.debug("There are no nodes can be swapped,swap will not apply. 
") + return init_ind + swap_pos = random.randint(0, len(swap_candidate) - 1) + swap_node = swap_candidate[swap_pos] + swap_node_type = swap_node.split('-')[0] + if swap_node_type in self.unary_ops: + candidate_ops = [op for op in self.unary_ops if op != swap_node_type] + candidate_node = candidate_ops[random.randint(0, len(candidate_ops) - 1)] + elif swap_node_type in self.binary_ops: + candidate_ops = [op for op in self.binary_ops if op != swap_node_type] + candidate_node = candidate_ops[random.randint(0, len(candidate_ops) - 1)] + else: + raise ValueError + + if candidate_node in all_nodes: + candidate_node = self.add_subscript(ind, candidate_node) + + dag = DAG() + dag.from_dict(ind) + upstreams = get_upstreams(dag, swap_node) + for upstream in upstreams: + edges = ind[upstream] + index = 0 + for edge in edges: + if edge == swap_node: + break + index += 1 + edges[index] = candidate_node + ind[upstream] = edges + ind[candidate_node] = ind[swap_node] + ind.pop(swap_node) + + return ind + + def add_subscript(self, ind, select_op): + """Add subscript to the node if the same op is used one more time. e.g. exp, exp-1.""" + repeated_op = [name for name in ind.keys() if name.split('-')[0] == select_op] + exist_subscript = [int(name.split('-')[1]) for name in repeated_op if '-' in name] + if len(exist_subscript) == 0: + exist_subscript = [0] + select_op += "-" + select_op += str(max(exist_subscript) + 1) + return select_op + + def _get_nodes(self, ind): + dag = DAG() + dag.from_dict(ind) + return dag.topological_sort() diff --git a/vega/op_search/davinci_op_impl/exp1.py b/vega/op_search/davinci_op_impl/exp1.py new file mode 100644 index 0000000..7463ecf --- /dev/null +++ b/vega/op_search/davinci_op_impl/exp1.py @@ -0,0 +1,35 @@ +# -*- coding: utf-8 -*- + +# Copyright (C) 2020. Huawei Technologies Co., Ltd. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""This is a class for Exp1.""" +from mindspore.ops import prim_attr_register, PrimitiveWithInfer + + +class Exp1(PrimitiveWithInfer): + """Define Exp1 primitive.""" + + @prim_attr_register + def __init__(self): + self.init_prim_io_names(inputs=['x'], outputs=['y']) + from exp1_impl import Exp1Impl + + def infer_shape(self, data_shape): + """Infer shape.""" + return data_shape + + def infer_dtype(self, data_dtype): + """Infer dtype.""" + return data_dtype diff --git a/vega/op_search/davinci_op_impl/exp1_impl.py b/vega/op_search/davinci_op_impl/exp1_impl.py new file mode 100644 index 0000000..9d98b8a --- /dev/null +++ b/vega/op_search/davinci_op_impl/exp1_impl.py @@ -0,0 +1,70 @@ +# -*- coding: utf-8 -*- + +# Copyright (C) 2020. Huawei Technologies Co., Ltd. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""This is a class for Exp1.""" +from __future__ import absolute_import +from te import tvm +from topi import generic +import te.lang.cce +from topi.cce import util +from mindspore.ops.op_info_register import op_info_register, TBERegOp, DataType + + +def exp1_compute(input_x): + """Compute function of the CusSquare implementation.""" + dtype = input_x.dtype + x_mul = te.lang.cce.vmuls(input_x, tvm.const(0.25, dtype)) + add1 = te.lang.cce.vadds(x_mul, tvm.const(1, dtype)) + power2 = te.lang.cce.vmul(add1, add1) + power4 = te.lang.cce.vmul(power2, power2) + + return power4 + + +# Define the kernel info of CusSquare. +exp1_op_info = TBERegOp("Exp1") \ + .fusion_type("ELEMWISE") \ + .partial_flag(True) \ + .async_flag(False) \ + .binfile_name("exp1.so") \ + .compute_cost(10) \ + .kernel_name("Exp1Impl") \ + .input(0, "x", False, "required", "all") \ + .output(0, "y", False, "required", "all") \ + .dtype_format(DataType.F32_Default, DataType.F32_Default) \ + .dtype_format(DataType.F16_Default, DataType.F16_Default) \ + .get_op_info() + + +# Binding kernel info with the kernel implementation. +@op_info_register(exp1_op_info) +def Exp1Impl(input_x, output_y, kernel_name="Exp1Impl"): + """Entry function of the CusSquare implementation.""" + shape = input_x.get("shape") + dtype = input_x.get("dtype").lower() + + shape = util.shape_refine(shape) + data = tvm.placeholder(shape, name="data", dtype=dtype.lower()) + + with tvm.target.cce(): + res = exp1_compute(data) + sch = generic.auto_schedule(res) + + config = {"print_ir": False, + "name": kernel_name, + "tensor_list": [data, res]} + + te.lang.cce.cce_build_code(sch, config) diff --git a/vega/op_search/davinci_op_impl/mish1.py b/vega/op_search/davinci_op_impl/mish1.py new file mode 100644 index 0000000..1f9d5f7 --- /dev/null +++ b/vega/op_search/davinci_op_impl/mish1.py @@ -0,0 +1,35 @@ +# -*- coding: utf-8 -*- + +# Copyright (C) 2020. Huawei Technologies Co., Ltd. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
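A note on exp1_compute above: with vmuls/vadds/vmul it builds ((1 + x/4)^2)^2 = (1 + x/4)^4, a finite-n version of the limit (1 + x/n)^n -> e^x, so it only approximates the exponential and is most accurate near x = 0. The test script further down combines it as x * (1 - 2 / ((1 + Exp1(x))^2 + 1)), which is algebraically equal to Mish when Exp1 is the exact exponential. A small illustrative numpy check (not part of the kernel code):

import numpy as np

x = np.linspace(-2.0, 2.0, 9).astype(np.float32)

exp1 = (1.0 + 0.25 * x) ** 4                       # what exp1_compute builds
mish_via_exp1 = x * (1.0 - 2.0 / ((1.0 + exp1) ** 2 + 1.0))
mish_exact = x * np.tanh(np.log1p(np.exp(x)))      # x * tanh(softplus(x))

print(np.abs(exp1 - np.exp(x)).max())              # kernel approximation error
print(np.abs(mish_via_exp1 - mish_exact).max())    # end-to-end error of the spliced Mish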
+ +"""This is a class for Mish1.""" +from mindspore.ops import prim_attr_register, PrimitiveWithInfer + + +class Mish1(PrimitiveWithInfer): + """Define Mish1 CusSquare primitive.""" + + @prim_attr_register + def __init__(self): + self.init_prim_io_names(inputs=['x'], outputs=['y']) + from mish1_impl import Mish1Impl + + def infer_shape(self, data_shape): + """Infer shape.""" + return data_shape + + def infer_dtype(self, data_dtype): + """Infer dtype.""" + return data_dtype diff --git a/vega/op_search/davinci_op_impl/mish1_impl.py b/vega/op_search/davinci_op_impl/mish1_impl.py new file mode 100644 index 0000000..036555a --- /dev/null +++ b/vega/op_search/davinci_op_impl/mish1_impl.py @@ -0,0 +1,76 @@ +# -*- coding: utf-8 -*- + +# Copyright (C) 2020. Huawei Technologies Co., Ltd. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""This is a class for Mish1.""" +from __future__ import absolute_import +import functools +from te import tvm +from topi import generic +import te.lang.cce +from topi.cce import util +from mindspore.ops.op_info_register import op_info_register, TBERegOp, DataType +from te.utils import para_check + + +def mish1_compute(input_x): + """Define Mish1 implementation.""" + dtype = input_x.dtype + x_exp = te.lang.cce.vexp(input_x) + x_mul = te.lang.cce.vmuls(x_exp, tvm.const(2, dtype)) + x_exp_2 = te.lang.cce.vexp(x_mul) + x_add_1 = te.lang.cce.vadds(x_exp_2, tvm.const(1, dtype)) + x_rec = te.lang.cce.vrec(x_add_1) + x_mul_2 = te.lang.cce.vmuls(x_rec, tvm.const(-2, dtype=dtype)) + x_add_2 = te.lang.cce.vadds(x_mul_2, tvm.const(1, dtype=dtype)) + res = te.lang.cce.vmul(input_x, x_add_2) + + return res + + +# Define the kernel info of CusSquare. +mish1_op_info = TBERegOp("Mish1") \ + .fusion_type("ELEMWISE") \ + .partial_flag(True) \ + .async_flag(False) \ + .binfile_name("mish1.so") \ + .compute_cost(10) \ + .kernel_name("Mish1Impl") \ + .input(0, "x", False, "required", "all") \ + .output(0, "y", False, "required", "all") \ + .dtype_format(DataType.F32_Default, DataType.F32_Default) \ + .dtype_format(DataType.F16_Default, DataType.F16_Default) \ + .get_op_info() + + +# Binding kernel info with the kernel implementation. +@op_info_register(mish1_op_info) +def Mish1Impl(input_x, output_y, kernel_name="Mish1Impl"): + """Entry function of the CusSquare implementation.""" + shape = input_x.get("shape") + dtype = input_x.get("dtype").lower() + + shape = util.shape_refine(shape) + data = tvm.placeholder(shape, name="data", dtype=dtype.lower()) + + with tvm.target.cce(): + res = mish1_compute(data) + sch = generic.auto_schedule(res) + + config = {"print_ir": False, + "name": kernel_name, + "tensor_list": [data, res]} + + te.lang.cce.cce_build_code(sch, config) diff --git a/vega/op_search/davinci_op_impl/test.py b/vega/op_search/davinci_op_impl/test.py new file mode 100644 index 0000000..de264ac --- /dev/null +++ b/vega/op_search/davinci_op_impl/test.py @@ -0,0 +1,174 @@ +# -*- coding: utf-8 -*- + +# Copyright (C) 2020. Huawei Technologies Co., Ltd. 
All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""This is a class for Test.""" +import time +import numpy as np +import mindspore.nn as nn +import mindspore.context as context +from mindspore import Tensor +from exp1 import Exp1 +from mindspore.ops import operations as P + +context.set_context(mode=context.GRAPH_MODE, device_target="Ascend") + + +class My_exp(nn.Cell): + """Mish activation method.""" + + def __init__(self): + super(My_exp, self).__init__() + self.exp1 = Exp1() + + def construct(self, x): + """Forward Mish.""" + return self.exp1(x) + + +class Init_exp(nn.Cell): + """Mish activation method.""" + + def __init__(self): + super(Init_exp, self).__init__() + self.exp = P.Exp() + + def construct(self, x): + """Forward Mish.""" + return self.exp(x) + + +class My_Mish(nn.Cell): + """Mish activation method.""" + + def __init__(self): + super(My_Mish, self).__init__() + self.exp1 = Exp1() + self.pow = P.Pow() + + def construct(self, x): + """Forward Mish.""" + return x * (1 - 2 / (self.pow(1 + self.exp1(x), 2) + 1)) + + +class Init_Mish(nn.Cell): + """Mish net definition.""" + + def __init__(self): + super(Init_Mish, self).__init__() + self.mish = P.Mish() + + def construct(self, x): + """Forward Mish.""" + out = self.mish(x) + return out + + +class Splice_Mish(nn.Cell): + """Mish activation method.""" + + def __init__(self): + super(Splice_Mish, self).__init__() + self.mul = P.Mul() + self.tanh = P.Tanh() + self.softplus = P.Softplus() + + def construct(self, input_x): + """Forward Mish.""" + res1 = self.softplus(input_x) + tanh = self.tanh(res1) + output = self.mul(input_x, tanh) + return output + + +repeat_times = 10000 + +data_size = 32 * 64 * 112 * 112 +test_data = np.linspace(num=data_size, start=-10, stop=10).astype(np.float32) +test_data = test_data.reshape(32, 64, 112, 112) +print(test_data.shape) + +print("the max/min of input:", np.max(test_data), np.min(test_data)) +test_tensor = Tensor(test_data) + +net = Init_exp() +start_time = time.time() +for i in range(repeat_times): + if i == 1: + print("the compile time is:", time.time() - start_time) + out2 = net(test_tensor) + start_time = time.time() + else: + out2 = net(test_tensor) + +end_time = time.time() + +print("cost time of exp(impl by default):", end_time - start_time) + +net = My_exp() +start_time = time.time() +for i in range(repeat_times): + if i == 1: + print("the compile time is:", time.time() - start_time) + out1 = net(test_tensor) + start_time = time.time() + else: + out1 = net(test_tensor) + +end_time = time.time() + +print("cost time of exp(impl by me):", end_time - start_time) + +net = Init_Mish() +start_time = time.time() +for i in range(repeat_times): + if i == 1: + print("the compile time is:", time.time() - start_time) + out4 = net(test_tensor) + start_time = time.time() + else: + out4 = net(test_tensor) + +end_time = time.time() + +print("cost time of mish(impl by default):", end_time - start_time) + +net = Splice_Mish() +start_time = time.time() +for i in range(repeat_times): + if i 
== 1:
+        print("the compile time is:", time.time() - start_time)
+        out3 = net(test_tensor)
+        start_time = time.time()
+    else:
+        out3 = net(test_tensor)
+
+end_time = time.time()
+
+print("cost time of mish(impl by splice):", end_time - start_time)
+
+net = My_Mish()
+start_time = time.time()
+for i in range(repeat_times):
+    if i == 1:
+        print("the compile time is:", time.time() - start_time)
+        out3 = net(test_tensor)
+        start_time = time.time()
+    else:
+        out3 = net(test_tensor)
+
+end_time = time.time()
+
+print("cost time of mish(impl by me):", end_time - start_time)
diff --git a/vega/op_search/generate_data.py b/vega/op_search/generate_data.py
new file mode 100644
index 0000000..935dce9
--- /dev/null
+++ b/vega/op_search/generate_data.py
@@ -0,0 +1,35 @@
+# -*- coding: utf-8 -*-
+
+# Copyright (C) 2020. Huawei Technologies Co., Ltd. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""This script is used to generate data."""
+import numpy as np
+from utils import cal_mish, cal_gelu, cal_softplus, cal_tanh, cal_sqrt
+from vega.common import FileOps
+
+input_data = np.random.random([1, 3, 224, 224]).astype(np.float32) * 20 - 10
+out_mish = cal_mish(input_data)
+out_gelu = cal_gelu(input_data)
+out_softplus = cal_softplus(input_data)
+out_tanh = cal_tanh(input_data)
+sqrt_input_data = np.random.random([1, 3, 224, 224]).astype(np.float32) * 20
+out_sqrt = cal_sqrt(sqrt_input_data)
+if __name__ == "__main__":
+    FileOps.dump_pickle(input_data, "./input.pkl")
+    FileOps.dump_pickle(out_mish, "./out_mish.pkl")
+    FileOps.dump_pickle(out_gelu, "./out_gelu.pkl")
+    FileOps.dump_pickle(out_softplus, "./out_softplus.pkl")
+    FileOps.dump_pickle(out_tanh, "./out_tanh.pkl")
+    FileOps.dump_pickle(out_sqrt, "./out_sqrt.pkl")
diff --git a/vega/op_search/main.py b/vega/op_search/main.py
new file mode 100644
index 0000000..f7c514c
--- /dev/null
+++ b/vega/op_search/main.py
@@ -0,0 +1,100 @@
+# -*- coding: utf-8 -*-
+
+# Copyright (C) 2020. Huawei Technologies Co., Ltd. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
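main.py below accepts a sampled formula only when its fitness is at or below --threshold. The fitness is cal_error_threshold from utils.py: the maximum elementwise value of |a - b| / (|b| + 1), i.e. a relative error damped by +1 in the denominator so that near-zero targets do not blow it up. A minimal numpy sketch, for illustration only:

import numpy as np

def cal_error_threshold(a, b):
    # max |a - b| / (|b| + 1), used as the fitness of a sampled formula
    return (np.abs(a - b) / (np.abs(b) + 1)).max()

target = np.tanh(np.linspace(-10.0, 10.0, 5).astype(np.float32))
candidate = target + 0.05
print(cal_error_threshold(candidate, target))   # small value -> accepted when below the threshold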
+ +"""This is the main script.""" +import logging +import time +import os +from vega.common.arg_parser import argment_parser +from vega.common import FileOps +from vega import security +from vega.common.dag import DAG +from .utils import dag2compute, cal_error_threshold +from .automl_zero import AutomlZero + +logging.basicConfig(level=logging.INFO) + +parser = argment_parser(desc='Automl-zero.') +parser.add_argument('--pop_num', type=int, default=1000, help='population number.') +parser.add_argument('--max_sample', type=int, default=2000000, help='max sample number.') +parser.add_argument('--save_csv', type=str, default='population.csv', help='the csv file to save the population.') +parser.add_argument('--input_data', type=str, default='./input.pkl', help='the input data file.') +parser.add_argument('--output_data', type=str, default='./out_mish.pkl', help='the real output data file.') +parser.add_argument('--threshold', type=int, default=1, help='the real output data file.') +parser.add_argument('--search_space', type=str, default=None, help='the search space yml file') + +args = parser.parse_args() +security.check_args(args) + + +def main(): + """Process of vega op search.""" + nas = AutomlZero(population_num=args.pop_num, max_sample=args.max_sample, search_space=args.search_space) + start_time = time.time() + total_sample = 0 + valid_sample = 0 + invalid_sample = 0 + + input_data = FileOps.load_pickle(args.input_data) + real_output = FileOps.load_pickle(args.output_data) + + csv_file = args.save_csv + if os.path.exists(csv_file): + os.remove(csv_file) + while not nas.is_finished(): + sample = nas.search() + if sample is None: + logging.debug("continue because sample is None.") + continue + logging.debug("Sample a formula: {}.".format(sample)) + dag = DAG() + dag.from_dict(sample) + res = dag2compute(dag, input_data) + + fitness = cal_error_threshold(res, real_output) + if fitness <= args.threshold: + if not nas._check_equivalence(fitness): + valid_sample += 1 + # update population + nas.update_fitness(num_id=total_sample, sample=sample, fitness=fitness) + if fitness < 0.01: + logging.info("number: {} is a perfect sample, the fitness is: {}.".format(total_sample, fitness)) + elif fitness < 0.1: + logging.info("number: {} is a good sample, the fitness is: {}.".format(total_sample, fitness)) + else: + logging.info("number: {} is a valid sample, the fitness is: {}.".format(total_sample, fitness)) + logging.info("The sample is: {}.".format(sample)) + else: + logging.info( + "number: {} is a equivalence, skip it, the fitness if {}.".format(total_sample, fitness)) + else: + invalid_sample += 1 + logging.info( + "number: {} is a invalid sample, because is not close enough, fitness is {}.".format(total_sample, + fitness)) + total_sample += 1 + nas.population.to_csv(csv_file) + + end_time = time.time() + logging.info(f"the best gene is: {nas.select_elite()}") + logging.info(f"total time: {end_time - start_time}") + logging.info(f"total samples: {total_sample}, valid: {valid_sample}, invalid: {invalid_sample}") + logging.info(f"sample per seconds:{total_sample / (end_time - start_time)}") + logging.info(f"valid sample per seconds:{valid_sample / (end_time - start_time)}") + + +if __name__ == "__main__": + main() diff --git a/vega/op_search/mish.py b/vega/op_search/mish.py new file mode 100644 index 0000000..d3b270e --- /dev/null +++ b/vega/op_search/mish.py @@ -0,0 +1,53 @@ +# -*- coding: utf-8 -*- + +# Copyright (C) 2020. Huawei Technologies Co., Ltd. All rights reserved. 
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""This is a class for Mish."""
+import torch
+import torch.nn as nn
+import torch.nn.functional as F
+
+
+class Mish_init(nn.Module):
+    """Define Mish init."""
+
+    def __init__(self):
+        super(Mish_init, self).__init__()
+
+    def forward(self, x):
+        """Forward mish."""
+        return x * torch.tanh(F.softplus(x))
+
+
+class Mish(nn.Module):
+    """Define Mish."""
+
+    def __init__(self):
+        super(Mish, self).__init__()
+
+    def forward(self, x):
+        """Forward mish."""
+        return x * torch.tanh(torch.log(torch.exp(x) + 1))
+
+
+class Mish1(nn.Module):
+    """Define new Mish."""
+
+    def __init__(self):
+        super(Mish1, self).__init__()
+
+    def forward(self, x):
+        """Forward mish."""
+        return x * torch.tanh(torch.exp(x))
diff --git a/vega/op_search/ops.py b/vega/op_search/ops.py
new file mode 100644
index 0000000..5ba946b
--- /dev/null
+++ b/vega/op_search/ops.py
@@ -0,0 +1,141 @@
+# -*- coding: utf-8 -*-
+
+# Copyright (C) 2020. Huawei Technologies Co., Ltd. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
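The three PyTorch modules above reduce to two closed forms: Mish_init and Mish both compute x * tanh(softplus(x)) with softplus(x) = log(1 + exp(x)), while Mish1 is the cheaper searched variant x * tanh(exp(x)). A short numpy comparison, illustrative only:

import numpy as np

x = np.linspace(-5.0, 5.0, 11).astype(np.float32)
mish = x * np.tanh(np.log1p(np.exp(x)))    # x * tanh(softplus(x))
mish1 = x * np.tanh(np.exp(x))             # searched variant from Mish1
print(np.abs(mish - mish1).max())          # how far the two activations differ on this range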
+ +"""Define the operation.""" +import logging +import numpy as np + +input_nodes = ["in"] +constant_nodes = ['const1', 'const2', 'const4', 'const8', 'const16', 'const32'] +output_nodes = ["out"] +unary_ops = ["rec", 'power2', 'power3', 'power4', 'power6', 'power8', 'power16', 'power32', 'negative', 'sin', 'cos', + 'tan', 'tanh', 'exp', 'log', 'abs'] +binary_ops = ["add", "sub", "mul", "div"] + +MAX_LEN_OF_FORMULA = 20 + +constant_values = {'const1': 1, + 'const2': 2, + 'const3': 3, + 'const4': 4, + 'const5': 5, + 'const6': 6, + 'const7': 7, + 'const8': 8, + 'const16': 16, + 'const32': 32, + 'const.5': 0.5} + + +def _factorial(n): + if isinstance(n, int) and n >= 1: + res = 1 + for i in range(1, n + 1): + res = res * i + return res + else: + logging.debug("Only int number support factorial.") + return n + + +compute_funcs = {'abs': lambda x: np.abs(x), + 'exp': lambda x: np.exp(x), + 'log': lambda x: np.log(x), + 'sin': lambda x: np.sin(x), + 'cos': lambda x: np.cos(x), + 'rec': lambda x: 1 / x, + 'tanh': lambda x: np.tanh(x), + 'tan': lambda x: np.tan(x), + 'power2': lambda x: np.power(x, 2), + 'power3': lambda x: np.power(x, 3), + 'power4': lambda x: np.power(x, 4), + 'power5': lambda x: np.power(x, 5), + 'power6': lambda x: np.power(x, 6), + 'power7': lambda x: np.power(x, 7), + 'power8': lambda x: np.power(x, 8), + 'power16': lambda x: np.power(x, 16), + 'power32': lambda x: np.power(x, 32), + 'factorial': lambda x: _factorial(x), + 'negative': lambda x: -1 * x, + # binary ops + 'add': lambda x, y: x + y, + 'sub': lambda x, y: x - y, + 'mul': lambda x, y: x * y, + 'div': lambda x, y: x / y, + } + +invalid_conditions = {'in': [], + 'const1': ['abs', 'rec', 'power2', 'power3', 'div', 'mul', 'factorial'], + 'const2': ['abs'], + 'const3': ['abs'], + 'const4': ['abs'], + 'const5': ['abs'], + 'const6': ['abs'], + 'const7': ['abs'], + 'const8': ['abs'], + 'const.5': ['abs', 'factorial'], + 'exp': ['abs'], + 'power2': ['abs'], + 'power4': ['abs'], + 'power6': ['abs'], + 'abs': ['abs'], + 'rec': ['rec'], + 'negative': ['abs', 'negative'], + } + +mish_dict = {'in': ['exp', 'mul'], + 'const1': ['add'], + 'exp': ['add'], + 'add': ['log'], + 'log': ['tanh'], + 'tanh': ['mul'], + 'mul': ['out'], + 'out': [] + } + +init_dict = {'in': ['out'], + 'out': [] + } + + +def filter_rules(code): + """Filter the valid sample.""" + for op_type in invalid_conditions.keys(): + if not _check_downstream(code, op_type, invalid_conditions[op_type]): + return False + return True + + +def _check_downstream(code, op_type, invalid_downstreams): + for node in code.keys(): + if node.split('-')[0] == op_type: + if code[node] == invalid_downstreams: + return False + for edge in code[node]: + edge_type = edge.split('-')[0] + if edge_type in invalid_downstreams: + return False + + return True + + +def _check_op_exist(code, op_type): + flag = False + for key in code.keys(): + if key.startswith(op_type): + flag = True + break + return flag diff --git a/vega/op_search/utils.py b/vega/op_search/utils.py new file mode 100644 index 0000000..18f5e81 --- /dev/null +++ b/vega/op_search/utils.py @@ -0,0 +1,137 @@ +# -*- coding: utf-8 -*- + +# Copyright (C) 2020. Huawei Technologies Co., Ltd. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""These are some tool function.""" +import logging +import numpy as np +from .ops import constant_values, compute_funcs + + +def get_upstreams(dag, node): + """Return the upstream nodes of this node.""" + nodes = dag.nodes + res = [] + for key, value in nodes.items(): + if node in value: + res.append(key) + return res + + +def dag2compute(dag, input_data): + """Calculate the output according to the dag.""" + wait_cal_list = [] + has_cal_list = [] + res = {} + all_nodes = dag.topological_sort() + for node in all_nodes: + values = {"status": 0, "data": None} + res[node] = values + + logging.debug("The calculation nodes is :{}.".format(dag.nodes)) + for node in all_nodes: + if node.split('-')[0] == 'in': + cal_res = input_data + res[node]['data'] = cal_res + has_cal_list.append(node) + elif node.startswith('const'): + constant_type = node.split('-')[0] + cal_res = constant_values[constant_type] + res[node]['data'] = cal_res + has_cal_list.append(node) + elif node == 'out': + upstream_node = get_upstreams(dag, node)[0] + cal_res = res[upstream_node]['data'] + res[node]['data'] = cal_res + has_cal_list.append(node) + else: + upstream_nodes = get_upstreams(dag, node) + upstream_done = True + inputs = [] + for upstream_node in upstream_nodes: + if res[upstream_node]['data'] is None: + upstream_done = False + wait_cal_list.append(dag.next_nodes(node=upstream_node)) + else: + inputs.append(res[upstream_node]['data']) + + if upstream_done: + node_type = node.split('-')[0] + if len(inputs) == 1: + if node_type in ['rec'] and not isinstance(inputs[0], np.ndarray) and inputs[0] == 0: + logging.debug("To avoid zero div, y will be added a smller number.") + inputs[0] = inputs[0] + 1e-15 + cal_res = compute_funcs[node_type](inputs[0]) + elif len(inputs) == 2: + if node_type in ['div'] and not isinstance(inputs[1], np.ndarray) and inputs[1] == 0: + logging.debug("To avoid zero div, y will be added a smller number.") + inputs[1] = inputs[1] + 1e-15 + cal_res = compute_funcs[node_type](inputs[0], inputs[1]) + else: + raise ValueError("The op {} only support one or two inputs, but got {}.".format(node, len(inputs))) + res[node]['data'] = cal_res + has_cal_list.append(node) + + return res['out']['data'] + + +def cal_mish(x): + """Calculate mish.""" + return x * np.tanh(np.log(np.exp(x) + 1)) + + +def cal_gelu(x): + """Calculate gelu.""" + w = np.sqrt(2 / np.pi) + return 0.5 * x * (1 + np.tanh(w * (x + 0.044715 * x * x * x))) + + +def cal_tanh(x): + """Calculate tanh.""" + return np.tanh(x) + + +def cal_sqrt(x): + """Calculate sqrt.""" + return np.sqrt(x) + + +def cal_softplus(x): + """Calculate softplus.""" + return np.log(np.exp(x) + 1) + + +def cal_error_threshold(a, b): + """Calculate error threshold.""" + threshold = np.abs(a - b) / (np.abs(b) + 1) + return threshold.max() + + +def is_close(arr1, arr2): + """Check if two array is close or not.""" + return isinstance(arr1, np.ndarray) and len(arr1) == len(arr2) and np.allclose(arr1, arr2, atol=1, rtol=1) + + +def is_close_l2(arr1, arr2): + """Check if two array is close or not.""" + return len(arr1) == len(arr2) and np.allclose(arr1, arr2, 
atol=1e-4, rtol=1e-4) + + +def cal_fitness(output, real_value): + """Calculate the relative error of two array.""" + diff = output - real_value + r_error = np.abs(diff) / np.abs(real_value) + mean_error = np.mean(r_error) + return mean_error diff --git a/vega/op_search/visual.py b/vega/op_search/visual.py new file mode 100644 index 0000000..16c7d78 --- /dev/null +++ b/vega/op_search/visual.py @@ -0,0 +1,36 @@ +# -*- coding: utf-8 -*- + +# Copyright (C) 2020. Huawei Technologies Co., Ltd. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""These are some tool function.""" +import subprocess + + +def _write_graph(dict): + with open("test.dot", "w") as file: + file.write("digraph {} {{\n".format("graph_name")) + for node, edges in dict.items(): + node = node.replace('-', '_') + for edge in edges: + edge = edge.replace('-', '_') + file.write("{}->{};\n".format(node, edge)) + file.write("}\n") + + +def visual_dag(dict, save_name): + """Visulize the compute graph.""" + _write_graph(dict) + subprocess.call(f"dot -Tpdf test.dot -o {save_name}.pdf".split(" ")) + subprocess.call(f"dot -Tpng test.dot -o {save_name}.png".split(" ")) diff --git a/vega/quota/__init__.py b/vega/quota/__init__.py index 3dcf94b..3eb7f7d 100644 --- a/vega/quota/__init__.py +++ b/vega/quota/__init__.py @@ -1,6 +1,6 @@ -from vega.common.class_factory import ClassFactory - - -ClassFactory.lazy_register("vega.quota", { - "quota": ["quota:Quota"], -}) +from vega.common.class_factory import ClassFactory + + +ClassFactory.lazy_register("vega.quota", { + "quota": ["quota:Quota"], +}) diff --git a/vega/quota/flops_params.py b/vega/quota/flops_params.py index 0c8f1d3..ab2115c 100644 --- a/vega/quota/flops_params.py +++ b/vega/quota/flops_params.py @@ -1,12 +1,18 @@ # -*- coding:utf-8 -*- # Copyright (C) 2020. Huawei Technologies Co., Ltd. All rights reserved. -# This program is free software; you can redistribute it and/or modify -# it under the terms of the MIT License. -# This program is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# MIT License for more details. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
"""Flops and Parameters Filter.""" @@ -38,7 +44,5 @@ def verify(self, model_desc=None): logger.info(f"params ({params}) or flops ({flops}) out of range.") return result except Exception as e: - import traceback - print(traceback.format_exc()) logging.info(f"Invild model desc: {model_desc}, error: {e}") return False diff --git a/vega/quota/latency.py b/vega/quota/latency.py index 7012b90..c637d96 100644 --- a/vega/quota/latency.py +++ b/vega/quota/latency.py @@ -1,12 +1,18 @@ # -*- coding:utf-8 -*- # Copyright (C) 2020. Huawei Technologies Co., Ltd. All rights reserved. -# This program is free software; you can redistribute it and/or modify -# it under the terms of the MIT License. -# This program is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# MIT License for more details. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. """Flops and Parameters Filter.""" @@ -27,7 +33,7 @@ def verify_on_host(self, model_desc): """Filter function of latency.""" model = ModelZoo.get_model(model_desc) count_input = self.get_input_data() - trainer = vega.trainer(model_desc=model_desc) + trainer = vega.get_trainer(model_desc=model_desc) sess_config = trainer._init_session_config() if vega.is_tf_backend() else None latency = calc_forward_latency_on_host(model, count_input, sess_config) logging.info(f"Sampled model's latency: {latency}ms") diff --git a/vega/quota/model_valid.py b/vega/quota/model_valid.py index 03d4b55..a236bea 100644 --- a/vega/quota/model_valid.py +++ b/vega/quota/model_valid.py @@ -1,18 +1,25 @@ # -*- coding:utf-8 -*- # Copyright (C) 2020. Huawei Technologies Co., Ltd. All rights reserved. -# This program is free software; you can redistribute it and/or modify -# it under the terms of the MIT License. -# This program is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# MIT License for more details. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
"""Model Valid Verification.""" import logging -from .quota_item_base import QuotaItemBase +import vega from vega.model_zoo import ModelZoo +from .quota_item_base import QuotaItemBase class ModelValidVerification(QuotaItemBase): @@ -23,6 +30,9 @@ def verify(self, model_desc): try: model = ModelZoo.get_model(model_desc) count_input = self.get_input_data() + if vega.is_ms_backend(): + from mindspore import context + context.set_context(device_target="CPU") model(count_input) return True except Exception as e: diff --git a/vega/quota/quota.py b/vega/quota/quota.py index 3579918..c68ac64 100644 --- a/vega/quota/quota.py +++ b/vega/quota/quota.py @@ -1,12 +1,18 @@ # -*- coding:utf-8 -*- # Copyright (C) 2020. Huawei Technologies Co., Ltd. All rights reserved. -# This program is free software; you can redistribute it and/or modify -# it under the terms of the MIT License. -# This program is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# MIT License for more details. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. """Quota.""" @@ -17,7 +23,6 @@ from .flops_params import FlopsParamsVerification from .quota_affinity import QuotaAffinity from .latency import LatencyVerification -# from .runtime import RuntimeVerification logger = logging.getLogger(__name__) @@ -106,7 +111,6 @@ def adjuest_pipeline_by_runtime(self, user_config): """Adjuest pipeline by runtime.""" if not self.enable or self.runtime is None: return True - # RuntimeVerification(self.runtime).adjust_config(user_config) return True def verify_metric(self, model_desc): @@ -116,6 +120,4 @@ def verify_metric(self, model_desc): @property def quota_reached(self): """Return True if reach the limits.""" - # runtime|duration, samples|trials, metrics - # get data from report return False diff --git a/vega/quota/quota_affinity.py b/vega/quota/quota_affinity.py index 48b046c..a591fbc 100644 --- a/vega/quota/quota_affinity.py +++ b/vega/quota/quota_affinity.py @@ -1,16 +1,23 @@ # -*- coding:utf-8 -*- # Copyright (C) 2020. Huawei Technologies Co., Ltd. All rights reserved. -# This program is free software; you can redistribute it and/or modify -# it under the terms of the MIT License. -# This program is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# MIT License for more details. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+# See the License for the specific language governing permissions and +# limitations under the License. """Quota for Affinity.""" import logging +import ast import pandas as pd from sklearn import ensemble @@ -40,12 +47,11 @@ def generate_input_space(self, desc): return None space_list = [] for idx in range(len(desc)): - desc_item = eval(desc.iloc[idx]) + desc_item = ast.literal_eval(desc.iloc[idx]) space_dict = {} self.init_space_dict(space_dict) for key, value in desc_item.items(): self.get_space_dict(key, value, space_dict) - # space_dict[_metric_key] = eval(pfms.iloc[idx])[_metric_key] if space_dict: space_list.append(space_dict) return pd.DataFrame(space_list) @@ -53,10 +59,10 @@ def generate_input_space(self, desc): def generate_label(self): """Generate label from affinity report.""" _pfms = self.affinity_report['performance'] - _metric_key = eval(self.affinity_report['_objective_keys'][0])[0] + _metric_key = ast.literal_eval(self.affinity_report['_objective_keys'][0])[0] label_list = [] for pfm in _pfms: - value = eval(pfm)[_metric_key] + value = ast.literal_eval(pfm)[_metric_key] clc = 1 if value > self.standard else 0 label_list.append({_metric_key: clc}) return pd.DataFrame(label_list) @@ -101,7 +107,7 @@ class QuotaAffinity(object): """Generate affinity model of search space, filter bad sample.""" def __init__(self, affinity_cfg): - affinity_class = eval(self.get_affinity_model(affinity_cfg.type)) + affinity_class = ast.literal_eval(self.get_affinity_model(affinity_cfg.type)) self.affinity_model = affinity_class(affinity_cfg.affinity_file, affinity_cfg.affinity_value) self.affinity_model.build_model() diff --git a/vega/quota/quota_item_base.py b/vega/quota/quota_item_base.py index 5ebba90..aa7b59d 100644 --- a/vega/quota/quota_item_base.py +++ b/vega/quota/quota_item_base.py @@ -1,12 +1,18 @@ # -*- coding:utf-8 -*- # Copyright (C) 2020. Huawei Technologies Co., Ltd. All rights reserved. -# This program is free software; you can redistribute it and/or modify -# it under the terms of the MIT License. -# This program is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# MIT License for more details. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. """Quota item base.""" @@ -21,7 +27,7 @@ def get_input_data(self): """Get input data.""" count_input = None dataset_name = PipeStepConfig.dataset.type - dataloader = vega.dataset(dataset_name).loader + dataloader = vega.get_dataset(dataset_name).loader if vega.is_torch_backend(): _iter = iter(dataloader) input_data, _ = _iter.next() diff --git a/vega/report/nsga_iii.py b/vega/report/nsga_iii.py index 74b206b..510db61 100644 --- a/vega/report/nsga_iii.py +++ b/vega/report/nsga_iii.py @@ -1,12 +1,18 @@ # -*- coding: utf-8 -*- # Copyright (C) 2020. Huawei Technologies Co., Ltd. All rights reserved. -# This program is free software; you can redistribute it and/or modify -# it under the terms of the MIT License. 
-# This program is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# MIT License for more details. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. """NSGA-III selection algorithm.""" import numpy as np diff --git a/vega/report/record.py b/vega/report/record.py index b8fc222..6eea814 100644 --- a/vega/report/record.py +++ b/vega/report/record.py @@ -1,12 +1,18 @@ # -*- coding: utf-8 -*- # Copyright (C) 2020. Huawei Technologies Co., Ltd. All rights reserved. -# This program is free software; you can redistribute it and/or modify -# it under the terms of the MIT License. -# This program is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# MIT License for more details. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. """Report.""" diff --git a/vega/report/report_client.py b/vega/report/report_client.py index 3ad1278..f23a8eb 100644 --- a/vega/report/report_client.py +++ b/vega/report/report_client.py @@ -1,23 +1,28 @@ # -*- coding: utf-8 -*- # Copyright (C) 2020. Huawei Technologies Co., Ltd. All rights reserved. -# This program is free software; you can redistribute it and/or modify -# it under the terms of the MIT License. -# This program is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# MIT License for more details. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
"""Report.""" import json import logging from datetime import datetime -from vega.common.file_ops import FileOps -from vega.common.task_ops import TaskOps +from vega.common import FileOps, TaskOps from vega.common.utils import remove_np_value -from .record import ReportRecord from vega.common import MessageClient from vega.common import General, Status, JsonEncoder +from .record import ReportRecord logger = logging.getLogger(__name__) diff --git a/vega/report/report_persistence.py b/vega/report/report_persistence.py index abed40f..78d5a12 100644 --- a/vega/report/report_persistence.py +++ b/vega/report/report_persistence.py @@ -1,19 +1,25 @@ # -*- coding: utf-8 -*- # Copyright (C) 2020. Huawei Technologies Co., Ltd. All rights reserved. -# This program is free software; you can redistribute it and/or modify -# it under the terms of the MIT License. -# This program is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# MIT License for more details. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. """Report.""" import json import logging import os import traceback -import pickle +from pickle import HIGHEST_PROTOCOL from vega.common import FileOps, TaskOps, JsonEncoder, Status @@ -54,8 +60,9 @@ def save_report(self, records): data = self.get_report(records) with open(_file, "w") as f: json.dump(data, f, indent=4, cls=JsonEncoder) - except Exception: - logging.warning(traceback.format_exc()) + except Exception as e: + logging.warning(f"Failed to save report, message: {e}") + logging.debug(traceback.format_exc()) def get_report(self, records): """Save report to `reports.json`.""" @@ -75,15 +82,16 @@ def get_report(self, records): else: data[record.step_name] = [record.to_dict()] return data - except Exception: - logging.warning(traceback.format_exc()) + except Exception as e: + logging.warning(f"Failed to get report, message: {e}") + logging.debug(traceback.format_exc()) def pickle_report(self, records, report_instance): """Pickle report to `.reports`.""" try: _file = os.path.join(TaskOps().step_path, ".reports") _dump_data = [records, report_instance] - with open(_file, "wb") as f: - pickle.dump(_dump_data, f, protocol=pickle.HIGHEST_PROTOCOL) - except Exception: - logging.warning(traceback.format_exc()) + FileOps.dump_pickle(_dump_data, _file, protocol=HIGHEST_PROTOCOL) + except Exception as e: + logging.warning(f"Failed to pickle report, message: {e}") + logging.debug(traceback.format_exc()) diff --git a/vega/report/report_server.py b/vega/report/report_server.py index ac212dc..0431736 100644 --- a/vega/report/report_server.py +++ b/vega/report/report_server.py @@ -1,36 +1,42 @@ # -*- coding: utf-8 -*- # Copyright (C) 2020. Huawei Technologies Co., Ltd. All rights reserved. -# This program is free software; you can redistribute it and/or modify -# it under the terms of the MIT License. 
-# This program is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# MIT License for more details. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. """Report.""" import json import logging import os import glob -import pickle import time import random from copy import deepcopy -import numpy as np -import pandas as pd -from threading import Lock from collections import OrderedDict +from threading import Lock from threading import Thread +import numpy as np +import pandas as pd import vega from vega.common import FileOps, TaskOps from vega.common.general import General -from .record import ReportRecord -from .report_persistence import ReportPersistence from vega.common import MessageServer from vega.common.utils import singleton from vega.common.pareto_front import get_pareto_index +from .record import ReportRecord +from .report_persistence import ReportPersistence + __all__ = ["ReportServer"] logger = logging.getLogger(__name__) @@ -138,8 +144,7 @@ def restore(cls): step_path = TaskOps().step_path _file = os.path.join(step_path, ".reports") if os.path.exists(_file): - with open(_file, "rb") as f: - data = pickle.load(f) + data = FileOps.load_pickle(_file) cls._hist_records = data[0] cls.__instances__ = data[1] @@ -330,8 +335,6 @@ def _dump_report(report_server, persistence): try: persistence.save_report(all_records) - # TODO - # persistence.pickle_report(report_server._hist_records, report_server.__instances__) report_server.backup_output_path() except Exception as e: logging.warning(f"Failed to dump reports, message={str(e)}") diff --git a/vega/security/__init__.py b/vega/security/__init__.py index e69de29..c014103 100644 --- a/vega/security/__init__.py +++ b/vega/security/__init__.py @@ -0,0 +1,25 @@ +# -*- coding:utf-8 -*- + +# Copyright (C) 2020. Huawei Technologies Co., Ltd. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +"""Run pipeline.""" + +__all__ = ["load_config", "get_config", "add_args", "check_args", "check_yml", "check_msg", "post"] + +from .conf import ServerConfig, ClientConfig, Config +from .args import add_args, check_args, check_yml, check_msg +from .post import post +from .conf import load_config, get_config +from .verify_config import check_risky_file diff --git a/vega/security/args.py b/vega/security/args.py new file mode 100644 index 0000000..1a2245b --- /dev/null +++ b/vega/security/args.py @@ -0,0 +1,120 @@ +# -*- coding:utf-8 -*- + +# Copyright (C) 2020. Huawei Technologies Co., Ltd. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""Security args.""" +import os +import re +import yaml + + +def add_args(parser): + """Add security args.""" + _config = parser.add_argument_group(title='security setting') + _config.add_argument("-s", "--security", dest='security', action='store_true', + help="enable safe mode") + return parser + + +def _check_value(value, pattern): + if isinstance(value, str) and len(re.compile(pattern).findall(value)) > 0: + raise ValueError("{} contains invalid characters.".format(value)) + + +def _check_dict(dict_value, pattern): + """Check dict.""" + if not isinstance(dict_value, dict): + return + for item in dict_value: + value = dict_value[item] + if isinstance(value, dict): + _check_dict(value, pattern) + else: + _check_value(value, pattern) + + +def check_msg(msg): + """Check msg.""" + _check_dict(msg, pattern="[^_A-Za-z0-9\\s:/.~-]") + + +def check_args(args): + """Check args.""" + args_dict = vars(args) + _check_dict(args_dict, pattern="[^_A-Za-z0-9:/.~-]") + + +def check_yml(config_yaml): + """Check yml.""" + if config_yaml is None: + raise ValueError("config path can't be None or empty") + if os.stat(config_yaml).st_uid != os.getuid(): + raise ValueError(f"The file {config_yaml} not belong to the current user") + with open(config_yaml) as f: + raw_dict = yaml.safe_load(f) + _check_dict(raw_dict, pattern=r"[^_A-Za-z0-9\s\<\>=\[\]\(\),!\{\}:/.~-]") + + +def check_job_id(job_id): + """Check Job id.""" + if not isinstance(job_id, str): + raise TypeError('"job_id" must be str, not {}'.format(type(job_id))) + _check_value(job_id, pattern="[^_A-Za-z0-9]") + + +def check_input_shape(input_shape): + """Check input shape.""" + if not isinstance(input_shape, str): + raise TypeError('"input_shape" must be str, not {}'.format(type(input_shape))) + _check_value(input_shape, pattern="[^_A-Za-z0-9:,]") + + +def check_out_nodes(out_nodes): + """Check out nodes.""" + if not isinstance(out_nodes, str): + raise TypeError('"out_nodes" must be str, not {}'.format(type(out_nodes))) + _check_value(out_nodes, pattern="[^_A-Za-z0-9:/]") + + +def check_backend(backend): + """Check backend.""" + if backend not in ["tensorflow", "caffe", "onnx", "mindspore"]: + raise ValueError("The backend only support tensorflow, caffe, onnx and mindspore.") + + +def check_hardware(hardware): + """Check hardware.""" + if hardware not in ["Davinci", "Bolt", "Kirin990_npu"]: + raise 
ValueError("The hardware only support Davinci and Bolt.") + + +def check_precision(precision): + """Check precision.""" + if precision.upper() not in ["FP32", "FP16"]: + raise ValueError("The precision only support FP32 and FP16.") + + +def check_repeat_times(repeat_times): + """Check repeat times.""" + MAX_EVAL_EPOCHS = 10000 + if not isinstance(repeat_times, int): + raise TypeError('"repeat_times" must be int, not {}'.format(type(repeat_times))) + if not 0 < repeat_times <= MAX_EVAL_EPOCHS: + raise ValueError("repeat_times {} is not in valid range (1-{})".format(repeat_times, MAX_EVAL_EPOCHS)) + + +def path_verify(path): + """Verify path.""" + return re.sub(r"[^_A-Za-z0-9\/.]", "", path) diff --git a/vega/security/check_env.py b/vega/security/check_env.py new file mode 100644 index 0000000..c394a02 --- /dev/null +++ b/vega/security/check_env.py @@ -0,0 +1,25 @@ +# -*- coding:utf-8 -*- + +# Copyright (C) 2020. Huawei Technologies Co., Ltd. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""Check security env.""" + + +__all__ = ["check_env"] + + +def check_env(args) -> bool: + """Check security env.""" + return True diff --git a/vega/security/conf.py b/vega/security/conf.py new file mode 100644 index 0000000..4e9fa03 --- /dev/null +++ b/vega/security/conf.py @@ -0,0 +1,140 @@ +# -*- coding:utf-8 -*- + +# Copyright (C) 2020. Huawei Technologies Co., Ltd. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""Security config. 
+ +~/.vega/server.ini + +[security] + ca_cert=<~/.vega/car.crt> + server_cert_dask=<~/.vega/server_dask.crt> + server_secret_key_dask=<~/.vega/server_dask.key> + client_cert_dask=<~/.vega/client_dask.crt> + client_secret_key_dask=<~/.vega/ client_dask.key> + +~/.vega/client.ini + +[security] + ca_cert=<~/.vega/car.crt> + client_cert=<~/.vega/client.crt> + client_secret_key=<~/.vega/client.key> + encrypted_password= + key_component_1=<~/.vega/ksmaster_client.dat> + key_component_2=<~/.vega/ksstandby_client.dat> + +""" + +import os +import logging +import configparser +from .verify_config import check_risky_files + + +class Config(): + """Security Config.""" + + def load(self) -> bool: + """Load from config file.""" + if not check_risky_files([self.file_name]): + return False + config = configparser.ConfigParser() + try: + config.read(self.file_name) + except Exception: + logging.error(f"Failed to read setting from {self.file_name}") + return False + if "security" not in config.sections(): + return False + keys = [] + pass_check_keys = ["encrypted_password", "white_list"] + for key in config["security"]: + if key not in self.keys: + return False + setattr(self, key, config.get("security", key)) + if key not in pass_check_keys and not check_risky_files([config.get("security", key)]): + return False + keys.append(key) + if len(keys) != len(self.keys): + missing_keys = list(set(self.keys) - set(keys)) + logging.error(f"setting items {missing_keys} are missing in {self.file_name}") + return False + return True + + +class ServerConfig(Config): + """Security Config.""" + + def __init__(self): + """Initialize.""" + self.ca_cert = None + self.server_cert_dask = None + self.server_secret_key_dask = None + self.client_cert_dask = None + self.client_secret_key_dask = None + self.file_name = os.path.expanduser("~/.vega/server.ini") + self.keys = ["ca_cert", "server_cert_dask", "server_secret_key_dask", "client_cert_dask", + "client_secret_key_dask"] + + +class ClientConfig(Config): + """Security Config.""" + + def __init__(self): + """Initialize.""" + self.ca_cert = None + self.client_cert = None + self.client_secret_key = None + self.encrypted_password = None + self.key_component_1 = None + self.key_component_2 = None + self.white_list = [] + self.file_name = os.path.expanduser("~/.vega/client.ini") + self.keys = [ + "ca_cert", "client_cert", "client_secret_key", "encrypted_password", + "key_component_1", "key_component_2", "white_list"] + + +_server_config = ServerConfig() +_client_config = ClientConfig() + + +def load_config(_type: str) -> bool: + """Load security config.""" + if _type not in ["all", "server", "client"]: + logging.error(f"not support security config type: {_type}") + return False + if _type in ["server", "all"]: + global _server_config + if not _server_config.load(): + logging.error("load server security config fail.") + return False + if _type in ["client", "all"]: + global _client_config + if not _client_config.load(): + logging.error("load client security config fail.") + return False + return True + + +def get_config(_type: str) -> Config: + """Get config.""" + if _type not in ["server", "client"]: + logging.error(f"not support security config type: {_type}") + return False + if _type == "server": + return _server_config + else: + return _client_config diff --git a/vega/security/config_op.py b/vega/security/config_op.py deleted file mode 100644 index 4147f98..0000000 --- a/vega/security/config_op.py +++ /dev/null @@ -1,129 +0,0 @@ -# -*- coding:utf-8 -*- - -# Copyright (C) 2020. 
Huawei Technologies Co., Ltd. All rights reserved. -# This program is free software; you can redistribute it and/or modify -# it under the terms of the MIT License. -# This program is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# MIT License for more details. - -"""Run pipeline.""" - -import configparser -import os -from argparse import ArgumentParser - - -def read_config_file(): - """Read config file and return ConfigParser.""" - vega_config_file = os.path.join(os.environ['HOME'], ".vega", "vega.ini") - if not os.path.exists(vega_config_file): - print(f"Not found vega security configure file: {vega_config_file}") - return None - config = configparser.ConfigParser() - config.read(vega_config_file) - return config - - -def _parse_args(): - parser = ArgumentParser("Vega Configuration") - group_resume = parser.add_mutually_exclusive_group(required=True) - group_resume.add_argument("-i", "--init", action='store_true', - help="init vega security config file") - group_resume.add_argument("-q", "--query", type=str, choices=["sec", "https"], - help="query current vega security setting") - group_resume.add_argument("-s", "--set", type=int, choices=[0, 1], - help="set vega security mode to be on or off") - group_resume.add_argument("-m", "--module", type=str, choices=["https"], - help="set cert/key file of each module") - - group_config = parser.add_argument_group(title='cert key files') - group_config.add_argument("-ca", "--ca-cert", default=None, type=str, - help="ca cert file") - group_config.add_argument("-c", "--cert", default=None, type=str, - help='server cert file') - group_config.add_argument("-p", "--public-key", default=None, type=str, - help="server public key file") - group_config.add_argument("-k", "--secret-key", default=None, type=str, - help="server secret key file") - group_config.add_argument("-ck", "--cli-secret-key", default=None, type=str, - help="client secret key file") - args = parser.parse_args() - return args - - -def _init_config_file(): - vega_dir = os.path.join(os.getenv("HOME"), ".vega") - os.makedirs(vega_dir, exist_ok=True) - vega_config_file = os.path.join(vega_dir, "vega.ini") - if os.path.exists(vega_config_file): - print("vega config file ({}) already exists.".format(vega_config_file)) - return - with open(vega_config_file, "w") as f: - f.write("[security]\n") - f.write("enable=True\n") - f.write("\n") - f.write("[https]\n") - f.write("cert_pem_file=\n") - f.write("secret_key_file=\n") - f.write("\n") - f.write("[limit]\n") - f.write("request_frequency_limit=100/minute\n") - f.write("max_content_length=1000000000\n") - f.write("#white_list=0.0.0.0,127.0.0.1\n") - print("initializing vega config file ({}).".format(vega_config_file)) - - -def _process_cmd(args): - if args.init: - _init_config_file() - return - config = read_config_file() - if not config: - return - if args.query: - config = _process_cmd_query(args, config) - return - if args.set is not None: - if args.set == 1: - config.set("security", "enable", "True") - print("set vega security mode to True") - else: - config.set("security", "enable", "False") - print("set vega security mode to False") - elif args.module is not None: - config = _process_cmd_module(args, config) - with open(os.path.join(os.environ['HOME'], ".vega", "vega.ini"), "w") as f: - config.write(f) - - -def _process_cmd_query(args, config): - if args.query == "sec": - 
print(str(config["security"]["enable"])) - elif args.query == "https": - print("cert_pem_file: {}".format( - config["https"]["cert_pem_file"] if "cert_pem_file" in config["https"] else None)) - print("secret_key_file: {}".format( - config["https"]["secret_key_file"] if "secret_key_file" in config["https"] else None)) - return config - - -def _process_cmd_module(args, config): - print("set cert/key file of module {}".format(args.module)) - if args.module == "https": - if args.cert: - config.set("https", "cert_pem_file", args.cert) - if args.secret_key: - config.set("https", "secret_key_file", args.secret_key) - return config - - -def vega_config_operate(): - """Run pipeline.""" - args = _parse_args() - _process_cmd(args) - - -if __name__ == '__main__': - vega_config_operate() diff --git a/vega/security/kill.py b/vega/security/kill.py deleted file mode 100644 index 8091f74..0000000 --- a/vega/security/kill.py +++ /dev/null @@ -1,218 +0,0 @@ -# -*- coding: utf-8 -*- - -# Copyright (C) 2020. Huawei Technologies Co., Ltd. All rights reserved. -# This program is free software; you can redistribute it and/or modify -# it under the terms of the MIT License. -# This program is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# MIT License for more details. - -"""Kill vega progress.""" - -import os -import signal -import psutil -import time -from vega.common import argment_parser -from vega.tools.query_process import query_process, get_pid, query_processes, get_vega_pids, print_process -from .run_pipeline import check_env - - -def _parse_args(desc): - parser = argment_parser(desc) - group = parser.add_mutually_exclusive_group(required=True) - group.add_argument("-p", "--pid", type=int, - help="kill Vega main process based on the specified process ID") - group.add_argument("-t", "--task_id", type=str, - help="kill Vega main process based on the specified Vega application task ID") - group.add_argument("-a", "--all", action='store_true', - help="kill all Vega main process") - group.add_argument("-f", "--force", action='store_true', - help="Forcibly kill all Vega-related processes even if the main process does not exist") - args = parser.parse_args() - return args - - -def _kill_vega_process(pid): - if not psutil.pid_exists(pid): - print("The Vega process {} does not exist.".format(pid)) - return - if pid not in get_vega_pids(): - print("Process {} is not the main Vega process.".format(pid)) - return - print_process(query_process(pid)) - print("") - _input = input("Do you want kill vega processes? 
[Y/n]: ") - if _input.upper() in ["N", "NO"]: - print("Operation was cancelled.") - os._exit(0) - - spids = _get_sub_processes(pid) - print("Start to kill Vega process {}.".format(pid)) - try: - os.kill(pid, signal.SIGINT) - except Exception: - pass - _wait(3) - spids.append(pid) - not_stoped = _check_exited(spids) - for pid in not_stoped: - try: - os.kill(pid, signal.SIGKILL) - except Exception: - pass - _wait(5) - print("") - not_stoped = _check_exited(not_stoped) - if _check_exited(not_stoped): - print("Warning: The following processes do not exit completely.") - print(not_stoped) - else: - print("All Vega processes have been killed.") - - -def _kill_vega_process_by_task_id(task_id): - pid = get_pid(task_id) - if not pid: - print("Task ID {} is not the task ID of a Vega process.".format(task_id)) - return - _kill_vega_process(pid) - - -def _kill_all_vega_process(): - pids = get_vega_pids() - if not pids: - print("The Vega main program is not found.") - return - - print("Vega processes:") - for key, value in query_processes().items(): - print("{}:".format(key)) - print_process(value) - print("") - _input = input("Do you want kill all vega processes? [Y/n]: ") - if _input.upper() in ["N", "NO"]: - print("Operation was cancelled.") - os._exit(0) - - all_spids = [] - all_spids.extend(pids) - for pid in pids: - spids = _get_sub_processes(pid) - all_spids.extend(spids) - print("Start to kill the Vega process {}".format(pid)) - try: - os.kill(pid, signal.SIGINT) - except Exception: - pass - _wait(3) - not_stoped = _check_exited(all_spids) - for pid in not_stoped: - try: - os.kill(pid, signal.SIGKILL) - except Exception: - pass - _wait(5) - print("") - not_stoped = _check_exited(not_stoped) - if _check_exited(not_stoped): - print("Warning: The following processes do not exit completely.") - print(not_stoped) - else: - print("All Vega processes have been killed.") - - -def _get_sub_processes(pid, cpids=[]): - p = psutil.Process(pid) - for cp in p.children(): - cpid = cp.pid - cpids.append(cpid) - try: - _get_sub_processes(cpid, cpids) - except Exception: - pass - return cpids - - -def _force_kill(): - vega_pids = _get_all_related_processes() - if not vega_pids: - print("No Vega-releted progress found.") - return - - _input = input("Do you want kill all Vega-related processes? 
[Y/n]: ") - if _input.upper() in ["N", "NO"]: - print("Operation was cancelled.") - os._exit(0) - - vega_pids = _get_all_related_processes() - print("Start to kill all Vega-related processes.") - for pid in vega_pids: - try: - os.kill(pid, signal.SIGKILL) - except Exception: - pass - _wait(5) - print("") - not_stoped = _check_exited(vega_pids) - if not_stoped: - print("Warning: The following processes do not exit completely.") - print(not_stoped) - else: - print("All Vega-related processes have been killed.") - - -def _get_all_related_processes(): - pids = psutil.pids() - vega_pids = [] - for pid in pids: - try: - p = psutil.Process(pid) - except Exception: - continue - if p.name() in ["vega", "dask-scheduler", "dask-worker", "vega-main"]: - vega_pids.append(pid) - vega_pids.extend(_get_sub_processes(pid)) - continue - cmd = " ".join(p.cmdline()) - if "/bin/vega-kill" in cmd or "/bin/vega-process" in cmd or "/bin/vega-progress" in cmd: - continue - if "vega.tools.run_pipeline" in cmd or "vega.trainer.deserialize" in cmd or "/bin/vega" in cmd: - vega_pids.append(pid) - vega_pids.extend(_get_sub_processes(pid)) - continue - vega_pids = set(vega_pids) - return vega_pids - - -def _check_exited(pids): - not_killed = [] - for pid in pids: - if psutil.pid_exists(pid): - not_killed.append(pid) - return not_killed - - -def _wait(seconds): - for _ in range(seconds * 2): - print("*", end="", flush=True) - time.sleep(0.5) - - -def _kill(): - if not check_env(): - return - args = _parse_args("Kill Vega processes.") - if args.pid: - _kill_vega_process(args.pid) - elif args.task_id: - _kill_vega_process_by_task_id(args.task_id) - elif args.all: - _kill_all_vega_process() - elif args.force: - _force_kill() - - -if __name__ == "__main__": - _kill() diff --git a/vega/security/kmc/encrypt_key.py b/vega/security/kmc/encrypt_key.py new file mode 100644 index 0000000..7691c1d --- /dev/null +++ b/vega/security/kmc/encrypt_key.py @@ -0,0 +1,121 @@ +# -*- coding:utf-8 -*- + +# Copyright (C) 2020. Huawei Technologies Co., Ltd. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""Load the Certificate and encrypt the passwd.""" + +import argparse +import getpass +import logging +import subprocess +from OpenSSL.crypto import load_certificate, FILETYPE_PEM, load_privatekey +from . 
import kmc +from .utils import check_password_rule + + +def encrypt_mm(origin_mm, key_component_1, key_component_2): + """Encrypt the passwd.""" + ret = kmc.init(key_component_1, key_component_2, 9) + if ret is False: + logging.error("kmc init error.") + return "" + domain_id = 0 + result = kmc.encrypt(domain_id, origin_mm) + kmc.finalize() + return result + + +def validate_certificate(cert, key, origin_mm): + """Validate the certificate.""" + flag = True + with open(key, "r", encoding="utf-8") as f: + key_value = f.read() + try: + load_privatekey(FILETYPE_PEM, key_value, passphrase=origin_mm.encode('utf-8')) + except Exception: + flag = False + logging.error("Wrong PEM.") + return flag + + # check signature algorithm + with open(cert, "r", encoding="utf-8") as f: + cert_value = f.read() + cert_value = load_certificate(FILETYPE_PEM, cert_value) + enc_algorithm = cert_value.get_signature_algorithm() + if enc_algorithm in b'sha1WithRSAEncryption' b'md5WithRSAEncryption': + logging.warning("Insecure encryption algorithm: %s", enc_algorithm) + # check key length + + p1 = subprocess.Popen(["openssl", "x509", "-in", cert, "-text", "-noout"], + stdout=subprocess.PIPE, shell=False) + p2 = subprocess.Popen(["grep", "RSA Public-Key"], stdin=p1.stdout, stdout=subprocess.PIPE, shell=False) + p3 = subprocess.Popen(["tr", "-cd", "[0-9]"], stdin=p2.stdout, stdout=subprocess.PIPE, shell=False) + RSA_key = p3.communicate()[0] + if int(RSA_key) < 2048: + logging.warning("Insecure key length: %d", int(RSA_key)) + return flag + + +def import_certificate(args, origin_mm): + """Load the certificate.""" + # 1.validate private key and certification, if not pass, program will exit + ret = validate_certificate(args.cert, args.key, origin_mm) + if not ret: + logging.error("Validate certificate failed.") + return 0 + + # 2.encrypt private key's passwd. + encrypt = encrypt_mm(origin_mm, args.key_component_1, args.key_component_2) + if not encrypt: + logging.error("kmc encrypt private key error.") + return 0 + logging.warning(f"Encrypt sucuess. The encrypted of your input is {encrypt}") + logging.warning(f"The key components are {args.key_component_1} and {args.key_component_2}, please keep it safe.") + + return True + + +def args_parse(): + """Parse the input args.""" + parser = argparse.ArgumentParser(description='Certificate import') + parser.add_argument("--cert", default="./kmc/config/crt/sever.cert", type=str, + help="The path of certificate file") + parser.add_argument("--key", default='./kmc/config/crt/sever.key', type=str, + help="The path of private Key file.") + parser.add_argument("--key_component_1", default='./kmc/config/ksf/ksmaster.dat', type=str, + help="key material 1.") + parser.add_argument("--key_component_2", default='./kmc/config/ksf/ksstandby.dat', type=str, + help="key material 2.") + + args = parser.parse_args() + + return args + + +def main(): + """Run the encrypt process.""" + args = args_parse() + logging.info("process encrypt begin.") + origin_mm = getpass.getpass("Please enter the password to be encrypted: ") + if not check_password_rule(origin_mm): + logging.info("You should re-generate your server cert/key with following rules:") + logging.info("1. equals to or longer than 8 letters") + logging.info("2. contains at least one digit letter") + logging.info("3. contains at least one capital letter") + logging.info("4. 
contains at least one lowercase letter") + + ret = import_certificate(args, origin_mm) + if not ret: + logging.error("Encrypt failed.") diff --git a/vega/security/kmc/kmc.py b/vega/security/kmc/kmc.py new file mode 100644 index 0000000..2dcf548 --- /dev/null +++ b/vega/security/kmc/kmc.py @@ -0,0 +1,228 @@ +# -*- coding:utf-8 -*- + +# Copyright (C) 2020. Huawei Technologies Co., Ltd. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""Huawei KMC library.""" + +import ctypes +import os +from ctypes.util import find_library +import logging +import platform + +__all__ = ["init", "encrypt", "decrypt", "check_and_update_mk", "update_root_key", "hmac", "hmac_verify", "finalize"] + +_kmc_dll: ctypes.CDLL = None +_libc_dll: ctypes.CDLL = None +ADVANCE_DAY = 3 + + +def hmac(domain_id: int, plain_text: str) -> str: + """Encode HMAC code.""" + p_char = ctypes.c_char_p() + hmac_len = ctypes.c_int(0) + c_plain_text = ctypes.create_string_buffer(plain_text.encode()) + _kmc_dll.KeHmacByDomain.restype = ctypes.c_int + _kmc_dll.KeHmacByDomain.argtypes = [ + ctypes.c_int, ctypes.c_char_p, ctypes.c_int, ctypes.POINTER(ctypes.c_char_p), ctypes.POINTER(ctypes.c_int)] + ret = _kmc_dll.KeHmacByDomain( + domain_id, c_plain_text, len(plain_text), ctypes.byref(p_char), ctypes.pointer(hmac_len)) + if ret != 0: + logging.error(f"failed to call KeHmacByDomain, code={ret}") + value = p_char.value.decode() + ret = _libc_dll.free(p_char) + if ret != 0: + logging.error(f"failed to free resource, code={ret}") + return value + + +def hmac_verify(domain_id: int, plain_text: str, hmac_text: str) -> bool: + """Verify HMAC code.""" + c_plain_text = ctypes.create_string_buffer(plain_text.encode()) + c_hmac_text = ctypes.create_string_buffer(hmac_text.encode()) + _kmc_dll.KeHmacVerifyByDomain.restype = ctypes.c_int + _kmc_dll.KeHmacVerifyByDomain.argtypes = [ + ctypes.c_int, ctypes.c_char_p, ctypes.c_int, ctypes.c_char_p, ctypes.c_int] + ret = _kmc_dll.KeHmacVerifyByDomain(domain_id, c_plain_text, len(plain_text), c_hmac_text, len(c_hmac_text)) + return ret + + +def encrypt(domain_id: int, plain_text: str) -> str: + """Encrypt.""" + p_char = ctypes.c_char_p() + cipher_len = ctypes.c_int(0) + c_plain_text = ctypes.create_string_buffer(plain_text.encode()) + + _kmc_dll.KeEncryptByDomain.restype = ctypes.c_int + _kmc_dll.KeEncryptByDomain.argtypes = [ctypes.c_int, ctypes.c_char_p, ctypes.c_int, ctypes.POINTER(ctypes.c_char_p), + ctypes.POINTER(ctypes.c_int)] + ret = _kmc_dll.KeEncryptByDomain(domain_id, c_plain_text, len(plain_text), ctypes.byref(p_char), + ctypes.pointer(cipher_len)) + if ret != 0: + logging.error("KeEncryptByDomain failed.") + return "" + value = p_char.value.decode() + ret = _libc_dll.free(p_char) + if ret != 0: + logging.error("free memory error. 
ret=%d" % ret) + return value + + +def _decrypt(domain_id: int, cipher_text: str): + """Decrypt.""" + p_char = ctypes.c_char_p() + plain_len = ctypes.c_int(0) + c_cipher_text = ctypes.create_string_buffer(cipher_text.encode()) + _kmc_dll.KeDecryptByDomain.restype = ctypes.c_int + _kmc_dll.KeDecryptByDomain.argtypes = [ctypes.c_int, ctypes.c_char_p, ctypes.c_int, ctypes.POINTER(ctypes.c_char_p), + ctypes.POINTER(ctypes.c_int)] + ret = _kmc_dll.KeDecryptByDomain(domain_id, c_cipher_text, len(cipher_text), ctypes.byref(p_char), + ctypes.pointer(plain_len)) + if ret != 0: + logging.error("KeDecryptByDomain failed.") + return "" + value = p_char.value.decode() + ret = _libc_dll.free(p_char) + if ret != 0: + logging.error("free memory error. ret=%d" % ret) + return value + + +def check_and_update_mk(domain_id: int, advance_day: int) -> bool: + """Check and update mk.""" + ret = _kmc_dll.KeCheckAndUpdateMk(domain_id, advance_day) + if ret != 0: + logging.error(f"failed to call KeCheckAndUpdateMk, code={ret}") + return False + return True + + +def update_root_key() -> bool: + """Update root key.""" + ret = _kmc_dll.KeUpdateRootKey() + if ret != 0: + logging.error(f"failed to call KeUpdateRootKey, code={ret}") + return False + return True + + +def finalize() -> None: + """Finalize.""" + _kmc_dll.KeFinalize.restype = ctypes.c_int + _kmc_dll.KeFinalize.argtypes = [] + _kmc_dll.KeFinalize() + + +def _get_lib_path(): + pkg_path = os.path.dirname(__file__) + if platform.processor() == "x86_64": + return os.path.join(pkg_path, "x86_64/libkmcext.so") + else: + return os.path.join(pkg_path, "aarch64/libkmcext.so") + + +def _load_dll(kmc_dll_path: str) -> None: + global _kmc_dll + if _kmc_dll: + return + global _libc_dll + if _libc_dll: + return + _libc_dll = ctypes.CDLL(find_library("c")) + _kmc_dll = ctypes.CDLL(kmc_dll_path) + + +@ctypes.CFUNCTYPE(None, ctypes.c_int, ctypes.c_char_p) +def _logger(level: ctypes.c_int, msg: ctypes.c_char_p): + logging.info("level:%d, msg:%s" % (level, str(msg))) + + +def _init_log(): + _kmc_dll.KeSetLoggerCallback.restype = None + _kmc_dll.KeSetLoggerCallback.argtypes = [ctypes.CFUNCTYPE(None, ctypes.c_int, ctypes.c_char_p)] + _kmc_dll.KeSetLoggerCallback(_logger) + _kmc_dll.KeSetLoggerLevel.restype = None + _kmc_dll.KeSetLoggerLevel.argtypes = [ctypes.c_int] + _kmc_dll.KeSetLoggerLevel(2) # DISABLE(0),ERROR(1),WARN(2),INFO(3),DEBUG(4),TRACE(5) + + +class KMCConfig(ctypes.Structure): + _fields_ = [ + ("primaryKeyStoreFile", ctypes.c_char * 4096), + ("standbyKeyStoreFile", ctypes.c_char * 4096), + ("domainCount", ctypes.c_int), + ("role", ctypes.c_int), + ("procLockPerm", ctypes.c_int), + ("sdpAlgId", ctypes.c_int), + ("hmacAlgId", ctypes.c_int), + ("semKey", ctypes.c_int) + ] + + +def _init_kmc_config(primary_key_store_file, standby_key_store_file, alg_id, domain_count): + config = KMCConfig() + config.primaryKeyStoreFile = primary_key_store_file.encode() + config.standbyKeyStoreFile = standby_key_store_file.encode() + config.domainCount = domain_count + config.role = 1 # Agent 0; Master 1 + config.procLockPerm = 0o0600 + config.sdpAlgId = alg_id + config.hmacAlgId = 2052 # HMAC_SHA256 2052; HMAC_SHA384 2053 HMAC_SHA512 2054 + config.semKey = 0x20161516 + _kmc_dll.KeInitialize.restype = ctypes.c_int + _kmc_dll.KeInitialize.argtypes = [ctypes.POINTER(KMCConfig)] + return _kmc_dll.KeInitialize(ctypes.byref(config)) + + +def init(primary_key_store_file: str, standby_key_store_file: str, alg_id: int, domain_count=3) -> bool: + """Initialize.""" + if alg_id not in [5, 7, 8, 9]: 
# AES128_CBC, AES256_CBC, AES128_GCM, AES256_GCM + logging.error(f"alg (id={alg_id}) is not legal") + return False + _load_dll(_get_lib_path()) + _init_log() + ret = _init_kmc_config(primary_key_store_file, standby_key_store_file, alg_id, domain_count) + if ret != 0: + logging.error(f"failed to call KeInitialized, code={ret}") + return False + return True + + +def decrypt(cert_pem_file, secret_key_file, key_mm, key_component_1, key_component_2): + """Decrypt the passwd.""" + sdp_alg_id = 9 + # Make sure ssl certificate file exist + ca_file_list = (cert_pem_file, secret_key_file) + for file in ca_file_list: + if file and os.path.exists(file): + continue + else: + logging.error("SSL Certificate files does not exist! Please check config.yaml and cert file.") + raise FileNotFoundError + + primary_keyStoreFile = key_component_1 + standby_keyStoreFile = key_component_2 + ret = init(primary_keyStoreFile, standby_keyStoreFile, sdp_alg_id) + if ret is False: + logging.error("kmc init error.") + raise Exception('ERROR: kmc init failed!') + domain_id = 0 + decrypt_mm = _decrypt(domain_id, key_mm) + if decrypt_mm == "": + logging.error("kmc init error.") + raise Exception('ERROR: kmc init failed!') + check_and_update_mk(domain_id, ADVANCE_DAY) + finalize() + return decrypt_mm diff --git a/vega/security/kmc/utils.py b/vega/security/kmc/utils.py new file mode 100644 index 0000000..f99bf2f --- /dev/null +++ b/vega/security/kmc/utils.py @@ -0,0 +1,44 @@ +# -*- coding:utf-8 -*- + +# Copyright (C) 2020. Huawei Technologies Co., Ltd. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""Some tools.""" +import re +import logging + + +def check_password_rule(password): + """Check password rule.""" + digit_regex = re.compile(r'\d') + upper_regex = re.compile(r'[A-Z]') + lower_regex = re.compile(r'[a-z]') + + if len(password) < 8: + logging.warning("The length must >= 8") + return False + + if len(digit_regex.findall(password)) == 0: + logging.warning("Must contains digit letters") + return False + + if len(upper_regex.findall(password)) == 0: + logging.warning("Must contains capital letters") + return False + + if len(lower_regex.findall(password)) == 0: + logging.warning("Must contains lowercase letters") + return False + + return True diff --git a/vega/security/load_pickle.py b/vega/security/load_pickle.py new file mode 100644 index 0000000..df63f23 --- /dev/null +++ b/vega/security/load_pickle.py @@ -0,0 +1,57 @@ +# Copyright (C) 2020. Huawei Technologies Co., Ltd. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
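# A short usage sketch for the kmc wrapper defined above: initialize with the two
# key-material files, encrypt within domain 0, then release the library. The .dat paths
# are illustrative only; alg_id 9 selects AES256_GCM, as used by encrypt_key.py.
from vega.security.kmc import kmc

if kmc.init("/path/to/ksmaster.dat", "/path/to/ksstandby.dat", 9):
    cipher_text = kmc.encrypt(0, "plain-text-secret")
    kmc.finalize()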
+# See the License for the specific language governing permissions and +# limitations under the License. + +"""Load pickle.""" + +import pickle + +__all__ = ["restricted_loads"] + + +safe_builtins = { + 'vega', + 'torch', + 'torchvision', + 'functools', + 'timm', + 'mindspore', + 'tensorflow', + 'numpy', + 'imageio', + 'collections', +} + + +class RestrictedUnpickler(pickle.Unpickler): + """Restrict unpickler.""" + + def __init__(self, file, fix_imports, encoding, errors, security): + super(RestrictedUnpickler, self).__init__(file=file, fix_imports=fix_imports, encoding=encoding, errors=errors) + self.security = security + + def find_class(self, module, name): + """Find class.""" + _class = super().find_class(module, name) + if self.security: + if module.split('.')[0] in safe_builtins: + return _class + raise pickle.UnpicklingError(f"global '{module}' is forbidden") + else: + return _class + + +def restricted_loads(file, fix_imports=True, encoding="ASCII", errors="strict", security=False): + """Load obj.""" + return RestrictedUnpickler(file, fix_imports=fix_imports, encoding=encoding, errors=errors, + security=security).load() diff --git a/vega/security/post.py b/vega/security/post.py new file mode 100644 index 0000000..a5110e1 --- /dev/null +++ b/vega/security/post.py @@ -0,0 +1,57 @@ +# Copyright (C) 2020. Huawei Technologies Co., Ltd. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
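# Usage sketch for restricted_loads defined above: with security=True only globals whose
# top-level package is listed in safe_builtins are resolved, anything else raises
# pickle.UnpicklingError. The file name "model.pkl" is illustrative only.
from vega.security.load_pickle import restricted_loads

with open("model.pkl", "rb") as f:
    obj = restricted_loads(f, security=True)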
+
+"""Rest post operation in security mode."""
+
+import urllib
+import json
+import logging
+import requests
+from .conf import get_config
+from .utils import create_context
+from .args import check_msg
+from .verify_cert import verify_cert
+
+
+def post(host, files, data):
+    """Post a REST request in security mode."""
+    sec_cfg = get_config('client')
+
+    ca_file = sec_cfg.ca_cert
+    cert_pem_file = sec_cfg.client_cert
+    secret_key_file = sec_cfg.client_secret_key
+    encrypted_password = sec_cfg.encrypted_password
+    key_component_1 = sec_cfg.key_component_1
+    key_component_2 = sec_cfg.key_component_2
+
+    if not cert_pem_file or not secret_key_file or not ca_file:
+        logging.error("CERT file does not exist.")
+
+    if not verify_cert(ca_file, cert_pem_file):
+        logging.error(f"The cert {ca_file} and {cert_pem_file} are invalid, please check.")
+
+    if encrypted_password == "":
+        context = create_context(ca_file, cert_pem_file, secret_key_file)
+    else:
+        context = create_context(ca_file, cert_pem_file, secret_key_file, encrypted_password, key_component_1,
+                                 key_component_2)
+    if host.lower().startswith('https') is False:
+        raise Exception(f'The host {host} must start with https')
+    prepped = requests.Request(method="POST", url=host, files=files, data=data).prepare()
+    request = urllib.request.Request(host, data=prepped.body, method='POST')
+    request.add_header("Content-Type", prepped.headers['Content-Type'])
+    response = urllib.request.urlopen(request, context=context)  # nosec
+    result = json.loads(response.read().decode('utf8'))
+    check_msg(dict((key, value) for key, value in result.items() if key != 'error_message'))
+    return result
diff --git a/vega/security/query_process.py b/vega/security/query_process.py
deleted file mode 100644
index 6a7e9bf..0000000
--- a/vega/security/query_process.py
+++ /dev/null
@@ -1,193 +0,0 @@
-# -*- coding: utf-8 -*-
-
-# Copyright (C) 2020. Huawei Technologies Co., Ltd. All rights reserved.
-# This program is free software; you can redistribute it and/or modify
-# it under the terms of the MIT License.
-# This program is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# MIT License for more details.
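# Usage sketch for the secure post() helper above; the endpoint URL and payload are
# illustrative only, and a client-side security config (~/.vega/client.ini) must have
# been loaded first.
from vega.security import load_config, post

if load_config("client"):
    result = post(host="https://127.0.0.1:8888/evaluate", files=None, data={"action": "query"})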
- -"""Query vega process.""" - -import psutil -import json -import time -from psutil import _pprint_secs -from vega.common import MessageServer, MessageClient, argment_parser -from .run_pipeline import check_env - - -__all__ = [ - "query_task_info", "get_pid", "is_vega_process", "get_vega_pids", - "query_process", "query_processes", "print_process", "print_processes", -] - - -def _parse_args(desc): - parser = argment_parser(desc) - parser.add_argument("-j", "--json", action='store_true', - help="return json format string") - args = parser.parse_args() - return args - - -def get_vega_pids(): - """Get vega pids.""" - pids = psutil.pids() - vega_pids = [] - for pid in pids: - if is_vega_process(pid): - try: - p = psutil.Process(pid) - except Exception: - continue - ppid = p.ppid() - if ppid in [_pid for (_pid, _ppid) in vega_pids]: - continue - if pid in [_ppid for (_pid, _ppid) in vega_pids]: - vega_pids = [(_pid, _ppid) for (_pid, _ppid) in vega_pids if _ppid != pid] - vega_pids.append((pid, ppid)) - continue - vega_pids.append((pid, ppid)) - return [_pid for (_pid, _ppid) in vega_pids] - - -def get_task_id_path_port(pid): - """Get task id.""" - try: - p = psutil.Process(pid) - for connection in p.connections(): - port = connection.laddr.port - ip = connection.laddr.ip - if port in range(MessageServer().min_port, MessageServer().max_port): - client = MessageClient(ip=ip, port=port, timeout=1) - result = client.send(action="query_task_info") - if isinstance(result, dict) and "task_id" in result: - return result.get("task_id"), result.get("base_path"), ip, port - return None, None, None, None - except Exception: - return None, None, None, None - - -def get_pid(task_id): - """Get process id.""" - processes = query_processes() - for process in processes.values(): - if "task_id" in process and task_id == process["task_id"]: - return process["PID"] - return None - - -def is_vega_process(pid): - """Is it vega process.""" - try: - p = psutil.Process(pid) - if p.name().startswith("vega-main"): - return True - except Exception: - return False - return False - - -def _print_processes_info(processes): - if processes: - print("Vega processes:") - for id in processes: - print("{}:".format(id)) - process = processes[id] - print_process(process) - if "task_id" in process and process["task_id"] != "Unknown": - _pid = process["PID"] - _task_id = process["task_id"] - _cwd = process["cwd"] - _base_path = process["base_path"] - if "_pid" in locals(): - print("") - if _task_id != "Unknown": - print("Query progress:") - print(f" vega-progress -t {_task_id} -r {_base_path}") - print("") - print("Kill process:") - print(f" vega-kill -p {_pid}") - if _task_id != "Unknown": - print(f" vega-kill -t {_task_id}") - print("") - else: - print("The Vega main program is not found.") - - -def print_process(process): - """Print process info.""" - if "task_id" in process: - print(" PID: {}".format(process["PID"])) - print(" task id: {}".format(process["task_id"])) - print(" cwd: {}".format(process["cwd"])) - print(" user: {}".format(process["user"])) - print(" start at: {}".format(process["create_time"])) - print(" cmdline: {}".format(process["cmdline"])) - else: - print(" PID: {}".format(process["PID"])) - print(" message: {}".format(process["message"])) - - -def query_process(pid): - """Query process info.""" - try: - p = psutil.Process(pid) - (task_id, base_path, ip, port) = get_task_id_path_port(pid) - return { - "PID": pid, - "cmdline": p.cmdline()[2:], - "create_time": _pprint_secs(p.create_time()), - "cwd": p.cwd(), - 
"task_id": task_id if task_id is not None else "Unknown", - "base_path": base_path if base_path is not None else "Unknown", - "user": p.username(), - "ip": ip, - "port": port, - "running_seconds": int(time.time() - p.create_time()), - } - except Exception as e: - return { - "PID": pid, - "message": str(e), - } - - -def query_task_info(task_id): - """Query task info.""" - pids = get_vega_pids() - if pids: - for id, pid in enumerate(pids): - info = query_process(pid) - if isinstance(info, dict) and info.get("task_id", None) == task_id: - return info - return None - - -def query_processes(): - """Query all process.""" - pids = get_vega_pids() - infos = {} - if pids: - for id, pid in enumerate(pids): - infos[str(id)] = query_process(pid) - return infos - - -def print_processes(): - """Print all processes.""" - if not check_env(): - return - """Print all processes.""" - args = _parse_args("Quey Vega processes.") - processes = query_processes() - if args.json: - print(json.dumps(processes, indent=4)) - else: - _print_processes_info(processes) - - -if __name__ == "__main__": - print_processes() diff --git a/vega/security/query_progress.py b/vega/security/query_progress.py deleted file mode 100644 index 3b25c2f..0000000 --- a/vega/security/query_progress.py +++ /dev/null @@ -1,175 +0,0 @@ -# -*- coding: utf-8 -*- - -# Copyright (C) 2020. Huawei Technologies Co., Ltd. All rights reserved. -# This program is free software; you can redistribute it and/or modify -# it under the terms of the MIT License. -# This program is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# MIT License for more details. - -"""Inference of vega model.""" - -import os -import json -import time -from datetime import datetime -from vega.common import Status, JsonEncoder, DatatimeFormatString, argment_parser -from vega.tools.query_process import query_task_info -from vega.common import MessageClient -from .run_pipeline import check_env - - -__all__ = ["query_progress"] - - -def _parse_args(desc): - parser = argment_parser(desc) - parser.add_argument("-t", "--task_id", type=str, required=True, - help="vega application task id") - parser.add_argument("-r", "--root_path", type=str, required=True, - help="root path where vega application is running") - args = parser.parse_args() - return args - - -def _get_report_path(root_path, task_id): - task_path = os.path.join(root_path, task_id) - report_path = os.path.join(task_path, "output/reports.json") - return report_path - - -def _load_report(report_path): - try: - with open(report_path, "r") as f: - return json.load(f) - except Exception: - return None - - -def _parse_report(report): - if "_steps_" not in report: - return { - "status": Status.error, - "message": "Invalid report file." 
- } - - progress = { - "steps": report["_steps_"] - } - - model_keys = [ - "worker_id", "status", "message", "current_epoch", "num_epochs", - "start_time", "end_time", "model_path", "performance" - ] - - for step in progress["steps"]: - step_name = step["step_name"] - if step_name not in report: - continue - step["models"] = report[step_name] - for model in step["models"]: - keys = list(model.keys()) - for key in keys: - if key not in model_keys: - model.pop(key) - return progress - - -def _statistic_progress(progress): - # count epochs and models - for step in progress["steps"]: - finished_models = 0 - finished_epochs = 0 - if "models" not in step: - continue - for model in step["models"]: - if model["status"] in [Status.finished.value, Status.finished]: - finished_models += 1 - finished_epochs += model["current_epoch"] - else: - current_epoch = max((model["current_epoch"] - 1), 0) if "current_epoch" in model else 0 - finished_epochs += current_epoch - step["finished_models"] = finished_models - step["finished_epochs"] = finished_epochs - # calc time - for step in progress["steps"]: - step["estimated_end_time"] = None - if step["status"] == Status.running.value: - if "finished_epochs" in step and step["finished_epochs"] != 0: - start_time = datetime.strptime(step["start_time"], DatatimeFormatString) - delta = datetime.now() - start_time - delta = delta * (step["num_epochs"] - step["finished_epochs"]) / step["finished_epochs"] - step["estimated_end_time"] = datetime.now() + delta - # count status - all_finished = True - progress["status"] = Status.running - for step in progress["steps"]: - if step["status"] in [Status.error.value, Status.error]: - progress["status"] = Status.error - progress["message"] = step["message"] - all_finished = False - break - if step["status"] not in [Status.finished.value, Status.finished]: - all_finished = False - break - if all_finished: - progress["status"] = Status.finished - - return progress - - -def _query_report(task_info): - """Get task id.""" - try: - port = task_info["port"] - ip = task_info["ip"] - client = MessageClient(ip=ip, port=port, timeout=1) - return client.send(action="query_report") - except Exception: - return None - - -def query_progress(times=0): - """Query vega progress.""" - args = _parse_args("Query Vega progress.") - task_info = query_task_info(args.task_id) - - if not task_info: - report_path = _get_report_path(args.root_path, args.task_id) - if not os.path.exists(report_path): - times += 1 - if times <= 3: - time.sleep(0.5) - query_progress(times) - else: - return json.dumps({ - "status": Status.error, - "message": "The task does not exist, please check root path and task id." - }, cls=JsonEncoder, indent=4) - report = _load_report(report_path) - else: - report = _query_report(task_info) - if not report: - return json.dumps({ - "status": Status.error, - "message": "Failed to query progress." - }, cls=JsonEncoder, indent=4) - - progress = _parse_report(report) - progress = _statistic_progress(progress) - if progress["status"] == Status.running and not task_info: - progress["status"] = Status.stopped - - return json.dumps(progress, cls=JsonEncoder, indent=4) - - -def print_progress(): - if not check_env(): - return - """Print progress.""" - print(query_progress()) - - -if __name__ == "__main__": - print_progress() diff --git a/vega/security/rest.py b/vega/security/rest.py deleted file mode 100644 index 6d74cee..0000000 --- a/vega/security/rest.py +++ /dev/null @@ -1,27 +0,0 @@ -# -*- coding: utf-8 -*- - -# Copyright (C) 2020. 
Huawei Technologies Co., Ltd. All rights reserved. -# This program is free software; you can redistribute it and/or modify -# it under the terms of the MIT License. -# This program is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# MIT License for more details. - -"""Rest operation.""" - -import requests -from vega.common import General - - -def post(host, files, data): - """Post a REST requstion.""" - if General.security_setting.get("security").get("enable"): - pem_file = General.security_setting.get("https").get("cert_pem_file") - if not pem_file: - print("CERT file ({}) is not existed.".format(pem_file)) - result = requests.post(host, files=files, data=data, proxies={"https": None}, verify=pem_file) - else: - result = requests.post(host, files=files, data=data, proxies={"http": None}) - data = result.json() - return data diff --git a/vega/security/run_dask.py b/vega/security/run_dask.py index 73ee698..f403954 100644 --- a/vega/security/run_dask.py +++ b/vega/security/run_dask.py @@ -1,73 +1,121 @@ # -*- coding: utf-8 -*- # Copyright (C) 2020. Huawei Technologies Co., Ltd. All rights reserved. -# This program is free software; you can redistribute it and/or modify -# it under the terms of the MIT License. -# This program is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# MIT License for more details. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
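# A sketch of how the TLS-enabled helpers defined in this module are wired together by the
# caller; the IP address, port, and paths below are illustrative only.
from vega.security.run_dask import (
    get_address_security, get_client_security,
    run_local_worker_security, run_scheduler_security)

scheduler = run_scheduler_security("127.0.0.1", 8786, tmp_file="/tmp/vega/scheduler.json")
address = get_address_security("127.0.0.1", 8786)            # -> "tls://127.0.0.1:8786"
worker = run_local_worker_security("127.0.0.1", address, local_dir="/tmp/vega/worker")
client = get_client_security(address)                        # distributed.Client over TLS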
"""Run dask scheduler and worker.""" import os import subprocess import shutil +import logging +import socket +import random from distributed import Client -from vega.common.utils import get_available_port +from distributed.security import Security +from .conf import get_config +from .verify_cert import verify_cert -def get_client(address): - """Get dask client.""" - return Client(address) +sec_cfg = get_config('server') -def get_address(master_host, master_port): - """Get master address.""" - return "tcp://{}:{}".format(master_host, master_port) +def get_client_security(address): + """Get client.""" + address = address.replace("tcp", "tls") + if not verify_cert(sec_cfg.ca_cert, sec_cfg.client_cert_dask): + logging.error(f"The cert {sec_cfg.ca_cert} and {sec_cfg.client_cert_dask} are invalid, please check.") + sec = Security(tls_ca_file=sec_cfg.ca_cert, + tls_client_cert=sec_cfg.client_cert_dask, + tls_client_key=sec_cfg.client_secret_key_dask, + require_encryption=True) + return Client(address, security=sec) -def run_scheduler(port): +def get_address_security(master_host, master_port): + """Get address.""" + return "tls://{}:{}".format(master_host, master_port) + + +def run_scheduler_security(ip, port, tmp_file): """Run scheduler.""" - dashboard_port = get_available_port(min_port=30000, max_port=30999) - """Run dask-scheduler.""" + if not verify_cert(sec_cfg.ca_cert, sec_cfg.server_cert_dask): + logging.error(f"The cert {sec_cfg.ca_cert} and {sec_cfg.server_cert_dask} are invalid, please check.") return subprocess.Popen( [ "dask-scheduler", - "" "--no-dashboard", "--no-show", - "--host=127.0.0.1", - port, - f"--dashboard-address={dashboard_port}" + f"--tls-ca-file={sec_cfg.ca_cert}", + f"--tls-cert={sec_cfg.server_cert_dask}", + f"--tls-key={sec_cfg.server_secret_key_dask}", + f"--host={ip}", + "--protocol=tls", + f"--port={port}", + f"--scheduler-file={tmp_file}", + f"--local-directory={os.path.dirname(tmp_file)}", ], env=os.environ ) -def run_local_worker(address, local_dir): +def _available_port(min_port, max_port) -> int: + _sock = socket.socket() + while True: + port = random.randint(min_port, max_port) + try: + _sock.bind(('', port)) + _sock.close() + return port + except Exception: + logging.debug('Failed to get available port, continue.') + continue + return None + + +def run_local_worker_security(slave_ip, address, local_dir): """Run dask-worker on local node.""" - work_port = get_available_port(min_port=31000, max_port=31999) - dashboard_address = get_available_port(min_port=33000, max_port=33999) - return subprocess.Popen( + address = address.replace("tcp", "tls") + nanny_port = _available_port(30000, 30999) + worker_port = _available_port(29000, 29999) + pid = subprocess.Popen( [ "dask-worker", address, '--nthreads=1', '--nprocs=1', '--memory-limit=0', - local_dir, + f"--local-directory={local_dir}", + f"--tls-ca-file={sec_cfg.ca_cert}", + f"--tls-cert={sec_cfg.client_cert_dask}", + f"--tls-key={sec_cfg.client_secret_key_dask}", "--no-dashboard", - f'--listen-address=tcp://127.0.0.1:{work_port}', - '--nanny-port=32000:32999', - f'--dashboard-address={dashboard_address}' + f"--host={slave_ip}", + "--protocol=tls", + f"--nanny-port={nanny_port}", + f"--worker-port={worker_port}", ], env=os.environ ) + return pid -def run_remote_worker(slave_ip, address, local_dir): +def run_remote_worker_security(slave_ip, address, local_dir): """Run dask-worker on remote node.""" - return subprocess.Popen( + address = address.replace("tcp", "tls") + nanny_port = _available_port(30000, 30999) + 
worker_port = _available_port(29000, 29999) + pid = subprocess.Popen( [ "ssh", slave_ip, @@ -76,7 +124,16 @@ def run_remote_worker(slave_ip, address, local_dir): '--nthreads=1', '--nprocs=1', '--memory-limit=0', - local_dir + f"--local-directory={local_dir}", + f"--tls-ca-file={sec_cfg.ca_cert}", + f"--tls-cert={sec_cfg.client_cert_dask}", + f"--tls-key={sec_cfg.client_secret_key_dask}", + "--no-dashboard", + f"--host={slave_ip}", + "--protocol=tls", + f"--nanny-port={nanny_port}", + f"--worker-port={worker_port}", ], env=os.environ ) + return pid diff --git a/vega/security/run_flask.py b/vega/security/run_flask.py deleted file mode 100644 index 5a1d416..0000000 --- a/vega/security/run_flask.py +++ /dev/null @@ -1,190 +0,0 @@ -# -*- coding: utf-8 -*- - -# Copyright (C) 2020. Huawei Technologies Co., Ltd. All rights reserved. -# This program is free software; you can redistribute it and/or modify -# it under the terms of the MIT License. -# This program is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# MIT License for more details. - -"""Run Flask.""" - -import configparser -import getpass -import logging -import re -import os -import ssl -import stat -import gevent -from gevent import pywsgi - - -security_mode = True -cert_pem_file = "" -secret_key_file = "" -white_list = None -request_frequency_limit = "100/minute" -max_content_length = 1000 * 1000 * 1000 - - -def load_security_setting(): - """Load security settings.""" - home = os.environ['HOME'] - config_file = os.path.join(home, ".vega/vega.ini") - if not os.path.exists(config_file): - print(f"Not found configure file: {config_file}") - return False - config = configparser.ConfigParser() - config.read(config_file) - if "limit" in config: - global white_list - global request_frequency_limit - global max_content_length - if "white_list" in config["limit"]: - white_list = config["limit"]["white_list"].split(',') - if "request_frequency_limit" in config["limit"]: - request_frequency_limit = config["limit"]["request_frequency_limit"] - if "max_content_length" in config["limit"]: - max_content_length = int(config["limit"]["max_content_length"]) - if "security" not in config or "enable" not in config["security"]: - print(f"Invalid config file: {config_file},security field must be included") - return False - global security_mode - security_mode = True if config["security"]["enable"].upper() == "TRUE" else False - if security_mode: - if "https" not in config or \ - "cert_pem_file" not in config["https"] or \ - "secret_key_file" not in config["https"]: - print(f"Invalid config file: {config_file},https field must be included") - return False - https_config = config["https"] - global cert_pem_file - global secret_key_file - if not os.path.exists(https_config['cert_pem_file']): - print(f"CERT file ({https_config['cert_pem_file']}) is not existed.") - return False - if not os.path.exists(https_config['secret_key_file']): - print(f"KEY file ({https_config['secret_key_file']}) is not existed.") - return False - cert_pem_file = https_config['cert_pem_file'] - secret_key_file = https_config['secret_key_file'] - - if not check_cert_key_file(cert_pem_file, secret_key_file): - return False - return True - - -def check_cert_key_file(cert_file, key_file): - """Check if cert and key file are risky.""" - res = True - for file in (cert_file, key_file): - if not os.stat(file).st_uid == os.getuid(): - logging.error("File <{}> is 
not owned by current user".format(file)) - res = False - if os.path.islink(file): - logging.error("File <{}> should not be soft link".format(file)) - res = False - if os.stat(file).st_mode & 0o0077: - logging.error("file <{}> is accessible by group/other users".format(file)) - res = False - - return res - - -def get_white_list(): - """Get white list.""" - global white_list - return white_list - - -def get_request_frequency_limit(): - """Get request frequncy limit.""" - global request_frequency_limit - return request_frequency_limit - - -def get_max_content_length(): - """Get max contect length.""" - global max_content_length - return max_content_length - - -def check_password_rule(password): - """Check password rule.""" - digit_regex = re.compile(r'\d') - upper_regex = re.compile(r'[A-Z]') - lower_regex = re.compile(r'[a-z]') - - if len(password) < 8: - print("The length of your password must >= 8") - return False - - if len(digit_regex.findall(password)) == 0: - print("Your password must contains digit letters") - return False - - if len(upper_regex.findall(password)) == 0: - print("Your password must contains capital letters") - return False - - if len(lower_regex.findall(password)) == 0: - print("Your password must contains lowercase letters") - return False - - return True - - -def get_secret_key_passwd(): - """Get secret key password.""" - password = getpass.getpass("Please input password of your server key: ") - - if not check_password_rule(password): - print("You should re-generate your server cert/key by a password with following rules:") - print("1. equals to or longer than 8 letters") - print("2. contains at least one digit letter") - print("3. contains at least one capital letter") - print("4. contains at least one lowercase letter") - return None - - return password - - -def run_flask(app, host, port): - """Run flask.""" - if not load_security_setting(): - return - - app.config['MAX_CONTENT_LENGTH'] = get_max_content_length() - - global security_mode - if security_mode: - ciphers = "ECDHE-ECDSA-AES128-CCM:ECDHE-ECDSA-AES256-CCM:ECDHE-ECDSA-AES128-GCM-SHA256"\ - ":ECDHE-ECDSA-AES256-GCM-SHA384:ECDHE-RSA-AES128-GCM-SHA256:ECDHE-RSA-AES256-GCM-SHA384"\ - ":DHE-RSA-AES128-GCM-SHA256:DHE-RSA-AES256-GCM-SHA384:DHE-DSS-AES128-GCM-SHA256"\ - ":DHE-DSS-AES256-GCM-SHA384:DHE-RSA-AES128-CCM:DHE-RSA-AES256-CCM" - password = get_secret_key_passwd() - if password is None: - return - global cert_pem_file - global secret_key_file - context = ssl.SSLContext(ssl.PROTOCOL_TLSv1_2) - context.set_ciphers(ciphers) - context.load_cert_chain(certfile=cert_pem_file, keyfile=secret_key_file, password=password) - server = pywsgi.WSGIServer((host, port), app, ssl_context=context) - else: - server = pywsgi.WSGIServer((host, port), app) - - server.init_socket() - server._stop_event.clear() - - def server_forever(): - server.start_accepting() - print("server started.") - server._stop_event.wait() - gevent.wait() - - from multiprocessing import Process - p = Process(target=server_forever) - p.start() diff --git a/vega/security/run_pipeline.py b/vega/security/run_pipeline.py deleted file mode 100644 index 70125e0..0000000 --- a/vega/security/run_pipeline.py +++ /dev/null @@ -1,378 +0,0 @@ -# -*- coding:utf-8 -*- - -# Copyright (C) 2020. Huawei Technologies Co., Ltd. All rights reserved. -# This program is free software; you can redistribute it and/or modify -# it under the terms of the MIT License. 
-# This program is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# MIT License for more details. - -"""Run pipeline.""" -import configparser -import logging -import os -import stat -import sys -import vega -from copy import deepcopy - -from vega.common.general import General -from vega.common.config import Config -from vega.common.utils import verify_requires -from vega.common import argment_parser - - -def _append_env(): - dir_path = os.getcwd() - sys.path.insert(0, dir_path) - if "PYTHONPATH" not in os.environ: - os.environ["PYTHONPATH"] = dir_path - else: - os.environ["PYTHONPATH"] += ":{}".format(dir_path) - - -def _parse_args(): - parser = argment_parser("Run Vega") - parser.add_argument("config_file", default=None, type=str, - help="Pipeline config file name") - group_backend = parser.add_argument_group( - title="set backend and device, priority: specified in the command line > " - "specified in the configuration file > default settings(pytorch and GPU)") - group_backend.add_argument("-b", "--backend", default=None, type=str, - choices=["pytorch", "p", "tensorflow", "t", "mindspore", "m"], - help="set training platform") - group_backend.add_argument("-d", "--device", default=None, type=str, - choices=["GPU", "NPU"], - help="set training device") - group_resume = parser.add_argument_group(title="Resume not finished task") - group_resume.add_argument("-r", "--resume", action='store_true', - help="resume not finished task") - group_resume.add_argument("-t", "--task_id", default=None, type=str, - help="specify the ID of the task to be resumed") - group_config = parser.add_argument_group(title='Modify config for yml') - group_config.add_argument("-m", "--modify", action='store_true', - help="modify some config") - group_config.add_argument("-dt", "--dataset", default=None, type=str, - help='modify dataset for all pipe_step') - group_config.add_argument("-dp", "--data_path", default=None, type=str, - help="modify data_path for all pipe_step") - group_config.add_argument("-bs", "--batch_size", default=None, type=str, - help='modify batch_size of dataset for all pipe_step') - group_config.add_argument("-es", "--epochs", default=None, type=str, - help='modify fully_train epochs') - group_config.add_argument("-f", "--force", default=None, action="store_true", - help='skip check validation of pretrained model') - args = parser.parse_args() - return args - - -def _modify_config(args, cfg): - if isinstance(cfg, dict): - for key in cfg.keys(): - if key in args.keys(): - if isinstance(cfg[key], dict): - cfg[key] = _modify_config(args[key], cfg[key]) - else: - cfg[key] = args[key] - cfg[key] = _modify_config(args, cfg[key]) - return deepcopy(cfg) - - -def _check_parse(args): - keys = [key for key in args.keys()] - for key in keys: - if args[key] is None: - args.pop(key) - if 'dataset' in args.keys(): - dataset_type = args['dataset'] - args['dataset'] = {'type': dataset_type} - return args - - -def _set_backend(args): - backend = args.backend - device = args.device - if backend: - if args.backend in ["pytorch", "p"]: - backend = "pytorch" - elif args.backend in ["tensorflow", "t"]: - backend = "tensorflow" - elif args.backend in ["mindspore", "m"]: - backend = "mindspore" - else: - config = Config(args.config_file) - if "general" in config and "backend" in config["general"]: - backend = config["general"]["backend"] - if not device: - config = Config(args.config_file) - 
if "general" in config and "device_category" in config["general"]: - device = config["general"]["device_category"] - if backend: - General.backend = backend - if device: - General.device_category = device - vega.set_backend(General.backend, General.device_category) - - -def _resume(args): - if args.resume: - if not args.task_id: - raise Exception("Please set task id (-t task_id) if you want resume not finished task.") - from vega.common.general import TaskConfig - General.task.task_id = args.task_id - General._resume = True - TaskConfig.backup_original_value(force=True) - General.backup_original_value(force=True) - - -def _backup_config(args): - _file = args.config_file - from vega.common.task_ops import TaskOps - from vega.common.file_ops import FileOps - dest_file = FileOps.join_path(TaskOps().local_output_path, os.path.basename(_file)) - FileOps.make_base_dir(dest_file) - FileOps.copy_file(_file, dest_file) - - -def _change_process_name(): - from ctypes import cdll, byref, create_string_buffer - libc = cdll.LoadLibrary('libc.so.6') - buff = create_string_buffer(bytes("vega-main", "utf-8")) - libc.prctl(15, byref(buff), 0, 0, 0) - - -class LoadConfigException(Exception): - """Load config exception.""" - - pass - - -def _read_config_file(): - """Read config file and return ConfigParser.""" - vega_config_file = os.path.join(os.environ['HOME'], ".vega", "vega.ini") - if not os.path.exists(vega_config_file): - raise LoadConfigException(f"Not found configure file: {vega_config_file}") - config = configparser.ConfigParser() - config.read(vega_config_file) - return config - - -def _parse_config(config): - General.security_setting = config._sections - General.security_setting.get("security")["enable"] = True \ - if str(General.security_setting.get("security").get("enable")).upper() == "TRUE" else False - - -def _get_config_field(config, field): - if field not in config: - raise LoadConfigException("field <{}> is not existed in config file".format(field)) - return config[field] - - -def _get_config_key(config, key, field): - if key not in config: - raise LoadConfigException("key <{}> is not in field <{}> of config file".format(key, field)) - return config[key] - - -def _check_if_file_config_correct(config, key, field): - file = _get_config_key(config, key, field) - if not os.path.exists(file): - raise LoadConfigException("file <{}> is not existed.".format(file)) - - -def _check_security_switch_valid(config): - if "security" not in config or "enable" not in config["security"]: - raise LoadConfigException("Invalid config file: security field must be included") - - -def _get_security_switch_on_off(config): - return True if config["security"]["enable"].upper() == "TRUE" else False - - -def load_security_setting(): - """Load security settings.""" - try: - config = _read_config_file() - _check_security_switch_valid(config) - security_mode = _get_security_switch_on_off(config) - if not security_mode: - General.security_setting = { - "security": { - "enable": False - } - } - return True - _check_config_validation(config) - _parse_config(config) - except LoadConfigException as e: - logging.warning("load_security_setting failed: {}".format(e)) - return False - return True - - -def _check_cert_key_file(config, key, field): - file = _get_config_key(config, key, field) - if not os.stat(file).st_uid == os.getuid(): - raise Exception("File <{}> is not owned by current user".format(file)) - if os.path.islink(file): - raise Exception("File <{}> should not be soft link".format(file)) - if os.stat(file).st_mode & 
0o0077: - raise Exception("file <{}> is accessible by group/other users".format(file)) - - -def _check_config_validation(config): - https_config = _get_config_field(config, "https") - _check_if_file_config_correct(https_config, "cert_pem_file", "https") - _check_cert_key_file(https_config, "cert_pem_file", "https") - - -def _file_exist(path): - return os.access(path, os.F_OK) - - -def _file_belong_to_current_user(path): - return os.stat(path).st_uid == os.getuid() - - -def _file_other_writable(path): - return os.stat(path).st_mode & stat.S_IWOTH - - -def _file_is_link(path): - return os.path.islink(path) - - -def check_env(): - """Check environment.""" - if not load_security_setting(): - return False - return True - - -def _get_risky_files_by_suffix(suffixes, path): - risky_files = [] - non_current_user_files = [] - others_writable_files = [] - link_files = [] - for suffix in suffixes: - if not path.endswith(suffix): - continue - abs_path = os.path.abspath(path) - if _file_exist(abs_path): - risky_files.append(abs_path) - if not _file_belong_to_current_user(abs_path): - non_current_user_files.append(abs_path) - if _file_other_writable(abs_path): - others_writable_files.append(abs_path) - if _file_is_link(abs_path): - link_files.append(abs_path) - - return risky_files, non_current_user_files, others_writable_files, link_files - - -def get_risky_files(config): - """Get contained risky file (.pth/.pth.tar/.onnx/.py).""" - risky_files = [] - non_current_user_files = [] - others_writable_files = [] - link_files = [] - - if not isinstance(config, Config): - return risky_files, non_current_user_files, others_writable_files, link_files - - for value in config.values(): - if isinstance(value, Config) and value.get("type") == "DeepLabNetWork": - value = value.get("dir").rstrip("/") + "/" + value.get("name").lstrip("/") + ".py" - if isinstance(value, str): - temp_risky_files, temp_non_current_user_files, temp_other_writable_files, temp_link_files \ - = _get_risky_files_by_suffix([".pth", ".pth.tar", ".py"], value) - risky_files.extend(temp_risky_files) - non_current_user_files.extend(temp_non_current_user_files) - others_writable_files.extend(temp_other_writable_files) - link_files.extend(temp_link_files) - temp_risky_files, temp_non_current_user_files, temp_other_writable_files, temp_link_files \ - = get_risky_files(value) - risky_files.extend(temp_risky_files) - non_current_user_files.extend(temp_non_current_user_files) - others_writable_files.extend(temp_other_writable_files) - link_files.extend(temp_link_files) - - return risky_files, non_current_user_files, others_writable_files, link_files - - -def check_risky_file(args, config): - """Check risky file (.pth/.pth.tar/.py).""" - if args.force: - return True - risky_files, non_current_user_files, others_writable_files, link_files = get_risky_files(config) - if len(risky_files) == 0: - return True - - print("\033[1;33m" - "WARNING: The following executable files will be loaded:" - "\033[0m") - for file in risky_files: - print(file) - if len(non_current_user_files) > 0: - print("\033[1;33m" - "WARNING: The following executable files that will be loaded do not belong to the current user:" - "\033[0m") - for file in non_current_user_files: - print(file) - if len(others_writable_files) > 0: - print("\033[1;33m" - "WARNING: The following executable files that will be loaded have others write permission:" - "\033[0m") - for file in others_writable_files: - print(file) - if len(link_files) > 0: - print("\033[1;33m" - "WARNING: The following executable 
files that will be loaded is soft link file:" - "\033[0m") - for file in link_files: - print(file) - user_confirm = input("It is possible to construct malicious pickle data " - "which will execute arbitrary code during unpickling .pth/.pth.tar/.py files. " - "\nPlease ensure the safety and consistency of the loaded executable files. " - "\nDo you want to continue? (yes/no) ").strip(" ") - while user_confirm != "yes" and user_confirm != "no": - user_confirm = input("Please enter yes or no! ").strip(" ") - if user_confirm == "yes": - return True - elif user_confirm == "no": - return False - - -def run_pipeline(load_special_lib_func=None): - """Run pipeline.""" - os.umask(0o027) - args = _parse_args() - _resume(args) - _set_backend(args) - _append_env() - if load_special_lib_func: - load_special_lib_func(args.config_file) - config = Config(args.config_file) - # load general - if config.get("general"): - General.from_dict(config.get("general"), skip_check=False) - os.environ['TF_CPP_MIN_LOG_LEVEL'] = str(General.TF_CPP_MIN_LOG_LEVEL) - # check env - if not check_env(): - return - if not check_risky_file(args, config): - return - if General.requires and not verify_requires(General.requires): - return - dict_args = vars(args) - dict_args = _check_parse(dict_args) - config = _modify_config(dict_args, config) - # _backup_config(args) - _change_process_name() - vega.run(config) - - -if __name__ == '__main__': - run_pipeline() diff --git a/vega/security/setup.py b/vega/security/setup.py deleted file mode 100644 index 70acd1c..0000000 --- a/vega/security/setup.py +++ /dev/null @@ -1,91 +0,0 @@ -# -*- coding: utf-8 -*- - -# Copyright (C) 2020. Huawei Technologies Co., Ltd. All rights reserved. -# This program is free software; you can redistribute it and/or modify -# it under the terms of the MIT License. -# This program is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# MIT License for more details. 
- -"""Setuptools of vega.""" - -import os -import setuptools -import sys -from setuptools.command.install import install as _install - -if sys.version_info < (3, 6): - sys.exit("Sorry, Python < 3.6 is not supported.") - -with open("RELEASE.md", "r") as fh: - long_desc = fh.read() - - -def _post_install(): - vega_dir = os.path.join(os.getenv("HOME"), ".vega") - os.makedirs(vega_dir, exist_ok=True) - vega_config_file = os.path.join(vega_dir, "vega.ini") - if os.path.exists(vega_config_file): - return - - with open(vega_config_file, "w") as wf: - wf.write("[security]\n") - wf.write("enable=True\n") - wf.write("\n") - wf.write("[https]\n") - wf.write("cert_pem_file=\n") - wf.write("secret_key_file=\n") - wf.write("\n") - wf.write("[limit]\n") - wf.write("request_frequency_limit=100/minute\n") - wf.write("max_content_length=1000000000\n") - wf.write("#white_list=0.0.0.0,127.0.0.1\n") - - -class install(_install): - """Post installation.""" - - def run(self): - """Run.""" - _install.run(self) - self.execute(_post_install, (), msg="Running pre install task") - - -cmd_class = dict(install=install) - -setuptools.setup( - name="noah-vega", - cmdclass=cmd_class, - version="1.8.0.mindstudio", - packages=["vega", "evaluate_service"], - include_package_data=True, - python_requires=">=3.6", - author="Huawei Noah's Ark Lab", - author_email="", - description="AutoML Toolkit", - long_description=long_desc, - long_description_content_type="text/markdown", - license="MIT", - url="https://github.com/huawei-noah/vega", - # packages=setuptools.find_packages(), - classifiers=[ - "Programming Language :: Python :: 3", - "License :: OSI Approved :: MIT License", - "Operating System :: POSIX :: Linux", - ], - install_requires=[ - "pyzmq", - ], - entry_points=""" - [console_scripts] - vega=vega.tools.run_pipeline:run_pipeline - vega-security-config=vega.tools.config_op:vega_config_operate - vega-kill=vega.tools.kill:_kill - vega-verify-cluster=vega.tools.verify_cluster:_verify_cluster - vega-fine-tune=vega.tools.fine_tune:_fine_tune - vega-progress=vega.tools.query_progress:print_progress - vega-process=vega.tools.query_process:print_processes - vega-evaluate-service=evaluate_service.main:run - """, -) diff --git a/vega/security/utils.py b/vega/security/utils.py new file mode 100644 index 0000000..9b6c220 --- /dev/null +++ b/vega/security/utils.py @@ -0,0 +1,46 @@ +# Copyright (C) 2020. Huawei Technologies Co., Ltd. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +"""Context utils.""" +import ssl +import sys +import logging + + +def create_context(ca_file, cert_pem_file, secret_key_file, key_mm=None, key_component_1=None, key_component_2=None): + """Create the SSL context.""" + ciphers = "ECDHE-ECDSA-AES128-CCM:ECDHE-ECDSA-AES256-CCM:ECDHE-ECDSA-AES128-GCM-SHA256" \ + ":ECDHE-ECDSA-AES256-GCM-SHA384:ECDHE-RSA-AES128-GCM-SHA256:ECDHE-RSA-AES256-GCM-SHA384" \ + ":DHE-RSA-AES128-GCM-SHA256:DHE-RSA-AES256-GCM-SHA384:DHE-DSS-AES128-GCM-SHA256" \ + ":DHE-DSS-AES256-GCM-SHA384:DHE-RSA-AES128-CCM:DHE-RSA-AES256-CCM" + context = ssl.SSLContext(ssl.PROTOCOL_TLS) + context.options += ssl.OP_NO_TLSv1 + context.options += ssl.OP_NO_TLSv1_1 + if sys.version_info >= (3, 7): + context.options += ssl.OP_NO_TLSv1_2 + context.options += ssl.OP_NO_RENEGOTIATION + context.options -= ssl.OP_ALL + context.verify_mode = ssl.CERT_REQUIRED + context.set_ciphers(ciphers) + if key_mm is not None: + from .kmc.kmc import decrypt + logging.debug("Using encrypted key.") + if key_component_1 is None or key_component_2 is None: + logging.error("For encrypted key, the component must be provided.") + decrypt_mm = decrypt(cert_pem_file, secret_key_file, key_mm, key_component_1, key_component_2) + context.load_cert_chain(cert_pem_file, secret_key_file, password=decrypt_mm) + else: + context.load_cert_chain(cert_pem_file, secret_key_file) + context.load_verify_locations(ca_file) + return context diff --git a/vega/security/verify_cert.py b/vega/security/verify_cert.py new file mode 100644 index 0000000..cdc7238 --- /dev/null +++ b/vega/security/verify_cert.py @@ -0,0 +1,38 @@ +# -*- coding: utf-8 -*- + +# Copyright (C) 2020. Huawei Technologies Co., Ltd. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""Verify cert.""" + +import logging + + +def verify_cert(ca_cert_file, cert_file): + """Verify the cert.""" + from OpenSSL.crypto import load_certificate, FILETYPE_PEM, X509Store, X509StoreContext, X509StoreContextError + ca_cert = load_certificate(FILETYPE_PEM, open(ca_cert_file, "r", encoding="utf-8").read()) + cert = load_certificate(FILETYPE_PEM, open(cert_file, 'r', encoding="utf-8").read()) + if ca_cert.has_expired() or cert.has_expired(): + logging.error("The cert is expired, please check.") + return False + store = X509Store() + store.add_cert(ca_cert) + ctx = X509StoreContext(store, cert) + try: + ctx.verify_certificate() + except X509StoreContextError: + logging.error("Certificate signature failure, ca cert file and cert file not match.") + return False + return True diff --git a/vega/security/verify_config.py b/vega/security/verify_config.py new file mode 100644 index 0000000..f5c910e --- /dev/null +++ b/vega/security/verify_config.py @@ -0,0 +1,152 @@ +# -*- coding:utf-8 -*- + +# Copyright (C) 2020. Huawei Technologies Co., Ltd. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""Run pipeline.""" + +import configparser +import logging +import os +import stat + + +def _file_exist(path): + return os.access(path, os.F_OK) + + +def _file_belong_to_current_user(path): + return os.stat(path).st_uid == os.getuid() + + +def _file_other_writable(path): + return os.stat(path).st_mode & stat.S_IWOTH + + +def _file_is_link(path): + return os.path.islink(path) + + +def _get_risky_files_by_suffix(suffixes, path): + risky_files = [] + non_current_user_files = [] + others_writable_files = [] + link_files = [] + for suffix in suffixes: + if not path.endswith(suffix): + continue + abs_path = os.path.abspath(path) + if _file_exist(abs_path): + risky_files.append(abs_path) + if not _file_belong_to_current_user(abs_path): + non_current_user_files.append(abs_path) + if _file_other_writable(abs_path): + others_writable_files.append(abs_path) + if _file_is_link(abs_path): + link_files.append(abs_path) + + return risky_files, non_current_user_files, others_writable_files, link_files + + +def get_risky_files(config): + """Get contained risky file (.pth/.pth.tar/.onnx/.py).""" + risky_files = [] + non_current_user_files = [] + others_writable_files = [] + link_files = [] + from vega.common.config import Config + if not isinstance(config, Config): + return risky_files, non_current_user_files, others_writable_files, link_files + + for value in config.values(): + if isinstance(value, Config) and value.get("type") == "DeepLabNetWork": + value = value.get("dir").rstrip("/") + "/" + value.get("name").lstrip("/") + ".py" + if isinstance(value, str): + temp_risky_files, temp_non_current_user_files, temp_other_writable_files, temp_link_files \ + = _get_risky_files_by_suffix([".pth", ".pth.tar", ".py"], value) + risky_files.extend(temp_risky_files) + non_current_user_files.extend(temp_non_current_user_files) + others_writable_files.extend(temp_other_writable_files) + link_files.extend(temp_link_files) + temp_risky_files, temp_non_current_user_files, temp_other_writable_files, temp_link_files \ + = get_risky_files(value) + risky_files.extend(temp_risky_files) + non_current_user_files.extend(temp_non_current_user_files) + others_writable_files.extend(temp_other_writable_files) + link_files.extend(temp_link_files) + + return risky_files, non_current_user_files, others_writable_files, link_files + + +def check_risky_file(args, config): + """Check risky file (.pth/.pth.tar/.py).""" + if not args.security: + return True + risky_files, non_current_user_files, others_writable_files, link_files = get_risky_files(config) + if len(risky_files) == 0: + return True + + print("\033[1;33m" + "WARNING: The following executable files will be loaded:" + "\033[0m") + for file in risky_files: + print(file) + if len(non_current_user_files) > 0: + print("\033[1;33m" + "WARNING: The following executable files that will be loaded do not belong to the current user:" + "\033[0m") + for file in non_current_user_files: + print(file) + if len(others_writable_files) > 0: + print("\033[1;33m" + "WARNING: The following executable files that will be loaded have others write permission:" + "\033[0m") + for file in 
others_writable_files: + print(file) + if len(link_files) > 0: + print("\033[1;33m" + "WARNING: The following executable files that will be loaded is soft link file:" + "\033[0m") + for file in link_files: + print(file) + user_confirm = input("It is possible to construct malicious pickle data " + "which will execute arbitrary code during unpickling .pth/.pth.tar/.py files. " + "\nPlease ensure the safety and consistency of the loaded executable files. " + "\nDo you want to continue? (yes/no) ").strip(" ") + while user_confirm != "yes" and user_confirm != "no": + user_confirm = input("Please enter yes or no! ").strip(" ") + if user_confirm == "yes": + return True + elif user_confirm == "no": + return False + + +def check_risky_files(file_list): + """Check if cert and key file are risky.""" + res = True + for file in file_list: + if not os.path.exists(file): + logging.error(f"File <{file}> does not exist") + res = False + continue + if not _file_belong_to_current_user(file): + logging.error(f"File <{file}> is not owned by current user") + res = False + if _file_is_link(file): + logging.error(f"File <{file}> should not be soft link") + res = False + if os.stat(file).st_mode & 0o0177: + logging.error(f"File <{file}> permissions are not correct, cannot exceed 600") + res = False + return res diff --git a/vega/security/zmq_op.py b/vega/security/zmq_op.py new file mode 100644 index 0000000..29b89d5 --- /dev/null +++ b/vega/security/zmq_op.py @@ -0,0 +1,70 @@ +# -*- coding: utf-8 -*- + +# Copyright (C) 2020. Huawei Technologies Co., Ltd. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""ZMQ operation.""" +import os +import uuid +import zmq +import zmq.auth +from zmq.auth.thread import ThreadAuthenticator + + +def listen_security(ip, min_port, max_port, max_tries, temp_path): + """Listen on server.""" + ctx = zmq.Context.instance() + # Start an authenticator for this context. 
+ auth = ThreadAuthenticator(ctx) + auth.start() + auth.configure_curve(domain='*', location=zmq.auth.CURVE_ALLOW_ANY) + + socket = ctx.socket(zmq.REP) + server_secret_key = os.path.join(temp_path, "server.key_secret") + if not os.path.exists(server_secret_key): + _, server_secret_key = zmq.auth.create_certificates(temp_path, "server") + server_public, server_secret = zmq.auth.load_certificate(server_secret_key) + if os.path.exists(server_secret_key): + os.remove(server_secret_key) + socket.curve_secretkey = server_secret + socket.curve_publickey = server_public + socket.curve_server = True # must come before bind + + port = socket.bind_to_random_port( + f"tcp://{ip}", min_port=min_port, max_port=max_port, max_tries=100) + return socket, port + + +def connect_security(ip, port, temp_path): + """Connect to server.""" + ctx = zmq.Context.instance() + socket = ctx.socket(zmq.REQ) + client_name = uuid.uuid1().hex[:8] + client_secret_key = os.path.join(temp_path, "{}.key_secret".format(client_name)) + if not os.path.exists(client_secret_key): + client_public_key, client_secret_key = zmq.auth.create_certificates(temp_path, client_name) + client_public, client_secret = zmq.auth.load_certificate(client_secret_key) + socket.curve_secretkey = client_secret + socket.curve_publickey = client_public + server_public_key = os.path.join(temp_path, "server.key") + if not os.path.exists(server_public_key): + server_public_key, _ = zmq.auth.create_certificates(temp_path, "server") + server_public, _ = zmq.auth.load_certificate(server_public_key) + socket.curve_serverkey = server_public + socket.connect(f"tcp://{ip}:{port}") + if os.path.exists(client_secret_key): + os.remove(client_secret_key) + if os.path.exists(client_public_key): + os.remove(client_public_key) + return socket diff --git a/vega/tools/README.md b/vega/tools/README.md deleted file mode 100644 index e3557b0..0000000 --- a/vega/tools/README.md +++ /dev/null @@ -1,81 +0,0 @@ -# command line tools - -## fully train - -usage: - -```text -usage: fully_train.py [-h] [-backend GENERAL.BACKEND] - [-devices_per_trainer GENERAL.WORKER.devices_per_trainer] - [-master_ip GENERAL.CLUSTER.MASTER_IP] - [-slaves [GENERAL.CLUSTER.SLAVES [GENERAL.CLUSTER.SLAVES ...]]] - -dataset DATASET.TYPE - [-data_path DATASET.COMMON.DATA_PATH] - [-batch_size DATASET.COMMON.BATCH_SIZE] - [-model_desc MODEL.MODEL_DESC] - [-model_file MODEL.PRETRAINED_MODEL_FILE] - [-epochs TRAINER.EPOCHS] - [-evaluator [EVALUATOR [EVALUATOR ...]]] - -Fully train model. - -optional arguments: - -h, --help show this help message and exit - -backend GENERAL.BACKEND, --general.backend GENERAL.BACKEND - pytorch|tensorflow|mindspore - -devices_per_trainer GENERAL.WORKER.devices_per_trainer, --general.worker.devices_per_trainer GENERAL.WORKER.devices_per_trainer - -master_ip GENERAL.CLUSTER.MASTER_IP, --general.cluster.master_ip GENERAL.CLUSTER.MASTER_IP - -slaves [GENERAL.CLUSTER.SLAVES [GENERAL.CLUSTER.SLAVES ...]], --general.cluster.slaves [GENERAL.CLUSTER.SLAVES [GENERAL.CLUSTER.SLAVES ...]] - slave IP list - -dataset DATASET.TYPE, --dataset.type DATASET.TYPE - dataset name. - -data_path DATASET.COMMON.DATA_PATH, --dataset.common.data_path DATASET.COMMON.DATA_PATH - dataset path. 
- -batch_size DATASET.COMMON.BATCH_SIZE, --dataset.common.batch_size DATASET.COMMON.BATCH_SIZE - -model_desc MODEL.MODEL_DESC, --model.model_desc MODEL.MODEL_DESC - -model_file MODEL.PRETRAINED_MODEL_FILE, --model.pretrained_model_file MODEL.PRETRAINED_MODEL_FILE - -epochs TRAINER.EPOCHS, --trainer.epochs TRAINER.EPOCHS - -evaluator [EVALUATOR [EVALUATOR ...]], --evaluator [EVALUATOR [EVALUATOR ...]] - evaluator list, eg. -evaluator HostEvaluator DeviceEvaluator -``` - -example: - -```text -python3 -m vega.tools.fully_train -dataset Cifar10 -batch_size 8 -data_path /cache/datasets/cifar10 -model_desc ./tasks/nas/workers/nas1/1/desc_1.json -epochs 1 -evaluator HostEvaluator -``` - -## benchmark - -usage: - -```text -usage: benchmark.py [-h] [-backend GENERAL.BACKEND] -dataset DATASET.TYPE - [-data_path DATASET.COMMON.DATA_PATH] - [-batch_size DATASET.COMMON.BATCH_SIZE] - [-model_desc MODEL.MODEL_DESC] - [-model_file MODEL.PRETRAINED_MODEL_FILE] - [-evaluator [EVALUATOR [EVALUATOR ...]]] - -Benchmark. - -optional arguments: - -h, --help show this help message and exit - -backend GENERAL.BACKEND, --general.backend GENERAL.BACKEND - pytorch|tensorflow|mindspore - -dataset DATASET.TYPE, --dataset.type DATASET.TYPE - dataset name. - -data_path DATASET.COMMON.DATA_PATH, --dataset.common.data_path DATASET.COMMON.DATA_PATH - dataset path. - -batch_size DATASET.COMMON.BATCH_SIZE, --dataset.common.batch_size DATASET.COMMON.BATCH_SIZE - -model_desc MODEL.MODEL_DESC, --model.model_desc MODEL.MODEL_DESC - -model_file MODEL.PRETRAINED_MODEL_FILE, --model.pretrained_model_file MODEL.PRETRAINED_MODEL_FILE - -evaluator [EVALUATOR [EVALUATOR ...]], --evaluator [EVALUATOR [EVALUATOR ...]] - evaluator list, eg. -evaluator HostEvaluator DeviceEvaluator -``` - -example: - -```bash -python3 -m vega.tools.benchmark -dataset Cifar10 -batch_size 8 -data_path /cache/datasets/cifar10 -model_desc ./tasks/fullytrain/output/fully_train/desc_0.json -model_file=./tasks/fullytrain/output/fully_train/model_0.pth -evaluator HostEvaluator -``` diff --git a/vega/tools/args.py b/vega/tools/args.py deleted file mode 100644 index a8829cd..0000000 --- a/vega/tools/args.py +++ /dev/null @@ -1,79 +0,0 @@ -# -*- coding: utf-8 -*- - -# Copyright (C) 2020. Huawei Technologies Co., Ltd. All rights reserved. -# This program is free software; you can redistribute it and/or modify -# it under the terms of the MIT License. -# This program is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# MIT License for more details. 
- -"""Inference of vega model.""" -from vega.common import argment_parser -from vega.common.config import Config, build_tree -from vega.common.general import General -from vega.core.pipeline.conf import PipelineConfig, PipeStepConfig -from vega.trainer.conf import TrainerConfig - - -def _parse_args(sections, desc): - parser = argment_parser(desc) - parser.add_argument("-backend", "--general.backend", default="pytorch", type=str, - help="pytorch|tensorflow|mindspore") - if "cluster" in sections: - parser.add_argument("-devices_per_trainer", "--general.worker.devices_per_trainer", default=None, type=int) - parser.add_argument("-master_ip", "--general.cluster.master_ip", default=None, type=str) - parser.add_argument("-slaves", "--general.cluster.slaves", default=[], - action='store', dest='general.cluster.slaves', type=str, nargs='*', - help="slave IP list") - parser.add_argument("-dataset", "--dataset.type", required=True, type=str, help="dataset name.") - parser.add_argument("-data_path", "--dataset.common.data_path", type=str, help="dataset path.") - parser.add_argument("-batch_size", "--dataset.common.batch_size", default=256, type=int) - if "model" in sections: - parser.add_argument("-model_desc", "--model.model_desc", type=str) - parser.add_argument("-model_file", "--model.pretrained_model_file", type=str) - if "trainer" in sections: - parser.add_argument("-epochs", "--trainer.epochs", type=int) - if "fine_tune" in sections: - parser.add_argument("-task_type", "--task_type", default="classification", type=str, - help="classification|detection|segmentation|super_resolution") - parser.add_argument("-num_classes", "--trainer.num_classes", type=int) - parser.add_argument("-evaluator", "--evaluator", default=[], - action='store', dest='evaluator', type=str, nargs='*', - help="evaluator list, eg. -evaluator HostEvaluator DeviceEvaluator") - args = vars(parser.parse_args()) - args = {key: value for key, value in args.items() if args[key]} - tree = Config(build_tree(args)) - return tree - - -def _set_config(args, step_name, step_type): - """Fully train.""" - # general - General.step_name = step_name - if hasattr(args, "general"): - General.from_dict(args.general) - # pipeline - PipelineConfig.steps = [step_name] - # pipestep - PipeStepConfig.type = step_type - # model - if hasattr(args, "model"): - if hasattr(args.model, "model_desc"): - args.model.model_desc = Config(args.model.model_desc) - PipeStepConfig.model.from_dict(args.model) - # dataset - if hasattr(args, "dataset"): - PipeStepConfig.dataset.from_dict(args.dataset) - # trainer - if hasattr(args, "trainer"): - TrainerConfig.from_dict(args.trainer) - # evaluator - if hasattr(args, "evaluator"): - # PipeStepConfig.evaluator._type_name = args.evaluator - if "HostEvaluator" in args.evaluator: - PipeStepConfig.evaluator_enable = True - PipeStepConfig.evaluator.host_evaluator_enable = True - if "DeviceEvaluator" in args.evaluator: - PipeStepConfig.evaluator_enable = True - PipeStepConfig.evaluator.device_evaluator_enable = True diff --git a/vega/tools/benchmark.py b/vega/tools/benchmark.py deleted file mode 100644 index dfb0298..0000000 --- a/vega/tools/benchmark.py +++ /dev/null @@ -1,27 +0,0 @@ -# -*- coding: utf-8 -*- - -# Copyright (C) 2020. Huawei Technologies Co., Ltd. All rights reserved. -# This program is free software; you can redistribute it and/or modify -# it under the terms of the MIT License. 
-# This program is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# MIT License for more details. - -"""Inference of vega model.""" -import vega -from vega.core.pipeline.benchmark_pipe_step import BenchmarkPipeStep -from vega.tools.init_env import _init_env -from vega.tools.args import _parse_args, _set_config - - -def _benchmark(): - args = _parse_args(["model"], "Benchmark.") - vega.set_backend(args.general.backend) - _set_config(args, "benchmark", "BenchmarkPipeStep") - _init_env() - BenchmarkPipeStep().do() - - -if __name__ == "__main__": - _benchmark() diff --git a/vega/tools/cam.py b/vega/tools/cam.py deleted file mode 100644 index 2384f0c..0000000 --- a/vega/tools/cam.py +++ /dev/null @@ -1,113 +0,0 @@ -# -*- coding: utf-8 -*- - -# Copyright (C) 2020. Huawei Technologies Co., Ltd. All rights reserved. -# This program is free software; you can redistribute it and/or modify -# it under the terms of the MIT License. -# This program is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# MIT License for more details. - -"""CAM.""" -import vega -import numpy as np -import cv2 -import torch -from vega.common import argment_parser - - -def _predict_on_weights(feature_maps, weights): - gap = np.average(feature_maps, axis=(0, 1)) - logit = np.dot(gap, np.squeeze(weights)) - return 1 / (1 + np.e ** (-logit)) - - -def _get_cam(image, feature_maps, weights, display=False): - predict = _predict_on_weights(feature_maps, weights) - cam = (predict - 0.5) * np.matmul(feature_maps, weights) - cam = (cam - cam.min()) / (cam.max() - cam.min()) - (width, height, channel) = image.shape - cam = cv2.resize(np.array(cam), (width, height)) - cam = 255 * cam - cam = cam.astype(np.uint8) - heatmap = cv2.applyColorMap(cam, cv2.COLORMAP_JET) - heatmap[np.where(cam <= 100)] = 0 - image = 255 * image - image = image.astype(np.uint8) - out = cv2.addWeighted(src1=image, alpha=0.8, src2=heatmap, beta=0.4, gamma=0) - return out - - -def _load_image(image_file): - img = cv2.imread(image_file) - img = img / 255 - img = img.astype(np.float32) - width, height, channel = img.shape - return img.reshape(1, channel, height, width) - - -def _to_tensor(data): - data = torch.tensor(data) - return data.cuda() - - -def _get_model(args): - from vega.model_zoo import ModelZoo - model = ModelZoo.get_model(args.model_desc_file, args.model_weights_file) - model = model.cuda() - model.eval() - return model - - -def _infer_pytorch(model, data): - with torch.no_grad(): - logits = model(data) - logits = logits.tolist()[0] - return logits - - -def _hook(model, input, output): - setattr(model, "feature_maps", input[0][0].cpu()) - - -def _cam(args): - img = _load_image(args.input_image_file) - data = _to_tensor(img) - model = _get_model(args) - handle = next(model.head.children()).register_forward_hook(_hook) - result = _infer_pytorch(model, data) - handle.remove() - cat = result.index(max(result)) - img = data[0].cpu().detach().numpy() - channel, height, width = img.shape - img = img.reshape(width, height, channel) - feature_maps = next(model.head.children()).feature_maps - channel, height, width = feature_maps.shape - feature_maps = feature_maps.reshape(width, height, channel) - weights = model.head.linear.weight[cat].cpu().detach().numpy() - cam = _get_cam(img, feature_maps, weights) 
- cv2.imwrite(args.output_image_file, cam) - - -def _parse_args(): - parser = argment_parser("Generate CAM(Class Activation Map) file.") - parser.add_argument("-i", "--input_image_file", required=True, type=str, help="Input image file.") - parser.add_argument("-o", "--output_image_file", required=True, type=str, help="Output image file.") - parser.add_argument("-d", "--model_desc_file", required=True, type=str, help="Model description file.") - parser.add_argument("-w", "--model_weights_file", required=True, type=str, help="Model weights file(.pth).") - args = parser.parse_args() - return args - - -if __name__ == "__main__": - vega.set_backend("pytorch") - args = _parse_args() - print("model description file: {}".format(args.model_desc_file)) - print("model weights file: {}".format(args.model_weights_file)) - print("input image: {}".format(args.input_image_file)) - print("output image: {}".format(args.output_image_file)) - try: - _cam(args) - print("OK.") - except Exception as e: - raise e diff --git a/vega/tools/detection_api.py b/vega/tools/detection_api.py deleted file mode 100644 index a35b1e5..0000000 --- a/vega/tools/detection_api.py +++ /dev/null @@ -1,53 +0,0 @@ -# -*- coding: utf-8 -*- - -# Copyright (C) 2020. Huawei Technologies Co., Ltd. All rights reserved. -# This program is free software; you can redistribute it and/or modify -# it under the terms of the MIT License. -# This program is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# MIT License for more details. - -"""Inference of object detection model.""" -from PIL import Image -import matplotlib.pyplot as plt -import torchvision.transforms as T -import cv2 -from vega.model_zoo.model_zoo import ModelZoo - - -class ObjectDetectionAPI(object): - """ObjectDetection API.""" - - def __init__(self, desc_file, pretrained_model_file, threshold=0.5): - super().__init__() - self.model = ModelZoo().get_model(desc_file, pretrained_model_file) - self.threshold = threshold - - def predict(self, img_path, category_names): - """Predict one img.""" - img = Image.open(img_path) - transform = T.Compose([T.ToTensor()]) - img = transform(img) - self.model.eval() - pred = self.model([img]) - pred_class = [category_names[i] for i in list(pred[0]['labels'].numpy())] - pred_boxes = [[(i[0], i[1]), (i[2], i[3])] for i in list(pred[0]['boxes'].detach().numpy())] - pred_score = list(pred[0]['scores'].detach().numpy()) - pred_t = [pred_score.index(x) for x in pred_score if x > self.threshold][-1] - pred_boxes = pred_boxes[:pred_t + 1] - pred_class = pred_class[:pred_t + 1] - self._show(pred_boxes, pred_class, img_path) - return pred_boxes, pred_class - - def _show(self, boxes, pred_cls, img_path, rect_th=3, text_size=3, text_th=3): - img = cv2.imread(img_path) - img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB) - for i, box in enumerate(boxes): - cv2.rectangle(img, box[0], box[1], color=(0, 255, 0), thickness=rect_th) - cv2.putText(img, pred_cls[i], box[0], cv2.FONT_HERSHEY_SIMPLEX, text_size, (0, 255, 0), thickness=text_th) - plt.figure(figsize=(20, 30)) - plt.imshow(img) - plt.xticks([]) - plt.yticks([]) - plt.show() diff --git a/vega/tools/detection_inference.py b/vega/tools/detection_inference.py index 9472c71..60e15e7 100644 --- a/vega/tools/detection_inference.py +++ b/vega/tools/detection_inference.py @@ -1,27 +1,35 @@ # -*- coding: utf-8 -*- # Copyright (C) 2020. Huawei Technologies Co., Ltd. All rights reserved. 
-# This program is free software; you can redistribute it and/or modify -# it under the terms of the MIT License. -# This program is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# MIT License for more details. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. """Inference of vega detection model.""" import vega from vega.common import argment_parser from vega.common import FileOps +from vega.common.general import General +from vega import security def _load_data(args): """Load data from path.""" if args.data_format == 'CULANE': - return vega.dataset("AutoLaneDataset", dataset_format="CULane", data_path=args.data_path, mode="test", + return vega.get_dataset("AutoLaneDataset", dataset_format="CULane", data_path=args.data_path, mode="test", batch_size=args.batch_size).loader elif args.data_format == 'COCO': - return vega.dataset("CocoDataset", data_root=args.data_path, mode="test", + return vega.get_dataset("CocoDataset", data_root=args.data_path, mode="test", batch_size=args.batch_size).loader @@ -108,13 +116,21 @@ def parse_args_parser(): help="output file. " "type: pkl" ) + parser = security.args.add_args(parser) args = parser.parse_args() + security.args.check_args(args) return args def main(): """Inference.""" args = parse_args_parser() + if args.security: + if not security.load_config("client"): + print("If you run vega in normal mode, use parameter '-s'.") + print("For more parameters: vega-inference-det --help") + return + General.security = args.security vega.set_backend(args.backend, args.device) print("Start building model.") model = _get_model(args) diff --git a/vega/tools/fine_tune.py b/vega/tools/fine_tune.py deleted file mode 100644 index 185edcb..0000000 --- a/vega/tools/fine_tune.py +++ /dev/null @@ -1,229 +0,0 @@ -# -*- coding: utf-8 -*- - -# Copyright (C) 2020. Huawei Technologies Co., Ltd. All rights reserved. -# This program is free software; you can redistribute it and/or modify -# it under the terms of the MIT License. -# This program is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# MIT License for more details. 
- -"""Fine tune vega model.""" - -import pandas as pd -import logging -import json -from vega.common import argment_parser -from vega.common.general import General -from vega.common.task_ops import TaskOps -from vega.common.file_ops import FileOps -from vega.core.pipeline.conf import PipelineConfig, PipeStepConfig -from vega.trainer.conf import TrainerConfig -from vega.core.pipeline.train_pipe_step import TrainPipeStep -from vega.tools.init_env import _init_env -from vega.tools.run_pipeline import _set_backend - - -def _parse_args(): - parser = argment_parser("Fine tune DNet model or ResNet model.") - group_backend = parser.add_argument_group(title="Set backend and device, default is pytorch and GPU") - group_backend.add_argument("-b", "--backend", default="pytorch", type=str, - choices=["pytorch", "p", "tensorflow", "t", "mindspore", "m"], - help="set training platform") - group_backend.add_argument("-d", "--device", default="GPU", type=str, - choices=["GPU", "NPU"], - help="set training device") - group_dataset = parser.add_argument_group(title="Dataset setting") - group_dataset.add_argument("-ds", "--dataset", default=None, type=str, required=True, - help="dataset type, eg. Cifar10, ClassificationDataset.") - group_dataset.add_argument("-dp", "--data_path", default=None, type=str, required=True, - help="dataset path.") - group_dataset.add_argument("-bs", "--batch_size", default=None, type=int, required=True, - help="dataset batch size.") - group_dataset.add_argument("-tp", "--train_portion", default=1.0, type=float, - help="train portion.") - group_dataset.add_argument("-is", "--image_size", default=224, type=int, - help="image size.") - group_trainer = parser.add_argument_group(title="Trainer setting") - group_trainer.add_argument("-e", "--epochs", default=40, type=int, - help="Modify fully_train epochs") - group_model = parser.add_argument_group(title="model setting") - group_model.add_argument("-n", "--network", default=None, type=str, - choices=["dnet", "resnet"], - help="network name, dnet or resnet.") - # denet - group_model.add_argument("-de", "--dnet_encoding", default=None, type=str, - help="DNet network Encoding") - # resnet - group_model.add_argument("-rd", "--resnet_depth", default=50, type=int, - help="ResNet network depth") - # general - group_model.add_argument("-mf", "--pretrained_model_file", default=None, type=str, required=True, - help="pretrained model file") - group_model.add_argument("-nc", "--num_classes", default=None, type=int, required=True, - help="number of classes") - group_output = parser.add_argument_group(title="output setting") - group_output.add_argument("-o", "--output_path", default=None, type=int, - help="set output path") - args = parser.parse_args() - return args - - -def _set_pipeline_config(args): - General.step_name = "fine_tune" - PipelineConfig.steps = ["fine_tune"] - PipeStepConfig.type = "TrainPipeStep" - - -def _set_dataset_config(args): - PipeStepConfig.dataset.from_dict({ - "type": args.dataset, - "common": { - "data_path": args.data_path, - "batch_size": args.batch_size, - "train_portion": args.train_portion, - }, - "train": { - "transforms": [ - {"type": "Resize", "size": [args.image_size + 32, args.image_size + 32]}, - {"type": "RandomCrop", "size": [args.image_size, args.image_size]}, - {"type": "RandomHorizontalFlip"}, - {"type": "ToTensor"}, - {"type": "Normalize", "mean": [0.50, 0.5, 0.5], "std": [0.50, 0.5, 0.5]}, - ] - }, - "val": { - "transforms": [ - {"type": "Resize", "size": [args.image_size, args.image_size]}, - {"type": 
"ToTensor"}, - {"type": "Normalize", "mean": [0.50, 0.5, 0.5], "std": [0.50, 0.5, 0.5]}, - ] - }, - "test": { - "transforms": [ - {"type": "Resize", "size": [args.image_size, args.image_size]}, - {"type": "ToTensor"}, - {"type": "Normalize", "mean": [0.50, 0.5, 0.5], "std": [0.50, 0.5, 0.5]}, - ] - }, - }) - - -def _set_model_config(args): - if args.network == "dnet": - config = { - "model_desc": { - "type": "DNet", - "n_class": args.num_classes, - "encoding": args.dnet_encoding, - }, - "pretrained_model_file": args.pretrained_model_file, - "head": "fc", - } - if args.backend in ["mindspore", "m"]: - config = { - "model_desc": { - "modules": ["backbone"], - "backbone": { - "type": "DNet", - "n_class": args.num_classes, - "encoding": args.dnet_encoding, - }, - }, - "pretrained_model_file": args.pretrained_model_file, - } - elif args.network == "resnet": - config = { - "model_desc": { - "type": "ResNetTF", - "resnet_size": args.resnet_depth, - "num_classes": args.num_classes, - }, - "pretrained_model_file": args.pretrained_model_file, - "head": "resnet_model/dense/", - } - else: - raise Exception("Not supported network: {}".format(args.network)) - PipeStepConfig.model.from_dict(config) - - -def _set_trainer_config(args): - config = { - "epochs": args.epochs, - "loss": { - "type": "CrossEntropyLoss", - "params": {"sparse": True}, - }, - "optimizer": { - "type": "SGD", - "params": { - "lr": 0.003, - "momentum": 0.9, - "weight_decay": 0.0001, - }, - }, - "lr_scheduler": { - "type": "WarmupScheduler", - "by_epoch": False, - "params": { - "warmup_type": "linear", - "warmup_iters": 500, - "warmup_ratio": 0.01, - "after_scheduler_config": { - "type": "MultiStepLR", - "by_epoch": True, - "params": { - "milestones": [30], - "gamma": 0.1, - }, - }, - }, - }, - } - if args.backend in ["pytorch", "p"]: - pass - elif args.backend in ["tensorflow", "t"]: - config["lr_scheduler"]["by_epoch"] = True - config["lr_scheduler"]["params"]["warmup_iters"] = 5 - elif args.backend in ["mindspore", "m"]: - config["lr_scheduler"]["params"]["warmup_ratio"] = 0.00001 - config["optimizer"] = { - "type": "Adam", - "params": {"lr": 0.0001}, - } - else: - raise Exception("Not") - TrainerConfig.from_dict(config) - - -def _show_performance(): - output_file = FileOps.join_path( - TaskOps().local_output_path, General.step_name, "output.csv") - try: - data = pd.read_csv(output_file) - except Exception: - logging.info(" Result file output.csv is not existed or empty.") - return - if data.shape[1] < 2 or data.shape[0] == 0: - logging.info(" Result file output.csv is empty.") - return - logging.info("-" * 48) - data = json.loads(data.to_json()) - logging.info(" result: {}".format(data["performance"]["0"])) - logging.info("-" * 48) - - -def _fine_tune(): - args = _parse_args() - _set_backend(args) - _set_pipeline_config(args) - _set_dataset_config(args) - _set_model_config(args) - _set_trainer_config(args) - _init_env() - TrainPipeStep().do() - _show_performance() - - -if __name__ == "__main__": - _fine_tune() diff --git a/vega/tools/fully_train.py b/vega/tools/fully_train.py deleted file mode 100644 index ff51c42..0000000 --- a/vega/tools/fully_train.py +++ /dev/null @@ -1,27 +0,0 @@ -# -*- coding: utf-8 -*- - -# Copyright (C) 2020. Huawei Technologies Co., Ltd. All rights reserved. -# This program is free software; you can redistribute it and/or modify -# it under the terms of the MIT License. 
-# This program is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# MIT License for more details. - -"""Inference of vega model.""" -import vega -from vega.core.pipeline.train_pipe_step import TrainPipeStep -from vega.tools.init_env import _init_env -from vega.tools.args import _parse_args, _set_config - - -def _fully_train(): - args = _parse_args(["cluster", "model", "trainer"], "Fully train model.") - vega.set_backend(args.general.backend) - _set_config(args, "fully_train", "TrainPipeStep") - _init_env() - TrainPipeStep().do() - - -if __name__ == "__main__": - _fully_train() diff --git a/vega/tools/inference.py b/vega/tools/inference.py index 6a2de2f..2455e07 100644 --- a/vega/tools/inference.py +++ b/vega/tools/inference.py @@ -1,22 +1,30 @@ # -*- coding: utf-8 -*- # Copyright (C) 2020. Huawei Technologies Co., Ltd. All rights reserved. -# This program is free software; you can redistribute it and/or modify -# it under the terms of the MIT License. -# This program is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# MIT License for more details. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
"""Inference of vega model.""" -import pickle import os +import csv import numpy as np import cv2 -import csv import vega from vega.common import argment_parser +from vega.common import FileOps +from vega.common.general import General +from vega import security def _load_data(args): @@ -39,7 +47,7 @@ def _load_image(image_file): return img.reshape(1, channel, height, width) -def _to_tensor(data): +def _to_tensor(args, data): """Change data to tensor.""" if vega.is_torch_backend(): import torch @@ -68,20 +76,20 @@ def _get_model(args): def _infer(args, loader, model=None): """Choose backend.""" if vega.is_torch_backend(): - return _infer_pytorch(model, loader) + return _infer_pytorch(args, model, loader) elif vega.is_tf_backend(): return _infer_tf(args, model, loader) elif vega.is_ms_backend(): return _infer_ms(args, model, loader) -def _infer_pytorch(model, loader): +def _infer_pytorch(args, model, loader): """Infer with pytorch.""" infer_result = [] import torch with torch.no_grad(): for file_name in loader: - data = _to_tensor(_load_image(file_name)) + data = _to_tensor(args, _load_image(file_name)) logits = model(data) logits = logits[0].tolist() infer_result.append((os.path.basename(file_name), logits)) @@ -116,7 +124,6 @@ def _infer_tf(args, model, loader): def _infer_ms(): """Infer with ms.""" - # TODO pass @@ -134,8 +141,7 @@ def _save_result(args, result): else: if not _output_file: _output_file = "./result.pkl" - with open(_output_file, 'wb') as f: - pickle.dump(result, f) + FileOps.dump_pickle(result, _output_file) print('Results of Inference is saved in {}.'.format(_output_file)) @@ -173,12 +179,21 @@ def parse_args_parser(): "segmentation: ./result.pkl, " "detection: ./result.pkl " ) + parser = security.args.add_args(parser) args = parser.parse_args() + security.args.check_args(args) return args -if __name__ == '__main__': +def main(): + """Inference.""" args = parse_args_parser() + if args.security: + if not security.load_config("client"): + print("If you run vega in normal mode, use parameter '-s'.") + print("For more parameters: vega-inference --help") + return + General.security = args.security vega.set_backend(args.backend, args.device) print("Start building model.") model = _get_model(args) @@ -188,3 +203,7 @@ def parse_args_parser(): result = _infer(args, loader, model) _save_result(args, result) print("Completed successfully.") + + +if __name__ == '__main__': + main() diff --git a/vega/tools/init_env.py b/vega/tools/init_env.py deleted file mode 100644 index 3aaa314..0000000 --- a/vega/tools/init_env.py +++ /dev/null @@ -1,34 +0,0 @@ -# -*- coding: utf-8 -*- - -# Copyright (C) 2020. Huawei Technologies Co., Ltd. All rights reserved. -# This program is free software; you can redistribute it and/or modify -# it under the terms of the MIT License. -# This program is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# MIT License for more details. 
- -"""Set env.""" -import sys -import logging -from vega.common import init_log -from vega.common.general import General -from vega.common.task_ops import TaskOps -from vega.core.run import init_cluster_args - -logger = logging.getLogger(__name__) - - -def _init_env(): - if sys.version_info < (3, 6): - sys.exit('Sorry, Python < 3.6 is not supported.') - init_log(level=General.logger.level, - log_path=TaskOps().local_log_path) - General.env = init_cluster_args() - _print_task_id() - - -def _print_task_id(): - logging.info("-" * 48) - logging.info(" task id: {}".format(General.task.task_id)) - logging.info("-" * 48) diff --git a/vega/tools/kill.py b/vega/tools/kill.py index 9215d96..7ecd55c 100644 --- a/vega/tools/kill.py +++ b/vega/tools/kill.py @@ -1,21 +1,30 @@ # -*- coding: utf-8 -*- # Copyright (C) 2020. Huawei Technologies Co., Ltd. All rights reserved. -# This program is free software; you can redistribute it and/or modify -# it under the terms of the MIT License. -# This program is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# MIT License for more details. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. """Kill vega progress.""" +import logging import os import signal -import psutil import time +import psutil from vega.common import argment_parser from vega.tools.query_process import query_process, get_pid, query_processes, get_vega_pids, print_process +from vega import security +from vega.common.general import General def _parse_args(desc): @@ -29,7 +38,9 @@ def _parse_args(desc): help="kill all Vega main process") group.add_argument("-f", "--force", action='store_true', help="Forcibly kill all Vega-related processes even if the main process does not exist") + parser = security.add_args(parser) args = parser.parse_args() + security.check_args(args) return args @@ -52,7 +63,7 @@ def _kill_vega_process(pid): try: os.kill(pid, signal.SIGINT) except Exception: - pass + logging.debug('Failed to kill pid {}.'.format(pid)) _wait(3) spids.append(pid) not_stoped = _check_exited(spids) @@ -60,7 +71,7 @@ def _kill_vega_process(pid): try: os.kill(pid, signal.SIGKILL) except Exception: - pass + logging.debug('Failed to kill pid {}.'.format(pid)) _wait(5) print("") not_stoped = _check_exited(not_stoped) @@ -104,14 +115,14 @@ def _kill_all_vega_process(): try: os.kill(pid, signal.SIGINT) except Exception: - pass + logging.debug('Failed to kill pid {}.'.format(pid)) _wait(3) not_stoped = _check_exited(all_spids) for pid in not_stoped: try: os.kill(pid, signal.SIGKILL) except Exception: - pass + logging.debug('Failed to kill pid {}.'.format(pid)) _wait(5) print("") not_stoped = _check_exited(not_stoped) @@ -122,7 +133,9 @@ def _kill_all_vega_process(): print("All Vega processes have been killed.") -def _get_sub_processes(pid, cpids=[]): +def _get_sub_processes(pid, cpids=None): + if cpids is None: + cpids = [] p = psutil.Process(pid) for cp in p.children(): cpid = cp.pid 
@@ -130,7 +143,7 @@ def _get_sub_processes(pid, cpids=[]): try: _get_sub_processes(cpid, cpids) except Exception: - pass + logging.debug('Failed to get sub_process {}.'.format(cpid)) return cpids @@ -151,7 +164,7 @@ def _force_kill(): try: os.kill(pid, signal.SIGKILL) except Exception: - pass + logging.debug('Failed to kill pid {}.'.format(pid)) _wait(5) print("") not_stoped = _check_exited(vega_pids) @@ -169,6 +182,7 @@ def _get_all_related_processes(): try: p = psutil.Process(pid) except Exception: + logging.debug('Failed to get pid {}.'.format(pid)) continue if p.name() in ["vega", "dask-scheduler", "dask-worker", "vega-main"]: vega_pids.append(pid) @@ -199,8 +213,15 @@ def _wait(seconds): time.sleep(0.5) -def _kill(): +def main(): + """Kill vega process.""" args = _parse_args("Kill Vega processes.") + if args.security: + if not security.load_config("client"): + print("If you run vega in normal mode, use parameter '-s'.") + print("For more parameters: vega-kill --help") + return + General.security = args.security if args.pid: _kill_vega_process(args.pid) elif args.task_id: @@ -212,4 +233,4 @@ def _kill(): if __name__ == "__main__": - _kill() + main() diff --git a/vega/tools/query_process.py b/vega/tools/query_process.py index 01dceaa..103b310 100644 --- a/vega/tools/query_process.py +++ b/vega/tools/query_process.py @@ -1,25 +1,34 @@ # -*- coding: utf-8 -*- # Copyright (C) 2020. Huawei Technologies Co., Ltd. All rights reserved. -# This program is free software; you can redistribute it and/or modify -# it under the terms of the MIT License. -# This program is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# MIT License for more details. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
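Note on the `_get_sub_processes` hunk above: it replaces the mutable default argument `cpids=[]` with `cpids=None`. A minimal standalone sketch of why the list default is risky (names here are illustrative, not from Vega):

    # A list default is created once at function definition time,
    # so state from earlier calls leaks into later ones.
    def collect_bad(item, acc=[]):        # shared across calls
        acc.append(item)
        return acc

    def collect_good(item, acc=None):     # fresh list per call
        if acc is None:
            acc = []
        acc.append(item)
        return acc

    assert collect_bad(1) == [1]
    assert collect_bad(2) == [1, 2]       # surprise: previous call's state
    assert collect_good(1) == [1]
    assert collect_good(2) == [2]         # independent calls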
"""Query vega process.""" -import psutil +import logging import json import time +import psutil from psutil import _pprint_secs from vega.common import MessageServer, MessageClient, argment_parser +from vega import security +from vega.common.general import General __all__ = [ "query_task_info", "get_pid", "is_vega_process", "get_vega_pids", - "query_process", "query_processes", "print_process", "print_processes", + "query_process", "query_processes", "print_process" ] @@ -27,6 +36,7 @@ def _parse_args(desc): parser = argment_parser(desc) parser.add_argument("-j", "--json", action='store_true', help="return json format string") + parser = security.add_args(parser) args = parser.parse_args() return args @@ -40,6 +50,7 @@ def get_vega_pids(): try: p = psutil.Process(pid) except Exception: + logging.debug('Failed to get obj of pid.') continue ppid = p.ppid() if ppid in [_pid for (_pid, _ppid) in vega_pids]: @@ -63,10 +74,10 @@ def get_task_id_path_port(pid): client = MessageClient(ip=ip, port=port, timeout=1) result = client.send(action="query_task_info") if isinstance(result, dict) and "task_id" in result: - return result.get("task_id"), result.get("base_path"), ip, port - return None, None, None, None - except Exception: - return None, None, None, None + return result.get("task_id"), result.get("base_path"), ip, port, None + return None, None, None, None, "Unknown" + except Exception as e: + return None, None, None, None, str(e) def get_pid(task_id): @@ -134,13 +145,13 @@ def query_process(pid): """Query process info.""" try: p = psutil.Process(pid) - (task_id, base_path, ip, port) = get_task_id_path_port(pid) + (task_id, base_path, ip, port, msg) = get_task_id_path_port(pid) return { "PID": pid, "cmdline": p.cmdline()[2:], "create_time": _pprint_secs(p.create_time()), "cwd": p.cwd(), - "task_id": task_id if task_id is not None else "Unknown", + "task_id": task_id if task_id is not None else f"error: {msg}", "base_path": base_path if base_path is not None else "Unknown", "user": p.username(), "ip": ip, @@ -175,9 +186,15 @@ def query_processes(): return infos -def print_processes(): +def main(): """Print all processes.""" args = _parse_args("Quey Vega processes.") + if args.security: + if not security.load_config("client"): + print("If you run vega in normal mode, use parameter '-s'.") + print("For more parameters: vega-process --help") + return + General.security = args.security processes = query_processes() if args.json: print(json.dumps(processes, indent=4)) @@ -186,4 +203,4 @@ def print_processes(): if __name__ == "__main__": - print_processes() + main() diff --git a/vega/tools/query_progress.py b/vega/tools/query_progress.py index 0125ccd..3b7e940 100644 --- a/vega/tools/query_progress.py +++ b/vega/tools/query_progress.py @@ -1,12 +1,18 @@ # -*- coding: utf-8 -*- # Copyright (C) 2020. Huawei Technologies Co., Ltd. All rights reserved. -# This program is free software; you can redistribute it and/or modify -# it under the terms of the MIT License. -# This program is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# MIT License for more details. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. """Inference of vega model.""" @@ -17,9 +23,12 @@ from vega.common import Status, JsonEncoder, DatatimeFormatString, argment_parser from vega.tools.query_process import query_task_info from vega.common import MessageClient +from vega import security +from vega.common.general import General __all__ = ["query_progress"] +error_message = "" def _parse_args(desc): @@ -28,7 +37,9 @@ def _parse_args(desc): help="vega application task id") parser.add_argument("-r", "--root_path", type=str, required=True, help="root path where vega application is running") + parser = security.args.add_args(parser) args = parser.parse_args() + security.args.check_args(args) return args @@ -42,7 +53,9 @@ def _load_report(report_path): try: with open(report_path, "r") as f: return json.load(f) - except Exception: + except Exception as e: + global error_message + error_message = str(e) return None @@ -95,11 +108,13 @@ def _statistic_progress(progress): for step in progress["steps"]: step["estimated_end_time"] = None if step["status"] == Status.running.value: - if "finished_epochs" in step and step["finished_epochs"] != 0: + if "finished_epochs" in step and step["finished_epochs"] != 0 and "num_epochs" in step: start_time = datetime.strptime(step["start_time"], DatatimeFormatString) delta = datetime.now() - start_time delta = delta * (step["num_epochs"] - step["finished_epochs"]) / step["finished_epochs"] step["estimated_end_time"] = datetime.now() + delta + else: + step["estimated_end_time"] = "0000-00-00 00:00:00" # count status all_finished = True progress["status"] = Status.running @@ -125,13 +140,14 @@ def _query_report(task_info): ip = task_info["ip"] client = MessageClient(ip=ip, port=port, timeout=1) return client.send(action="query_report") - except Exception: + except Exception as e: + global error_message + error_message = str(e) return None -def query_progress(times=0): +def query_progress(args, times=0): """Query vega progress.""" - args = _parse_args("Query Vega progress.") task_info = query_task_info(args.task_id) if not task_info: @@ -150,9 +166,10 @@ def query_progress(times=0): else: report = _query_report(task_info) if not report: + global error_message return json.dumps({ "status": Status.error, - "message": "Failed to query progress." + "message": f"Failed to query progress. 
{error_message}" }, cls=JsonEncoder, indent=4) progress = _parse_report(report) @@ -163,10 +180,17 @@ def query_progress(times=0): return json.dumps(progress, cls=JsonEncoder, indent=4) -def print_progress(): +def main(): """Print progress.""" - print(query_progress()) + args = _parse_args("Query Vega progress.") + if args.security: + if not security.load_config("client"): + print("If you run vega in normal mode, use parameter '-s'.") + print("For more parameters: vega-progress --help") + return + General.security = args.security + print(query_progress(args)) if __name__ == "__main__": - print_progress() + main() diff --git a/vega/tools/run_pipeline.py b/vega/tools/run_pipeline.py index 0a2f8e9..365e4d1 100644 --- a/vega/tools/run_pipeline.py +++ b/vega/tools/run_pipeline.py @@ -1,23 +1,30 @@ # -*- coding:utf-8 -*- # Copyright (C) 2020. Huawei Technologies Co., Ltd. All rights reserved. -# This program is free software; you can redistribute it and/or modify -# it under the terms of the MIT License. -# This program is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# MIT License for more details. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
"""Run pipeline.""" import os import sys -import vega from copy import deepcopy +import vega from vega.common.general import General from vega.common.config import Config -from vega.common.utils import verify_requires -from vega.common import argment_parser +from vega.common.utils import verify_requires, verify_platform_pkgs +from vega.common.arg_parser import argment_parser, str2bool +from vega import security def _append_env(): @@ -25,8 +32,8 @@ def _append_env(): sys.path.insert(0, dir_path) if "PYTHONPATH" not in os.environ: os.environ["PYTHONPATH"] = dir_path - else: - os.environ["PYTHONPATH"] += ":{}".format(dir_path) + elif dir_path not in os.environ["PYTHONPATH"].split(":"): + os.environ["PYTHONPATH"] += f":{dir_path}" def _parse_args(): @@ -47,18 +54,35 @@ def _parse_args(): help="resume not finished task") group_resume.add_argument("-t", "--task_id", default=None, type=str, help="specify the ID of the task to be resumed") - group_config = parser.add_argument_group(title='Modify config for yml') - group_config.add_argument("-m", "--modify", action='store_true', - help="modify some config") + group_config = parser.add_argument_group(title='Modify default configs in yml') group_config.add_argument("-dt", "--dataset", default=None, type=str, help='modify dataset for all pipe_step') group_config.add_argument("-dp", "--data_path", default=None, type=str, help="modify data_path for all pipe_step") - group_config.add_argument("-bs", "--batch_size", default=None, type=str, + group_config.add_argument("-bs", "--batch_size", default=None, type=int, help='modify batch_size of dataset for all pipe_step') - group_config.add_argument("-es", "--epochs", default=None, type=str, + group_config.add_argument("-es", "--epochs", default=None, type=int, help='modify fully_train epochs') + group_cluster = parser.add_argument_group(title='Set cluster info') + group_cluster.add_argument("-sa", "--standalone_boot", default=None, type=str2bool, + help="standalone boot mode, eg. -sa true") + group_cluster.add_argument("-ps", "--parallel_search", default=None, type=str2bool, + help="parallel search") + group_cluster.add_argument("-pt", "--parallel_fully_train", default=None, type=str2bool, + help="parallel fully train") + group_cluster.add_argument("-mi", "--master_ip", default=None, type=str, + help="master ip, eg. -mi n.n.n.n") + group_cluster.add_argument("-ws", "--num_workers", default=None, type=int, + help="number of workers, eg. -ws 12") + group_cluster.add_argument("-p", "--listen_port", default=None, type=int, + help="listen port, eg. -p 8878") + group_cluster.add_argument("-sv", "--slaves", dest="slaves", nargs="+", + help="slaves, eg. 
-sv n.n.n.n n.n.n.n") + parser = security.add_args(parser) args = parser.parse_args() + if args.security: + security.check_args(args) + security.check_yml(args.config_file) return args @@ -85,7 +109,7 @@ def _check_parse(args): return args -def _set_backend(args): +def _get_backend_device(args): backend = args.backend device = args.device if backend: @@ -107,7 +131,7 @@ def _set_backend(args): General.backend = backend if device: General.device_category = device - vega.set_backend(General.backend, General.device_category) + return General.backend, General.device_category def _resume(args): @@ -121,13 +145,9 @@ def _resume(args): General.backup_original_value(force=True) -def _backup_config(args): - _file = args.config_file - from vega.common.task_ops import TaskOps - from vega.common.file_ops import FileOps - dest_file = FileOps.join_path(TaskOps().local_output_path, os.path.basename(_file)) - FileOps.make_base_dir(dest_file) - FileOps.copy_file(_file, dest_file) +def _backup_config(file_name, config): + from vega.common import FileOps, TaskOps + config.dump_yaml(FileOps.join_path(TaskOps().local_output_path, os.path.basename(file_name))) def _change_process_name(): @@ -137,15 +157,62 @@ def _change_process_name(): libc.prctl(15, byref(buff), 0, 0, 0) -def run_pipeline(load_special_lib_func=None): +def _set_cluster(args, config): + if "general" not in config: + config["general"] = {} + if "cluster" not in config["general"]: + config["general"]["cluster"] = {} + for key in ["parallel_search", "parallel_fully_train"]: + if args.get(key, None) is not None: + setattr(General, key, args.get(key)) + config["general"][key] = args.get(key) + for key in ["standalone_boot", "num_workers", "master_ip", "listen_port", "slaves"]: + if args.get(key, None) is not None: + setattr(General.cluster, key, args.get(key)) + config["general"]["cluster"][key] = args.get(key) + return config + + +def _check_platform_pkgs(backend, device): + result = True + if backend == "pytorch": + result = verify_platform_pkgs([ + ("torch", "torch"), + ("torchvision", "torchvision")]) + elif backend == "tensorflow": + if device == "GPU": + tensorflow = "tensorflow-gpu>=1.14.0,<2.0" + else: + tensorflow = "tensorflow" + result = verify_platform_pkgs([ + ("tensorflow", tensorflow), + ("tf_slim", "tf-slim"), + ("official", "tf-models-official==0.0.3.dev1")]) + elif backend == "mindspore": + result = verify_platform_pkgs([ + ("mindspore", "mindspore")]) + return result + + +def main(): """Run pipeline.""" args = _parse_args() _resume(args) - _set_backend(args) + if args.security: + os.umask(0o077) + if not security.load_config("all"): + print("If you want to run vega in normal mode, use parameter '-s'.") + print("For more parameters: vega --help") + return + General.security = args.security + (backend, device) = _get_backend_device(args) + if not _check_platform_pkgs(backend, device): + return + vega.set_backend(backend, device) _append_env() - if load_special_lib_func: - load_special_lib_func(args.config_file) - config = Config(args.config_file) + config = Config(args.config_file, abs_path=True) + if not security.check_risky_file(args, config): + return # load general if config.get("general"): General.from_dict(config.get("general"), skip_check=False) @@ -154,11 +221,12 @@ def run_pipeline(load_special_lib_func=None): return dict_args = vars(args) dict_args = _check_parse(dict_args) + config = _set_cluster(dict_args, config) config = _modify_config(dict_args, config) - # _backup_config(args) + _backup_config(args.config_file, 
config) _change_process_name() vega.run(config) if __name__ == '__main__': - run_pipeline() + main() diff --git a/vega/tools/run_slave.py b/vega/tools/run_slave.py deleted file mode 100644 index 9da4428..0000000 --- a/vega/tools/run_slave.py +++ /dev/null @@ -1,36 +0,0 @@ -# -*- coding: utf-8 -*- - -# Copyright (C) 2020. Huawei Technologies Co., Ltd. All rights reserved. -# This program is free software; you can redistribute it and/or modify -# it under the terms of the MIT License. -# This program is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# MIT License for more details. - -"""Run dask worker on slave.""" - -import os -import time -import subprocess - - -def run_dask_worker(master_ip, port, num_workers): - """Run dask worker on slave.""" - success = 0 - interval = 3 # sleep 3s - for _ in range(60 * 60 // interval): - try: - subprocess.Popen( - ["dask-worker", f"{master_ip}:{port}", '--nthreads=1', '--nprocs=1', '--memory-limit=0'], - env=os.environ) - success += 1 - if success == num_workers: - break - except Exception as e: - print(f"Failed to start dask-worker ({e}), try again {interval}s later.") - time.sleep(interval) - if success != num_workers: - raise Exception("Failed to start dask-worker. Gave up.") - else: - print("dask-worker running.") diff --git a/vega/tools/st_api.py b/vega/tools/st_api.py deleted file mode 100644 index 105aab9..0000000 --- a/vega/tools/st_api.py +++ /dev/null @@ -1,58 +0,0 @@ -# -*- coding: utf-8 -*- - -# Copyright (C) 2020. Huawei Technologies Co., Ltd. All rights reserved. -# This program is free software; you can redistribute it and/or modify -# it under the terms of the MIT License. -# This program is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# MIT License for more details. 
- -"""Inference of spatiotemporal model.""" -import itertools - -import tensorflow as tf - -from vega.datasets import Adapter -from vega.datasets.common.spatiotemporal import SpatiotemporalDataset -from vega.metrics.tensorflow.forecast import RMSE -from vega.model_zoo import ModelZoo - - -def _init_tf_estimator(desc_file, model_dir): - """Init estimator of gpu evaluator used in tf backend.""" - sess_config = tf.compat.v1.ConfigProto() - sess_config.gpu_options.allow_growth = True - config = tf.estimator.RunConfig( - model_dir=model_dir, session_config=sess_config) - model = ModelZoo().get_model(desc_file) - - def _model_fn(features, labels, mode): - """Model function of gpu evaluator.""" - model.training = False - logits = model(features) - logits = tf.cast(logits, tf.float32) - if mode == tf.estimator.ModeKeys.PREDICT: - return tf.estimator.EstimatorSpec(mode=mode, predictions=logits) - else: - eval_metric_ops = RMSE()(logits, labels) - return tf.estimator.EstimatorSpec(mode=mode, loss=tf.log(1.0), train_op=None, - eval_metric_ops=eval_metric_ops) - - return tf.estimator.Estimator(model_fn=_model_fn, config=config) - - -def predict(data_path, desc_file, pretained_model_dir=None): - """Predict Spatiotemporal.""" - dataset = SpatiotemporalDataset( - mode='test', **dict(data_path=data_path, n_his=12, n_pred=4)) - valid_loader = Adapter(dataset).loader - estimator = _init_tf_estimator(desc_file, pretained_model_dir) - eval_metrics = estimator.evaluate( - input_fn=valid_loader.input_fn, steps=len(valid_loader)) - predictions = list(itertools.islice( - estimator.predict(input_fn=valid_loader.input_fn), 10)) - print(eval_metrics) - y_pred = predictions[0].reshape(-1) * dataset.std + dataset.mean - print(y_pred) - print("mean: {:.2f}, std: {:.2f}".format(dataset.mean, dataset.std)) diff --git a/vega/tools/verify_cluster.py b/vega/tools/verify_cluster.py deleted file mode 100644 index 0f0ddc1..0000000 --- a/vega/tools/verify_cluster.py +++ /dev/null @@ -1,292 +0,0 @@ -# -*- coding: utf-8 -*- - -# Copyright (C) 2020. Huawei Technologies Co., Ltd. All rights reserved. -# This program is free software; you can redistribute it and/or modify -# it under the terms of the MIT License. -# This program is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# MIT License for more details. - -"""Verify cluster env.""" - -import os -import subprocess -import uuid -import time -import psutil -import shutil -import signal -import json -from dask.distributed import Client -from vega.common import argment_parser -from vega.common.general import General -from vega.common.utils import get_available_port - - -def _parse_args(): - parser = argment_parser("Verify cluster.") - parser.add_argument("-m", "--master", default=None, type=str, required=True, - help="master node IP") - parser.add_argument("-s", "--slaves", dest="slaves", nargs="+", required=True, - help="slaves node IP, eg. 
-s 192.168.0.2 192.168.0.3") - parser.add_argument("-n", "--nfs_folder", default=None, type=str, required=True, - help="shared NFS folder") - parser.add_argument("-j", "--json", action='store_true', - help="silence mode, print result with json format") - args = parser.parse_args() - return args - - -_json = None -_port = None - - -def _print(value): - global _json - if not _json: - print(value) - - -def _call(cmd, **kwargs): - global _json - if _json: - return subprocess.call(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE, **kwargs) - else: - return subprocess.call(cmd, **kwargs) - - -def _check_output(cmd): - global _json - if _json: - return subprocess.check_output(cmd, stderr=subprocess.PIPE).decode("utf-8") - else: - return subprocess.check_output(cmd).decode("utf-8") - - -def _popen(cmd): - global _json - if _json: - return subprocess.Popen(cmd, close_fds=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE) - else: - return subprocess.Popen(cmd, close_fds=True) - - -def _verify_ip(args): - _print("*" * 32) - _print("Start verify IP.") - for slave in args.slaves: - msg = f"Failed to access slave ({slave})." - try: - result = _call(["ping", "-c", "4", slave]) - except Exception: - raise Exception(msg) - if result != 0: - raise Exception(msg) - - msg = f"Failed to login slave ({slave}) without password." - try: - result = _call([ - "ssh", "-o", "NumberOfPasswordPrompts=0", "-o", "StrictHostKeyChecking=yes", f"{slave}", - "\"/bin/echo\""]) - except Exception: - raise Exception(msg) - if result != 0: - raise Exception(msg) - _print("Pass.") - - -def _verify_nfs(args): - _print("*" * 32) - _print("Start verify NFS.") - if not os.path.exists(args.nfs_folder): - raise Exception(f"Shared NFS folder({args.nfs_folder}) is not existed.") - for slave in args.slaves: - temp_folder = os.path.join(args.nfs_folder, uuid.uuid1().hex) - msg = f"Shared NFS folder ({slave}:{args.nfs_folder}) is not accessed." 
- try: - result = _call(["ssh", slave, f"mkdir {temp_folder}"]) - except Exception: - raise Exception(msg) - if result != 0: - raise Exception(msg) - - try: - result = _call(["ssh", slave, f"rm -r {temp_folder}"]) - except Exception: - raise Exception(msg) - if result != 0: - raise Exception(msg) - _print("Pass.") - - -def _verify_pkg(args): - _print("*" * 32) - _print("Start verify packages.") - # python - main_output = _check_output([General.python_command, "--version"]) - for slave in args.slaves: - slave_output = _check_output(["ssh", slave, General.python_command, "--version"]) - if main_output != slave_output: - raise Exception(f"Python version is different.\nmaster:\n{main_output}\nslave:\n{slave_output}.") - # main packages - pkgs = ["noah-vega", "distributed", "torch"] - for pkg in pkgs: - main_output = _check_output(["pip3", "show", pkg]) - properties = main_output.split("\n") - main_version = "" - for prop in properties: - if "Version:" in prop: - main_version = prop - if main_version == "": - raise Exception(f"Package ({pkg}) is missing.") - for slave in args.slaves: - slave_output = _check_output(["ssh", slave, "pip3", "show", pkg]) - properties = slave_output.split("\n") - slave_version = "" - for prop in properties: - if "Version:" in prop: - slave_version = prop - if main_version != slave_version: - raise Exception(f"Package is different.\n\nmaster:\n{main_output}\n\nslave:\n{slave_output}.") - _print("Pass.") - - -def _kill_existed_dask(args): - pids = psutil.pids() - dask_pids = [] - for pid in pids: - try: - process = psutil.Process(pid) - pname = process.name() - if "dask-scheduler" in pname or "dask-worker" in pname: - dask_pids.append(pid) - except Exception: - pass - if dask_pids: - _print("Found existed dask scheduler or dask worker processes.") - _input = input("Do you want kill dask processes and continue to verify? 
[Y/n]: ") - if _input.upper() in ["N", "NO"]: - _print("Cluster verification canceled.") - os._exit(0) - elif _input.upper() not in ["", "Y", "YES"]: - _print("Input Error.") - os._exit(0) - for pid in dask_pids: - os.kill(int(pid), signal.SIGKILL) - time.sleep(10) - - -def _init_dask_scheduler(args): - _print("Start verify scheduler.") - global _port - _port = str(get_available_port()) - try: - result = _popen(["dask-scheduler", "--no-dashboard", "--no-show", "--port", _port]) - except Exception: - raise Exception("Failed to start dask scheduler.") - if not isinstance(result, subprocess.Popen): - _print("Failed to start dask scheduler.") - _print("Please run the command in CLI, and resovlue the problems.") - _print(f"dask-scheduler --no-dashboard --no-show --port {_port}") - raise Exception("Failed to start dask scheduler.") - time.sleep(5) - _print("Pass.") - - -def _verfiy_local(args): - global _port - _print(f"Start verify local worker, IP:{args.master}, port: {_port}.") - try: - result = _popen(["dask-worker", f"{args.master}:{_port}"]) - except Exception: - raise Exception("Can not start local dask-worker.") - if not isinstance(result, subprocess.Popen): - raise Exception("Can not start local dask-worker.") - time.sleep(5) - _print("Pass.") - - _print("Test local dask Client.") - cmd = f"{General.python_command} -c \"from dask.distributed import Client;"\ - f"client=Client('{args.master}:{_port}');client.close()\"" - try: - result = _call(cmd, shell=True) - except Exception: - raise Exception("Can not start local dask client.") - if result != 0: - raise Exception("Can not start local dask client.") - _print("Pass.") - - -def _verify_client(args): - global _port - _print("Start verify slave workers.") - for slave in args.slaves: - _print(f"Start verify slave({slave}) worker.") - try: - result = _popen(["ssh", slave, f"{shutil.which('dask-worker')} {args.master}:{_port}"]) - except Exception: - raise Exception(f"Can not start slave({slave}) dask-worker.") - if not isinstance(result, subprocess.Popen): - raise Exception(f"Can not start slave({slave}) dask-worker.") - time.sleep(5) - _print("Pass.") - - _print(f"Test slave({slave}) dask Client.") - cmd = f"{General.python_command} -c \"from dask.distributed import Client;"\ - f"client=Client('{args.master}:{_port}');client.close()\"" - try: - result = _call(cmd, shell=True, env=os.environ) - except Exception: - raise Exception(f"Can not start slave({slave}) dask client.") - if result != 0: - raise Exception(f"Can not start slave({slave}) dask client.") - time.sleep(5) - _print("Pass.") - _print("Pass.") - - -def _stop_dask_scheduler(args): - global _port - _print("Start stop scheduler.") - client = Client(f"{args.master}:{_port}") - try: - client.shutdown() - client.close() - del client - time.sleep(8) - except Exception: - _print("Failed to stop scheduler, please stop it manually.") - - -def _verify_dask(args): - _print("*" * 32) - # _kill_existed_dask(args) - _init_dask_scheduler(args) - _verfiy_local(args) - _verify_client(args) - _stop_dask_scheduler(args) - _print("Pass.") - - -def _verify_cluster(): - args = _parse_args() - global _json - _json = args.json - try: - _verify_ip(args) - _verify_nfs(args) - _verify_pkg(args) - _verify_dask(args) - _print("All cluster check items have passed.") - if args.json: - print(json.dumps({"status": "success"}, indent=4)) - except Exception as e: - _print("") - _print(f"Exception:\n\n{str(e)}") - if args.json: - print(json.dumps({"status": "error", "message": str(e)}, indent=4)) - - -if __name__ 
== "__main__": - _verify_cluster() diff --git a/vega/trainer/__init__.py b/vega/trainer/__init__.py index b3656ac..8bfcff8 100644 --- a/vega/trainer/__init__.py +++ b/vega/trainer/__init__.py @@ -1,22 +1,28 @@ # -*- coding:utf-8 -*- # Copyright (C) 2020. Huawei Technologies Co., Ltd. All rights reserved. -# This program is free software; you can redistribute it and/or modify -# it under the terms of the MIT License. -# This program is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# MIT License for more details. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. """Import and register trainer automatically.""" from vega.common.class_factory import ClassFactory - ClassFactory.lazy_register("vega.trainer", { "trainer_torch": ["TrainerTorch"], "trainer_tf": ["TrainerTf"], "trainer_ms": ["TrainerMs"], "trainer": ["Trainer"], "script_runner": ["ScriptRunner"], + "tuner": ["Tuner"] }) diff --git a/vega/trainer/callbacks/__init__.py b/vega/trainer/callbacks/__init__.py index 041e064..c6116ae 100644 --- a/vega/trainer/callbacks/__init__.py +++ b/vega/trainer/callbacks/__init__.py @@ -1,6 +1,6 @@ +from vega.common.class_factory import ClassFactory from .callback import Callback from .callback_list import CallbackList -from vega.common.class_factory import ClassFactory __all__ = ["Callback", "CallbackList"] @@ -23,4 +23,5 @@ "fusion": ["trainer.callback:OperatorFusionCallback"], "horovod": ["trainer.callback:Horovod"], "hccl": ["trainer.callback:Hccl"], + "search_alg_callback": ["trainer.callback:SearchAlgorithmCallbacks"], }) diff --git a/vega/trainer/callbacks/callback.py b/vega/trainer/callbacks/callback.py index bb04006..1b546c7 100644 --- a/vega/trainer/callbacks/callback.py +++ b/vega/trainer/callbacks/callback.py @@ -1,12 +1,18 @@ # -*- coding:utf-8 -*- # Copyright (C) 2020. Huawei Technologies Co., Ltd. All rights reserved. -# This program is free software; you can redistribute it and/or modify -# it under the terms of the MIT License. -# This program is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# MIT License for more details. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
"""Callbacks called at certain points of trainer.""" diff --git a/vega/trainer/callbacks/callback_list.py b/vega/trainer/callbacks/callback_list.py index 1235add..ef760af 100644 --- a/vega/trainer/callbacks/callback_list.py +++ b/vega/trainer/callbacks/callback_list.py @@ -1,12 +1,18 @@ # -*- coding:utf-8 -*- # Copyright (C) 2020. Huawei Technologies Co., Ltd. All rights reserved. -# This program is free software; you can redistribute it and/or modify -# it under the terms of the MIT License. -# This program is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# MIT License for more details. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. """Callbacks called at certain points of trainer.""" @@ -71,7 +77,7 @@ def _get_callbacks(self, customs, disables): if vega.is_torch_backend(): defaults = ["ModelStatistics", "MetricsEvaluator", "ModelCheckpoint", "ModelBuilder", "PerformanceSaver", "RuntimeCallback", "LearningRateScheduler", "ProgressLogger", "ReportCallback", - "DdpTorch", "Horovod", "Hccl"] + "SearchAlgorithmCallbacks", "DdpTorch", "Horovod", "Hccl"] elif vega.is_tf_backend(): defaults = ["ModelStatistics", "MetricsEvaluator", "ModelCheckpoint", "ModelBuilder", "PerformanceSaver", "RuntimeCallback", "ProgressLogger", "ReportCallback", "Horovod", "Hccl"] diff --git a/vega/trainer/callbacks/ddp_torch.py b/vega/trainer/callbacks/ddp_torch.py index 6be7afe..4212dab 100644 --- a/vega/trainer/callbacks/ddp_torch.py +++ b/vega/trainer/callbacks/ddp_torch.py @@ -1,21 +1,27 @@ # -*- coding:utf-8 -*- # Copyright (C) 2020. Huawei Technologies Co., Ltd. All rights reserved. -# This program is free software; you can redistribute it and/or modify -# it under the terms of the MIT License. -# This program is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# MIT License for more details. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
"""Data parallel callback.""" import logging import torch import vega -from .callback import Callback from vega.common import ClassFactory, ClassType from vega.common.general import General +from .callback import Callback logger = logging.getLogger(__name__) diff --git a/vega/trainer/callbacks/detection_metrics_evaluator.py b/vega/trainer/callbacks/detection_metrics_evaluator.py index 58e5b8f..1a7025a 100644 --- a/vega/trainer/callbacks/detection_metrics_evaluator.py +++ b/vega/trainer/callbacks/detection_metrics_evaluator.py @@ -1,12 +1,18 @@ # -*- coding:utf-8 -*- # Copyright (C) 2020. Huawei Technologies Co., Ltd. All rights reserved. -# This program is free software; you can redistribute it and/or modify -# it under the terms of the MIT License. -# This program is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# MIT License for more details. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. """DetectionMetricsEvaluator call defination.""" diff --git a/vega/trainer/callbacks/detection_progress_logger.py b/vega/trainer/callbacks/detection_progress_logger.py index 2a8c0db..b8bc48a 100644 --- a/vega/trainer/callbacks/detection_progress_logger.py +++ b/vega/trainer/callbacks/detection_progress_logger.py @@ -1,21 +1,27 @@ # -*- coding:utf-8 -*- # Copyright (C) 2020. Huawei Technologies Co., Ltd. All rights reserved. -# This program is free software; you can redistribute it and/or modify -# it under the terms of the MIT License. -# This program is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# MIT License for more details. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
"""DetectionProgressLogger call defination.""" -from vega.trainer.callbacks.progress_logger import ProgressLogger import logging -from collections import OrderedDict -from vega.common import ClassFactory, ClassType import time +from collections import OrderedDict from prettytable import PrettyTable +from vega.common import ClassFactory, ClassType +from vega.trainer.callbacks.progress_logger import ProgressLogger @ClassFactory.register(ClassType.CALLBACK) diff --git a/vega/trainer/callbacks/fusion.py b/vega/trainer/callbacks/fusion.py index 8c4f077..19d6299 100644 --- a/vega/trainer/callbacks/fusion.py +++ b/vega/trainer/callbacks/fusion.py @@ -1,12 +1,18 @@ # -*- coding:utf-8 -*- # Copyright (C) 2020. Huawei Technologies Co., Ltd. All rights reserved. -# This program is free software; you can redistribute it and/or modify -# it under the terms of the MIT License. -# This program is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# MIT License for more details. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. """Callbacks called at certain points of trainer.""" import logging @@ -14,7 +20,7 @@ import vega from vega.common.class_factory import ClassFactory, ClassType from vega.trainer.callbacks.callback import Callback -from vega.modules.operators import Identity +from vega.modules.operators.ops import Identity if vega.is_torch_backend(): import torch diff --git a/vega/trainer/callbacks/hccl.py b/vega/trainer/callbacks/hccl.py index 1151ce7..91e44b9 100644 --- a/vega/trainer/callbacks/hccl.py +++ b/vega/trainer/callbacks/hccl.py @@ -1,20 +1,26 @@ # -*- coding:utf-8 -*- # Copyright (C) 2020. Huawei Technologies Co., Ltd. All rights reserved. -# This program is free software; you can redistribute it and/or modify -# it under the terms of the MIT License. -# This program is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# MIT License for more details. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
"""Data parallel callback.""" import logging import vega -from .callback import Callback from vega.common import ClassFactory, ClassType from vega.common.general import General +from .callback import Callback logger = logging.getLogger(__name__) diff --git a/vega/trainer/callbacks/horovod.py b/vega/trainer/callbacks/horovod.py index 073c1fe..3f042a0 100644 --- a/vega/trainer/callbacks/horovod.py +++ b/vega/trainer/callbacks/horovod.py @@ -1,19 +1,25 @@ # -*- coding:utf-8 -*- # Copyright (C) 2020. Huawei Technologies Co., Ltd. All rights reserved. -# This program is free software; you can redistribute it and/or modify -# it under the terms of the MIT License. -# This program is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# MIT License for more details. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. """Data parallel callback.""" import logging import vega -from .callback import Callback from vega.common import ClassFactory, ClassType +from .callback import Callback logger = logging.getLogger(__name__) @@ -33,23 +39,14 @@ def before_train(self, logs=None): return if vega.is_torch_backend(): self._init_torch() - # elif vega.is_tf_backend(): - # self._init_tf() def _init_torch(self): import torch import horovod.torch as hvd hvd.broadcast_parameters(self.trainer.model.state_dict(), root_rank=0) hvd.broadcast_optimizer_state(self.trainer.optimizer, root_rank=0) - # torch.cuda.set_device(hvd.local_rank()) self.trainer._average_metrics = self._average_metrics - # def _init_tf(self): - # import horovod.tensorflow as hvd - # # hvd.init() - # # TODO horovod tf - # self.trainer.sess_config.gpu_options.visible_device_list = str(hvd.local_rank()) - def _average_metrics(self, metrics_results): import torch import horovod.torch as hvd diff --git a/vega/trainer/callbacks/lr_scheduler.py b/vega/trainer/callbacks/lr_scheduler.py index 3b76caa..e6aaa65 100644 --- a/vega/trainer/callbacks/lr_scheduler.py +++ b/vega/trainer/callbacks/lr_scheduler.py @@ -1,16 +1,22 @@ # -*- coding:utf-8 -*- # Copyright (C) 2020. Huawei Technologies Co., Ltd. All rights reserved. -# This program is free software; you can redistribute it and/or modify -# it under the terms of the MIT License. -# This program is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# MIT License for more details. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+# See the License for the specific language governing permissions and +# limitations under the License. """LearningRateSchduler callback Defination.""" -from .callback import Callback from vega.common import ClassFactory, ClassType +from .callback import Callback @ClassFactory.register(ClassType.CALLBACK) @@ -38,6 +44,5 @@ def after_epoch(self, epoch, logs=None): def after_train_step(self, batch_index, logs=None): """Call after_train_step of the managed callbacks.""" if self.lr_scheduler and not self.lr_scheduler.by_epoch: - # step = self.trainer.batch_num_train * self.epoch + self.epoch + batch_index step = self.trainer.batch_num_train * self.epoch + batch_index self.lr_scheduler.step(epoch=step) diff --git a/vega/trainer/callbacks/metrics_evaluator.py b/vega/trainer/callbacks/metrics_evaluator.py index 4b948c4..d412541 100644 --- a/vega/trainer/callbacks/metrics_evaluator.py +++ b/vega/trainer/callbacks/metrics_evaluator.py @@ -1,18 +1,24 @@ # -*- coding:utf-8 -*- # Copyright (C) 2020. Huawei Technologies Co., Ltd. All rights reserved. -# This program is free software; you can redistribute it and/or modify -# it under the terms of the MIT License. -# This program is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# MIT License for more details. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. """ProgressLogger call defination.""" from copy import deepcopy -from .callback import Callback from vega.common import ClassFactory, ClassType +from .callback import Callback @ClassFactory.register(ClassType.CALLBACK) @@ -35,6 +41,7 @@ def before_train(self, logs=None): self.best_valid_perfs = None self.best_valid_changed = False self.summary_perfs = None + self.train_metrics = None self.perfs_cmp_mode = self.trainer.config.perfs_cmp_mode self.perfs_cmp_key = self.trainer.config.perfs_cmp_key # get_train_metric_after_epoch: detector or no need to get train_metrics after epoch diff --git a/vega/trainer/callbacks/model_builder.py b/vega/trainer/callbacks/model_builder.py index f2a9c0f..e3bc153 100644 --- a/vega/trainer/callbacks/model_builder.py +++ b/vega/trainer/callbacks/model_builder.py @@ -1,22 +1,28 @@ # -*- coding:utf-8 -*- # Copyright (C) 2020. Huawei Technologies Co., Ltd. All rights reserved. -# This program is free software; you can redistribute it and/or modify -# it under the terms of the MIT License. -# This program is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# MIT License for more details. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. """ModelCheckpoint callback defination.""" import logging import vega -from .callback import Callback from vega.common import Config from vega.common import ClassFactory, ClassType from vega.networks.model_config import ModelConfig from vega.model_zoo import ModelZoo +from .callback import Callback logger = logging.getLogger(__name__) diff --git a/vega/trainer/callbacks/model_checkpoint.py b/vega/trainer/callbacks/model_checkpoint.py index 6b9c9c8..cd118e5 100644 --- a/vega/trainer/callbacks/model_checkpoint.py +++ b/vega/trainer/callbacks/model_checkpoint.py @@ -1,12 +1,18 @@ # -*- coding:utf-8 -*- # Copyright (C) 2020. Huawei Technologies Co., Ltd. All rights reserved. -# This program is free software; you can redistribute it and/or modify -# it under the terms of the MIT License. -# This program is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# MIT License for more details. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. """ModelCheckpoint callback defination.""" @@ -15,9 +21,9 @@ import logging import numpy as np import vega -from .callback import Callback from vega.common import FileOps from vega.common import ClassFactory, ClassType +from .callback import Callback if vega.is_torch_backend(): import torch @@ -87,7 +93,6 @@ def _save_checkpoint(self, epoch): checkpoint_file = FileOps.join_path( self.trainer.get_local_worker_path(), self.trainer.checkpoint_file_name) logging.debug("Start Save Model, model_file=%s", self.trainer.model_pickle_file_name) - # save checkpoint if vega.is_torch_backend(): ckpt = { 'epoch': epoch, @@ -121,7 +126,6 @@ def _load_checkpoint(self): self.trainer.optimizer.load_state_dict(checkpoint["optimizer"]) self.trainer.lr_scheduler.load_state_dict(checkpoint["lr_scheduler"]) if self.trainer._resume_training: - # epoch = checkpoint["epoch"] self.trainer._start_epoch = checkpoint["epoch"] logging.info("Resume fully train, change start epoch to {}".format(self.trainer._start_epoch)) except Exception as e: diff --git a/vega/trainer/callbacks/model_statistics.py b/vega/trainer/callbacks/model_statistics.py index f5fb70c..c8ab2b2 100644 --- a/vega/trainer/callbacks/model_statistics.py +++ b/vega/trainer/callbacks/model_statistics.py @@ -1,19 +1,25 @@ # -*- coding:utf-8 -*- # Copyright (C) 2020. Huawei Technologies Co., Ltd. All rights reserved. -# This program is free software; you can redistribute it and/or modify -# it under the terms of the MIT License. 
-# This program is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# MIT License for more details. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. """ModelStatistics callback defination.""" import logging import vega -from .callback import Callback from vega.metrics import calc_model_flops_params, calc_forward_latency from vega.common import ClassFactory, ClassType +from .callback import Callback if vega.is_torch_backend(): import torch @@ -40,7 +46,6 @@ def before_train(self, logs=None): import tensorflow as tf datasets = self.trainer.valid_input_fn() data_iter = tf.compat.v1.data.make_one_shot_iterator(datasets) - # data_iter = self.trainer.valid_input_fn().make_one_shot_iterator() input_data, _ = data_iter.get_next() self.input = input_data[:1] elif vega.is_torch_backend(): @@ -51,7 +56,6 @@ def before_train(self, logs=None): elif isinstance(batch, list) and isinstance(batch[0], dict): self.input = batch[:1] else: - # classification self.input = batch[0][:1] break self.update_flops_params(logs=logs) diff --git a/vega/trainer/callbacks/model_tuner.py b/vega/trainer/callbacks/model_tuner.py index 1ea0c0e..47e1d19 100644 --- a/vega/trainer/callbacks/model_tuner.py +++ b/vega/trainer/callbacks/model_tuner.py @@ -1,12 +1,18 @@ # -*- coding:utf-8 -*- # Copyright (C) 2020. Huawei Technologies Co., Ltd. All rights reserved. -# This program is free software; you can redistribute it and/or modify -# it under the terms of the MIT License. -# This program is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# MIT License for more details. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
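Note on the `model_statistics.py` hunk above: the callback pulls a single input sample to compute FLOPs and parameter counts before training. As a rough stand-in for that idea, the parameter count alone can be read directly from a torch model (Vega's `calc_model_flops_params` helper is not reproduced here):

    import torch.nn as nn

    def count_parameters(model):
        # Total and trainable parameter counts for a torch model.
        total = sum(p.numel() for p in model.parameters())
        trainable = sum(p.numel() for p in model.parameters() if p.requires_grad)
        return total, trainable

    model = nn.Linear(10, 2)                  # tiny example model
    total, trainable = count_parameters(model)
    assert total == 10 * 2 + 2                # weights + bias
    assert trainable == total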
"""ModleTuner callback defination.""" import logging @@ -31,28 +37,17 @@ def init_trainer(self, logs=None): def _reset_classifier_model(self): if vega.is_torch_backend(): - - # num_classes = ModelConfig.model_desc.backbone.n_class num_classes = ModelConfig.num_classes model = self.trainer.model out_features = num_classes - - # fix layers - # for param in model.parameters(): - # param.requires_grad = False - - # change head if "torch_vision_model" in ModelConfig.model_desc["modules"]: - # torchvision import torch.nn as nn in_features = model.fc.in_features model.fc = nn.Linear(in_features, out_features).cuda() else: - # vega in_features = model.fc.in_features from vega.modules.operators import ops model.fc = ops.Linear(in_features=in_features, out_features=out_features).cuda() - # TODO n_class ModelConfig.model_desc.backbone.n_class = num_classes logging.info("Model fine tuned successfully.") diff --git a/vega/trainer/callbacks/ms_callbacks.py b/vega/trainer/callbacks/ms_callbacks.py index 054a305..942705f 100644 --- a/vega/trainer/callbacks/ms_callbacks.py +++ b/vega/trainer/callbacks/ms_callbacks.py @@ -1,12 +1,18 @@ # -*- coding:utf-8 -*- # Copyright (C) 2020. Huawei Technologies Co., Ltd. All rights reserved. -# This program is free software; you can redistribute it and/or modify -# it under the terms of the MIT License. -# This program is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# MIT License for more details. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. """Custom callbacks used in mindspore.""" diff --git a/vega/trainer/callbacks/performance_saver.py b/vega/trainer/callbacks/performance_saver.py index 0716942..cbb0bce 100644 --- a/vega/trainer/callbacks/performance_saver.py +++ b/vega/trainer/callbacks/performance_saver.py @@ -1,17 +1,23 @@ # -*- coding:utf-8 -*- # Copyright (C) 2020. Huawei Technologies Co., Ltd. All rights reserved. -# This program is free software; you can redistribute it and/or modify -# it under the terms of the MIT License. -# This program is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# MIT License for more details. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
"""PerformanceSaver callback defination.""" import logging -from .callback import Callback from vega.common import ClassFactory, ClassType +from .callback import Callback @ClassFactory.register(ClassType.CALLBACK) diff --git a/vega/trainer/callbacks/progress_logger.py b/vega/trainer/callbacks/progress_logger.py index 50ee17b..d962323 100644 --- a/vega/trainer/callbacks/progress_logger.py +++ b/vega/trainer/callbacks/progress_logger.py @@ -1,21 +1,27 @@ # -*- coding:utf-8 -*- # Copyright (C) 2020. Huawei Technologies Co., Ltd. All rights reserved. -# This program is free software; you can redistribute it and/or modify -# it under the terms of the MIT License. -# This program is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# MIT License for more details. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. """ProgressLogger call defination.""" import logging import statistics import time -import numpy as np from collections.abc import Iterable -from .callback import Callback +import numpy as np from vega.common import ClassFactory, ClassType +from .callback import Callback @ClassFactory.register(ClassType.CALLBACK) diff --git a/vega/trainer/callbacks/report_callback.py b/vega/trainer/callbacks/report_callback.py index 07c9ead..53c1e97 100644 --- a/vega/trainer/callbacks/report_callback.py +++ b/vega/trainer/callbacks/report_callback.py @@ -1,20 +1,26 @@ # -*- coding:utf-8 -*- # Copyright (C) 2020. Huawei Technologies Co., Ltd. All rights reserved. -# This program is free software; you can redistribute it and/or modify -# it under the terms of the MIT License. -# This program is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# MIT License for more details. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
"""Report callback defination.""" import logging -from .callback import Callback +import vega from vega.report import ReportClient from vega.common import ClassFactory, ClassType -import vega +from .callback import Callback logger = logging.getLogger(__name__) diff --git a/vega/trainer/callbacks/runtime_callback.py b/vega/trainer/callbacks/runtime_callback.py index 4ff6889..16fdd79 100644 --- a/vega/trainer/callbacks/runtime_callback.py +++ b/vega/trainer/callbacks/runtime_callback.py @@ -1,17 +1,23 @@ # -*- coding:utf-8 -*- # Copyright (C) 2020. Huawei Technologies Co., Ltd. All rights reserved. -# This program is free software; you can redistribute it and/or modify -# it under the terms of the MIT License. -# This program is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# MIT License for more details. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. """Report callback defination.""" -from .callback import Callback from vega.common import ClassFactory, ClassType from vega.metrics.runtime_estimate import RuntimeEstimator +from .callback import Callback @ClassFactory.register(ClassType.CALLBACK) diff --git a/vega/trainer/callbacks/search_alg_callback.py b/vega/trainer/callbacks/search_alg_callback.py new file mode 100644 index 0000000..b6de1bc --- /dev/null +++ b/vega/trainer/callbacks/search_alg_callback.py @@ -0,0 +1,45 @@ +# -*- coding:utf-8 -*- + +# Copyright (C) 2020. Huawei Technologies Co., Ltd. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""Search Algorithm callback defination.""" +from vega.trainer.callbacks.callback import Callback +from vega.common import ClassFactory, ClassType + + +@ClassFactory.register(ClassType.CALLBACK) +class SearchAlgorithmCallbacks(Callback): + """Callback that saves the evaluated Performance.""" + + def __init__(self): + """Initialize ModleTuner callback.""" + super(SearchAlgorithmCallbacks, self).__init__() + self.priority = 300 + + def init_trainer(self, logs=None): + """Init model. 
Change head and Fix layers.""" + self.call_callback_fn("init_trainer", logs) + + def before_train(self, logs=None): + self.call_callback_fn("before_train", logs) + + def after_train(self, logs=None): + self.call_callback_fn("after_train", logs) + + def call_callback_fn(self, name, *args, **kwargs): + if not ClassFactory.is_exists(ClassType.CALLBACK_FN, name): + return + fn = ClassFactory.get_cls(ClassType.CALLBACK_FN, name) + fn(self.trainer, *args, **kwargs) diff --git a/vega/trainer/callbacks/timm_trainer_callback.py b/vega/trainer/callbacks/timm_trainer_callback.py index defa351..e103717 100644 --- a/vega/trainer/callbacks/timm_trainer_callback.py +++ b/vega/trainer/callbacks/timm_trainer_callback.py @@ -1,15 +1,22 @@ # -*- coding: utf-8 -*- # Copyright (C) 2020. Huawei Technologies Co., Ltd. All rights reserved. -# This program is free software; you can redistribute it and/or modify -# it under the terms of the MIT License. -# This program is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# MIT License for more details. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. """TIMM method trainer.""" +import logging import os import importlib import torch @@ -25,7 +32,7 @@ import apex from apex import amp except Exception: - pass + logging.debug('apex is no installed.') import horovod.torch as hvd import vega from vega.common import Config @@ -77,8 +84,6 @@ def create_loader( sampler = torch.utils.data.distributed.DistributedSampler( dataset, num_replicas=world_size, rank=rank) else: - # This will add extra duplicate entries to result in equal num - # of samples per-process, will slightly alter validation results sampler = OrderedDistributedSampler(dataset, num_replicas=world_size, rank=rank) if collate_fn is None: @@ -171,7 +176,7 @@ def after_epoch(self, epoch, logs=None): if self.trainer.is_chief: self.trainer._backup() - def _init_all_settings(self): # noqa: C901 + def _init_all_settings(self): """Init all settings from config.""" self.config = self.trainer.config if self.trainer.hps and self.trainer.hps.get('trainer'): @@ -200,8 +205,6 @@ def _init_all_settings(self): # noqa: C901 self.trainer.valid_metrics = self.trainer._init_metrics(None) self.trainer.callbacks._set_params(self.trainer) - # self.trainer.has_built = True - def _init_model_ema(self): """Init Model Ema.""" args = self.config.model_ema diff --git a/vega/trainer/callbacks/visual_callback.py b/vega/trainer/callbacks/visual_callback.py index 62b2ac1..9b671f0 100644 --- a/vega/trainer/callbacks/visual_callback.py +++ b/vega/trainer/callbacks/visual_callback.py @@ -1,22 +1,28 @@ # -*- coding:utf-8 -*- # Copyright (C) 2020. Huawei Technologies Co., Ltd. All rights reserved. -# This program is free software; you can redistribute it and/or modify -# it under the terms of the MIT License. 
-# This program is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# MIT License for more details. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. """Visual callback definition.""" import logging -import vega import numpy as np -from .callback import Callback +import vega from vega.common import ClassFactory, ClassType from vega.common import TaskOps from vega.visual.tensorboarder import SummaryBoard +from .callback import Callback def _flat_items(data, parents=tuple()): @@ -55,8 +61,6 @@ def before_train(self, logs=None): """Fetch trainer info before train stage.""" self._fix_path = "_".join([self.trainer.step_name, str(self.trainer.worker_id)]) self.summary = SummaryBoard(self._archive_root, self._fix_path) - - # add graph only once. if vega.is_tf_backend(): import tensorflow as tf datasets = self.trainer.valid_input_fn() @@ -96,8 +100,6 @@ def after_epoch(self, epoch, logs=None): """Collect data after epoch, and 'after_epoch' data could contains 'after_valid'.""" readable_records = make_keys_readable(logs) self.summary.insert_epoch_logs(readable_records, epoch) - - # update info info_records = [("/".join(["info", k]), self._info[k]) for k in self._need_keys] self.summary.insert_epoch_logs(info_records, epoch) @@ -136,5 +138,4 @@ def _fetch_tf_graph(model, input): sess.run(tf.global_variables_initializer()) sess.run(out, feed_dict={dummy_input: np.ones(input.shape.as_list())}) - # print(np.shape(o), o) return graph diff --git a/vega/trainer/conf.py b/vega/trainer/conf.py index b096ad2..5a87aa6 100644 --- a/vega/trainer/conf.py +++ b/vega/trainer/conf.py @@ -1,20 +1,26 @@ # -*- coding=utf-8 -*- # Copyright (C) 2020. Huawei Technologies Co., Ltd. All rights reserved. -# This program is free software; you can redistribute it and/or modify -# it under the terms of the MIT License. -# This program is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# MIT License for more details. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
"""Default configs.""" import os +from vega.common import ConfigSerializable from .modules.conf.loss import LossConfig from .modules.conf.lr_scheduler import LrSchedulerConfig from .modules.conf.optim import OptimConfig -from vega.common import ConfigSerializable from .task_conf import DEFAULT_CONFIG @@ -77,7 +83,6 @@ class TrainerConfig(ConfigSerializable): # TODO: need to delete limits = None init_model_file = None - pareto_front_file = None unrolled = True model_desc_file = None codec = None @@ -110,6 +115,7 @@ class TrainerConfig(ConfigSerializable): eval_per_epoch = True # script runner script = None + use_dag_forward = False @classmethod def set_task(cls, task): @@ -155,7 +161,6 @@ def rules(cls): "metric": {"type": dict}, "limits": {"type": (dict, None)}, "init_model_file": {"type": (str, None)}, - "pareto_front_file": {"type": (str, None)}, "unrolled": {"type": bool}, "model_desc_file": {"type": (str, None)}, "codec": {"type": (str, dict, None)}, diff --git a/vega/trainer/deserialize.py b/vega/trainer/deserialize.py index 4b29c4e..6e961b9 100644 --- a/vega/trainer/deserialize.py +++ b/vega/trainer/deserialize.py @@ -1,18 +1,24 @@ # -*- coding:utf-8 -*- # Copyright (C) 2020. Huawei Technologies Co., Ltd. All rights reserved. -# This program is free software; you can redistribute it and/or modify -# it under the terms of the MIT License. -# This program is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# MIT License for more details. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
"""Deserialize worker.""" import os -import pickle from copy import deepcopy +from vega.common import FileOps def _get_worker_config(worker): @@ -43,23 +49,25 @@ def pickle_worker(workers, id): config_file = os.path.join( worker.get_local_worker_path(), f".{str(id)}.{str(index)}.config.pkl") - with open(config_file, "wb") as f: - pickle.dump(worker_config, f) + FileOps.dump_pickle(worker_config, config_file) # pickle worker worker_file = os.path.join( worker.get_local_worker_path(), f".{str(id)}.{str(index)}.worker.pkl") - with open(worker_file, "wb") as f: - pickle.dump(worker, f) + FileOps.dump_pickle(worker, worker_file) def load_config(config_file): """Load config from file.""" - import pickle - - with open(config_file, 'rb') as f: - config = pickle.load(f) + # load General config (includes security setting) + from vega.common.general import General + General.security = False + config = FileOps.load_pickle(config_file) + General.from_dict(config["general"]) + # if security mode, reload config + if General.security: + config = FileOps.load_pickle(config_file) from vega.common.class_factory import ClassFactory from vega.common.general import General from vega.datasets.conf.dataset import DatasetConfig @@ -69,7 +77,6 @@ def load_config(config_file): from vega.core.pipeline.conf import PipeStepConfig ClassFactory.__registry__ = config["class_factory"] - General.from_dict(config["general"]) DatasetConfig.from_dict(config["dataset"]) ModelConfig.from_dict(config["model"]) TrainerConfig.from_dict(config["trainer"]) @@ -79,7 +86,5 @@ def load_config(config_file): def load_worker(worker_file): """Load worker from file.""" - import pickle - with open(worker_file, 'rb') as f: - worker = pickle.load(f) + worker = FileOps.load_pickle(worker_file) return worker diff --git a/vega/trainer/distributed_worker.py b/vega/trainer/distributed_worker.py index 3b5f52e..3a9f0c4 100644 --- a/vega/trainer/distributed_worker.py +++ b/vega/trainer/distributed_worker.py @@ -1,12 +1,18 @@ # -*- coding: utf-8 -*- # Copyright (C) 2020. Huawei Technologies Co., Ltd. All rights reserved. -# This program is free software; you can redistribute it and/or modify -# it under the terms of the MIT License. -# This program is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# MIT License for more details. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. """Distributed worker for training and evaluating. @@ -15,7 +21,7 @@ function of each distributed worker on local node, it also has the function of timeout, killing the worker process which exceeds setting time. 
""" -from vega.common.task_ops import TaskOps +from vega.common import TaskOps from vega.common.general import General diff --git a/vega/trainer/modules/conf/loss.py b/vega/trainer/modules/conf/loss.py index 4a231c4..29f024e 100644 --- a/vega/trainer/modules/conf/loss.py +++ b/vega/trainer/modules/conf/loss.py @@ -1,12 +1,18 @@ # -*- coding=utf-8 -*- # Copyright (C) 2020. Huawei Technologies Co., Ltd. All rights reserved. -# This program is free software; you can redistribute it and/or modify -# it under the terms of the MIT License. -# This program is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# MIT License for more details. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. """Default loss configs.""" from vega.common import ConfigSerializable import vega @@ -53,7 +59,6 @@ class LossMappingDict(object): params_mapping_dict = dict( CrossEntropyLoss=dict( ignore_index=dict(torch='ignore_index', tf='ignore_index', ms=None), - # is_grad=dict(torch=None, tf=None, ms='is_grad'), sparse=dict(torch=None, tf=None, ms='sparse'), ), MixAuxiliaryLoss=dict( diff --git a/vega/trainer/modules/conf/lr_scheduler.py b/vega/trainer/modules/conf/lr_scheduler.py index dddc0a5..2dfa9c5 100644 --- a/vega/trainer/modules/conf/lr_scheduler.py +++ b/vega/trainer/modules/conf/lr_scheduler.py @@ -1,12 +1,18 @@ # -*- coding=utf-8 -*- # Copyright (C) 2020. Huawei Technologies Co., Ltd. All rights reserved. -# This program is free software; you can redistribute it and/or modify -# it under the terms of the MIT License. -# This program is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# MIT License for more details. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. """Default lr_scheduler configs.""" from vega.common import ConfigSerializable diff --git a/vega/trainer/modules/conf/optim.py b/vega/trainer/modules/conf/optim.py index 7d61462..962d82d 100644 --- a/vega/trainer/modules/conf/optim.py +++ b/vega/trainer/modules/conf/optim.py @@ -1,12 +1,18 @@ # -*- coding=utf-8 -*- # Copyright (C) 2020. Huawei Technologies Co., Ltd. All rights reserved. -# This program is free software; you can redistribute it and/or modify -# it under the terms of the MIT License. 
-# This program is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# MIT License for more details. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. """Default optimizer configs.""" from vega.common import ConfigSerializable diff --git a/vega/trainer/modules/config_bakcend_map.py b/vega/trainer/modules/config_bakcend_map.py index 9a0997c..4f82a0d 100644 --- a/vega/trainer/modules/config_bakcend_map.py +++ b/vega/trainer/modules/config_bakcend_map.py @@ -1,12 +1,18 @@ # -*- coding:utf-8 -*- # Copyright (C) 2020. Huawei Technologies Co., Ltd. All rights reserved. -# This program is free software; you can redistribute it and/or modify -# it under the terms of the MIT License. -# This program is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# MIT License for more details. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. """Modules Config Mapping according to specific backend.""" diff --git a/vega/trainer/modules/lr_schedulers/__init__.py b/vega/trainer/modules/lr_schedulers/__init__.py index ec8b966..8db0aa5 100644 --- a/vega/trainer/modules/lr_schedulers/__init__.py +++ b/vega/trainer/modules/lr_schedulers/__init__.py @@ -1,5 +1,5 @@ -from .lr_scheduler import LrScheduler import vega +from .lr_scheduler import LrScheduler if vega.is_torch_backend(): from .warmup_scheduler_torch import WarmupScheduler diff --git a/vega/trainer/modules/lr_schedulers/ca_restart_tf.py b/vega/trainer/modules/lr_schedulers/ca_restart_tf.py index e2c6485..e2ba215 100644 --- a/vega/trainer/modules/lr_schedulers/ca_restart_tf.py +++ b/vega/trainer/modules/lr_schedulers/ca_restart_tf.py @@ -1,12 +1,18 @@ # -*- coding: utf-8 -*- # Copyright (C) 2020. Huawei Technologies Co., Ltd. All rights reserved. -# This program is free software; you can redistribute it and/or modify -# it under the terms of the MIT License. -# This program is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# MIT License for more details. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. """Cosine annealing restart lr scheduler.""" import math diff --git a/vega/trainer/modules/lr_schedulers/cosine_annealing.py b/vega/trainer/modules/lr_schedulers/cosine_annealing.py index dee493d..afd212b 100644 --- a/vega/trainer/modules/lr_schedulers/cosine_annealing.py +++ b/vega/trainer/modules/lr_schedulers/cosine_annealing.py @@ -1,16 +1,22 @@ # -*- coding: utf-8 -*- # Copyright (C) 2020. Huawei Technologies Co., Ltd. All rights reserved. -# This program is free software; you can redistribute it and/or modify -# it under the terms of the MIT License. -# This program is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# MIT License for more details. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. """Cosine annealing lr scheduler.""" -import tensorflow as tf import math +import tensorflow as tf from vega.common import ClassFactory, ClassType diff --git a/vega/trainer/modules/lr_schedulers/lr_scheduler.py b/vega/trainer/modules/lr_schedulers/lr_scheduler.py index 27ab132..56f5eba 100644 --- a/vega/trainer/modules/lr_schedulers/lr_scheduler.py +++ b/vega/trainer/modules/lr_schedulers/lr_scheduler.py @@ -1,16 +1,22 @@ # -*- coding: utf-8 -*- # Copyright (C) 2020. Huawei Technologies Co., Ltd. All rights reserved. -# This program is free software; you can redistribute it and/or modify -# it under the terms of the MIT License. -# This program is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# MIT License for more details. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
"""Manage LrScheduler class.""" import logging -import vega from copy import deepcopy +import vega from vega.common import ClassFactory, ClassType from vega.common.config import Config from ..config_bakcend_map import ConfigBackendMapping diff --git a/vega/trainer/modules/lr_schedulers/ms_lr_scheduler.py b/vega/trainer/modules/lr_schedulers/ms_lr_scheduler.py index 678c42c..e337939 100644 --- a/vega/trainer/modules/lr_schedulers/ms_lr_scheduler.py +++ b/vega/trainer/modules/lr_schedulers/ms_lr_scheduler.py @@ -1,17 +1,23 @@ # -*- coding: utf-8 -*- # Copyright (C) 2020. Huawei Technologies Co., Ltd. All rights reserved. -# This program is free software; you can redistribute it and/or modify -# it under the terms of the MIT License. -# This program is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# MIT License for more details. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. """Cosine annealing lr scheduler.""" -from vega.common import ClassFactory, ClassType -import numpy as np import math +import numpy as np +from vega.common import ClassFactory, ClassType @ClassFactory.register(ClassType.LR_SCHEDULER) diff --git a/vega/trainer/modules/lr_schedulers/multistep.py b/vega/trainer/modules/lr_schedulers/multistep.py index 27c5256..7d0f8ef 100644 --- a/vega/trainer/modules/lr_schedulers/multistep.py +++ b/vega/trainer/modules/lr_schedulers/multistep.py @@ -1,12 +1,18 @@ # -*- coding: utf-8 -*- # Copyright (C) 2020. Huawei Technologies Co., Ltd. All rights reserved. -# This program is free software; you can redistribute it and/or modify -# it under the terms of the MIT License. -# This program is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# MIT License for more details. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. """Multi step warm up lr scheduler.""" diff --git a/vega/trainer/modules/lr_schedulers/scheduler_dict.py b/vega/trainer/modules/lr_schedulers/scheduler_dict.py index 8a40167..4b1a114 100644 --- a/vega/trainer/modules/lr_schedulers/scheduler_dict.py +++ b/vega/trainer/modules/lr_schedulers/scheduler_dict.py @@ -1,9 +1,15 @@ -# This program is free software; you can redistribute it and/or modify -# it under the terms of the MIT License. 
-# This program is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# MIT License for more details. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. """Manage LrScheduler class.""" diff --git a/vega/trainer/modules/lr_schedulers/step_lr.py b/vega/trainer/modules/lr_schedulers/step_lr.py index da86184..d2bbb00 100644 --- a/vega/trainer/modules/lr_schedulers/step_lr.py +++ b/vega/trainer/modules/lr_schedulers/step_lr.py @@ -1,12 +1,18 @@ # -*- coding: utf-8 -*- # Copyright (C) 2020. Huawei Technologies Co., Ltd. All rights reserved. -# This program is free software; you can redistribute it and/or modify -# it under the terms of the MIT License. -# This program is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# MIT License for more details. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. """Multi step warm up lr scheduler.""" import tensorflow as tf diff --git a/vega/trainer/modules/lr_schedulers/warmup_scheduler_tf.py b/vega/trainer/modules/lr_schedulers/warmup_scheduler_tf.py index 1ca661d..97f3061 100644 --- a/vega/trainer/modules/lr_schedulers/warmup_scheduler_tf.py +++ b/vega/trainer/modules/lr_schedulers/warmup_scheduler_tf.py @@ -1,12 +1,18 @@ # -*- coding: utf-8 -*- # Copyright (C) 2020. Huawei Technologies Co., Ltd. All rights reserved. -# This program is free software; you can redistribute it and/or modify -# it under the terms of the MIT License. -# This program is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# MIT License for more details. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. """Basic Warm up lr scheduler. 
diff --git a/vega/trainer/modules/lr_schedulers/warmup_scheduler_torch.py b/vega/trainer/modules/lr_schedulers/warmup_scheduler_torch.py index 239fbc5..595df22 100644 --- a/vega/trainer/modules/lr_schedulers/warmup_scheduler_torch.py +++ b/vega/trainer/modules/lr_schedulers/warmup_scheduler_torch.py @@ -1,12 +1,18 @@ # -*- coding: utf-8 -*- # Copyright (C) 2020. Huawei Technologies Co., Ltd. All rights reserved. -# This program is free software; you can redistribute it and/or modify -# it under the terms of the MIT License. -# This program is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# MIT License for more details. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. """Basic Warm up lr scheduler. diff --git a/vega/trainer/modules/optimizer/multi_optimizer.py b/vega/trainer/modules/optimizer/multi_optimizer.py index 312db07..99fb524 100644 --- a/vega/trainer/modules/optimizer/multi_optimizer.py +++ b/vega/trainer/modules/optimizer/multi_optimizer.py @@ -1,9 +1,15 @@ -# This program is free software; you can redistribute it and/or modify -# it under the terms of the MIT License. -# This program is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# MIT License for more details. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. """Manage LrScheduler class.""" from collections import OrderedDict diff --git a/vega/trainer/modules/optimizer/optim.py b/vega/trainer/modules/optimizer/optim.py index 7e7c0fa..5a50be6 100644 --- a/vega/trainer/modules/optimizer/optim.py +++ b/vega/trainer/modules/optimizer/optim.py @@ -1,12 +1,18 @@ # -*- coding: utf-8 -*- # Copyright (C) 2020. Huawei Technologies Co., Ltd. All rights reserved. -# This program is free software; you can redistribute it and/or modify -# it under the terms of the MIT License. -# This program is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# MIT License for more details. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. """Manage LrScheduler class.""" @@ -14,10 +20,10 @@ import logging import vega from vega.common import ClassFactory, ClassType -from ..config_bakcend_map import ConfigBackendMapping -from ..conf.optim import OptimConfig, OptimMappingDict from vega.common.config import Config from vega.common.general import General +from ..config_bakcend_map import ConfigBackendMapping +from ..conf.optim import OptimConfig, OptimMappingDict class Optimizer(object): @@ -128,7 +134,7 @@ def set_distributed(cls, optimizer, model=None): ClassFactory.register_cls(NpuFusedSGD, ClassType.OPTIMIZER) except Exception: - pass + logging.debug('apex of NPU is not installed.') elif vega.is_tf_backend(): import tensorflow.compat.v1.train as tf_train diff --git a/vega/trainer/modules/optimizer/optimizer.py b/vega/trainer/modules/optimizer/optimizer.py index 6cdc975..03a7b8b 100644 --- a/vega/trainer/modules/optimizer/optimizer.py +++ b/vega/trainer/modules/optimizer/optimizer.py @@ -1,76 +1,82 @@ -# -*- coding: utf-8 -*- - -# Copyright (C) 2020. Huawei Technologies Co., Ltd. All rights reserved. -# This program is free software; you can redistribute it and/or modify -# it under the terms of the MIT License. -# This program is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# MIT License for more details. - -"""TF Adam.""" - - -class OptimizerStep(object): - """Adam optimizer for tensorflow.""" - - def __init__(self, learning_rate, weight_decay=0.): - self.weight_decay = weight_decay - self.base_lr = learning_rate - - def set_lr(self, learning_rate): - """Uptate learning rate of optimizer.""" - if hasattr(self, '_learning_rate'): - self._learning_rate = learning_rate - elif hasattr(self, '_lr'): - self._lr = learning_rate - - def step(self, loss, loss_scale, global_step, var_list=None): - """Compute and update gradients.""" - loss = loss + self.regularize_loss(loss) - if loss_scale != 1: - scaled_grad_vars = self.compute_gradients(loss * loss_scale, var_list=var_list) - unscaled_grad_vars = [] - for grad, var in scaled_grad_vars: - unscaled_grad_vars.append((grad, var) if grad is None else (grad / loss_scale, var)) - minimize_op = self.apply_gradients(unscaled_grad_vars, global_step) - else: - grad_vars = self.compute_gradients(loss, var_list=var_list) - minimize_op = self.apply_gradients(grad_vars, global_step) - return minimize_op - - def regularize_loss(self, loss): - """Compute and return l2 loss.""" - import tensorflow as tf - l2_loss_list = [tf.nn.l2_loss(v) for v in tf.compat.v1.trainable_variables() - if 'batch_normalization' not in v.name] - loss = loss + self.weight_decay * tf.add_n(l2_loss_list) - return loss - - -def dynamic_optimizer(optimizer_class, **params): - """Dynamically choose optimizer.""" - class DynamicOptimizer(optimizer_class, OptimizerStep): - """Dynamic optimizer for tensorflow.""" - - def __init__(self, **kwargs): - weight_decay = 0. - learning_rate = 0. 
- if 'weight_decay' in kwargs: - weight_decay = kwargs.pop('weight_decay') - if 'learning_rate' in kwargs: - learning_rate = kwargs['learning_rate'] - optimizer_class.__init__(self, **kwargs) - OptimizerStep.__init__(self, learning_rate=learning_rate, weight_decay=weight_decay) - return DynamicOptimizer(**params) - - -def dynamic_distributed_optimizer(optimizer_class, optimizer): - """Dynamically choose distributed optimizer.""" - class DynamicDistributedOptimizer(optimizer_class, OptimizerStep): - """Dynamic distributed optimizer for tensorflow.""" - - def __init__(self, optimizer): - optimizer_class.__init__(self, optimizer) - OptimizerStep.__init__(self, learning_rate=optimizer.base_lr, weight_decay=optimizer.weight_decay) - return DynamicDistributedOptimizer(optimizer) +# -*- coding: utf-8 -*- + +# Copyright (C) 2020. Huawei Technologies Co., Ltd. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""TF Adam.""" + + +class OptimizerStep(object): + """Adam optimizer for tensorflow.""" + + def __init__(self, learning_rate, weight_decay=0.): + self.weight_decay = weight_decay + self.base_lr = learning_rate + + def set_lr(self, learning_rate): + """Uptate learning rate of optimizer.""" + if hasattr(self, '_learning_rate'): + self._learning_rate = learning_rate + elif hasattr(self, '_lr'): + self._lr = learning_rate + + def step(self, loss, loss_scale, global_step, var_list=None): + """Compute and update gradients.""" + loss = loss + self.regularize_loss(loss) + if loss_scale != 1: + scaled_grad_vars = self.compute_gradients(loss * loss_scale, var_list=var_list) + unscaled_grad_vars = [] + for grad, var in scaled_grad_vars: + unscaled_grad_vars.append((grad, var) if grad is None else (grad / loss_scale, var)) + minimize_op = self.apply_gradients(unscaled_grad_vars, global_step) + else: + grad_vars = self.compute_gradients(loss, var_list=var_list) + minimize_op = self.apply_gradients(grad_vars, global_step) + return minimize_op + + def regularize_loss(self, loss): + """Compute and return l2 loss.""" + import tensorflow as tf + l2_loss_list = [tf.nn.l2_loss(v) for v in tf.compat.v1.trainable_variables() + if 'batch_normalization' not in v.name] + loss = loss + self.weight_decay * tf.add_n(l2_loss_list) + return loss + + +def dynamic_optimizer(optimizer_class, **params): + """Dynamically choose optimizer.""" + class DynamicOptimizer(optimizer_class, OptimizerStep): + """Dynamic optimizer for tensorflow.""" + + def __init__(self, **kwargs): + weight_decay = 0. + learning_rate = 0. 
+ if 'weight_decay' in kwargs: + weight_decay = kwargs.pop('weight_decay') + if 'learning_rate' in kwargs: + learning_rate = kwargs['learning_rate'] + optimizer_class.__init__(self, **kwargs) + OptimizerStep.__init__(self, learning_rate=learning_rate, weight_decay=weight_decay) + return DynamicOptimizer(**params) + + +def dynamic_distributed_optimizer(optimizer_class, optimizer): + """Dynamically choose distributed optimizer.""" + class DynamicDistributedOptimizer(optimizer_class, OptimizerStep): + """Dynamic distributed optimizer for tensorflow.""" + + def __init__(self, optimizer): + optimizer_class.__init__(self, optimizer) + OptimizerStep.__init__(self, learning_rate=optimizer.base_lr, weight_decay=optimizer.weight_decay) + return DynamicDistributedOptimizer(optimizer) diff --git a/vega/trainer/run_remote_worker.py b/vega/trainer/run_remote_worker.py index ee7e7ad..d8bf86b 100644 --- a/vega/trainer/run_remote_worker.py +++ b/vega/trainer/run_remote_worker.py @@ -1,24 +1,32 @@ # -*- coding:utf-8 -*- # Copyright (C) 2020. Huawei Technologies Co., Ltd. All rights reserved. -# This program is free software; you can redistribute it and/or modify -# it under the terms of the MIT License. -# This program is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# MIT License for more details. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
"""Run worker remotely.""" import os import sys -import psutil import logging -import subprocess import traceback +import subprocess import signal +import psutil import vega +from vega.common.general import General from vega.trainer.deserialize import load_config, load_worker +from vega import security def run_remote_worker(worker_id, worker_path, id, num_workers): @@ -29,12 +37,12 @@ def run_remote_worker(worker_id, worker_path, id, num_workers): log_path=worker_path) for index in range(num_workers): os.chdir(os.environ["PWD"]) - if 'PYTHONPATH' in os.environ: - os.environ['PYTHONPATH'] = "{}:{}:{}".format( - os.environ['PYTHONPATH'], worker_path, os.path.abspath(os.curdir)) - elif worker_id is not None and worker_path is not None: - os.environ['PYTHONPATH'] = "{}:{}".format( - worker_path, os.path.abspath(os.curdir)) + if "PYTHONPATH" not in os.environ: + os.environ["PYTHONPATH"] = "" + if worker_path is not None and worker_path not in os.environ["PYTHONPATH"].split(":"): + os.environ["PYTHONPATH"] += f":{worker_path}" + if os.path.abspath(os.curdir) not in os.environ["PYTHONPATH"].split(":"): + os.environ["PYTHONPATH"] += f":{os.path.abspath(os.curdir)}" if vega.is_gpu_device(): sub_pid_list = call_in_gpu(id, worker_id, worker_path, index) @@ -70,7 +78,7 @@ def kill_proc_tree(pid, sig=signal.SIGKILL, include_parent=True, gone, alive = psutil.wait_procs(children, timeout=timeout, callback=on_terminate) except Exception: - pass + logging.debug('Failed to a process tree.') return (gone, alive) @@ -130,9 +138,9 @@ def _subprocess(id, worker_id, worker_path, rank, is_backend, index): env=os.environ.copy()) pid = proc.pid proc.wait(timeout=int(os.environ["vega_timeout"])) - except Exception: - logging.warn("Timeout worker has been killed.") - logging.warn(traceback.print_exc()) + except Exception as e: + logging.warn(f"Timeout worker has been killed, message: {e}.") + logging.debug(traceback.print_exc()) return pid @@ -142,12 +150,15 @@ def run_worker(): vega.set_backend(os.environ["BACKEND_TYPE"].lower(), os.environ["DEVICE_CATEGORY"]) (config_file, worker_file) = sys.argv[1:] load_config(config_file) - # cmd += os.environ["vega_init_env"] if "vega_init_env" in os.environ else "" + if General.security: + if not security.load_config("client"): + return + os.umask(0o077) worker = load_worker(worker_file) worker.train_process() - except Exception: - traceback.print_exc(file=open("./error.log", "w+")) - logging.error(traceback.format_exc()) + except Exception as e: + logging.debug(traceback.format_exc()) + logging.error(f"Failed to run worker, message: {e}") if __name__ == "__main__": diff --git a/vega/trainer/script_runner.py b/vega/trainer/script_runner.py index 301ff3e..afeb586 100644 --- a/vega/trainer/script_runner.py +++ b/vega/trainer/script_runner.py @@ -1,12 +1,18 @@ # -*- coding: utf-8 -*- # Copyright (C) 2020. Huawei Technologies Co., Ltd. All rights reserved. -# This program is free software; you can redistribute it and/or modify -# it under the terms of the MIT License. -# This program is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# MIT License for more details. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. """Base Trainer.""" @@ -14,7 +20,6 @@ import subprocess import traceback import os -import pickle import glob from vega.common import Config from vega.common.general import General @@ -23,6 +28,7 @@ from vega.trainer.conf import TrainerConfig from vega.common.class_factory import ClassFactory, ClassType from vega.trainer.utils import WorkerTypes +from vega.common import FileOps logger = logging.getLogger(__name__) @@ -48,9 +54,9 @@ def train_process(self): try: self._dump_trial_config() self._run_script() - except Exception: - logger.error(traceback.format_exc()) - logger.error("Failed to run script.") + except Exception as e: + logger.debug(traceback.format_exc()) + logger.error(f"Failed to run script, message: {e}.") def _run_script(self): """Run script.""" @@ -58,14 +64,15 @@ def _run_script(self): script = os.path.abspath(self.config.script) cmd = [General.python_command, script] if hasattr(self.config, "params") and self.config.params is not None: - cmd = [General.python_command, self.config.params] + params = [f"--{k}={v}" for k, v in self.config.params.items()] + cmd += params try: proc = subprocess.Popen(cmd, env=env, cwd=self.get_local_worker_path()) logger.info(f"start process, pid: {proc.pid}") proc.wait(timeout=General.worker.timeout) - except Exception: - logger.warn("Timeout worker has been killed.") - logger.warn(traceback.print_exc()) + except Exception as e: + logger.warn(f"Timeout worker has been killed, message: {e}.") + logger.debug(traceback.format_exc()) def _dump_trial_config(self): """Dump trial config.""" @@ -77,8 +84,7 @@ def _dump_trial_config(self): "epochs": self.config.epochs, } _file = os.path.join(self.get_local_worker_path(), ".trial") - with open(_file, "wb") as f: - pickle.dump(data, f) + FileOps.dump_pickle(data, _file) def _get_hps(self, hps): if hps is not None: diff --git a/vega/trainer/simclr/loss.py b/vega/trainer/simclr/loss.py index e94e91d..3e79978 100644 --- a/vega/trainer/simclr/loss.py +++ b/vega/trainer/simclr/loss.py @@ -1,12 +1,18 @@ # -*- coding: utf-8 -*- # Copyright (C) 2020. Huawei Technologies Co., Ltd. All rights reserved. -# This program is free software; you can redistribute it and/or modify -# it under the terms of the MIT License. -# This program is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# MIT License for more details. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
"""NT_Xent loss for simclr.""" import torch diff --git a/vega/trainer/simclr/model.py b/vega/trainer/simclr/model.py index adaae1d..f89df38 100644 --- a/vega/trainer/simclr/model.py +++ b/vega/trainer/simclr/model.py @@ -1,12 +1,18 @@ # -*- coding: utf-8 -*- # Copyright (C) 2020. Huawei Technologies Co., Ltd. All rights reserved. -# This program is free software; you can redistribute it and/or modify -# it under the terms of the MIT License. -# This program is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# MIT License for more details. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. """Build model for simclr.""" import torch diff --git a/vega/trainer/simclr/train.py b/vega/trainer/simclr/train.py index 21f46bd..9a356c0 100644 --- a/vega/trainer/simclr/train.py +++ b/vega/trainer/simclr/train.py @@ -1,20 +1,25 @@ # -*- coding: utf-8 -*- # Copyright (C) 2020. Huawei Technologies Co., Ltd. All rights reserved. -# This program is free software; you can redistribute it and/or modify -# it under the terms of the MIT License. -# This program is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# MIT License for more details. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. """Train simclr model.""" - -from .model import SimclrModel -from torch.optim import Adam +import logging import torch +from torch.optim import Adam +from .model import SimclrModel from .loss import NT_Xent -import logging def simclr_train(init_model, train_loader, epochs=1): diff --git a/vega/trainer/simclr/transforms.py b/vega/trainer/simclr/transforms.py index 0a1fcff..fd96fca 100644 --- a/vega/trainer/simclr/transforms.py +++ b/vega/trainer/simclr/transforms.py @@ -1,12 +1,18 @@ # -*- coding: utf-8 -*- # Copyright (C) 2020. Huawei Technologies Co., Ltd. All rights reserved. -# This program is free software; you can redistribute it and/or modify -# it under the terms of the MIT License. -# This program is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# MIT License for more details. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. """Transforms used in simclr.""" diff --git a/vega/trainer/task_conf.py b/vega/trainer/task_conf.py index e2a928c..07f05ba 100644 --- a/vega/trainer/task_conf.py +++ b/vega/trainer/task_conf.py @@ -1,12 +1,18 @@ # -*- coding:utf-8 -*- # Copyright (C) 2020. Huawei Technologies Co., Ltd. All rights reserved. -# This program is free software; you can redistribute it and/or modify -# it under the terms of the MIT License. -# This program is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# MIT License for more details. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. """Default task config.""" diff --git a/vega/trainer/trainer.py b/vega/trainer/trainer.py index 04907dc..bf09303 100644 --- a/vega/trainer/trainer.py +++ b/vega/trainer/trainer.py @@ -1,12 +1,18 @@ # -*- coding: utf-8 -*- # Copyright (C) 2020. Huawei Technologies Co., Ltd. All rights reserved. -# This program is free software; you can redistribute it and/or modify -# it under the terms of the MIT License. -# This program is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# MIT License for more details. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. """Trainer.""" diff --git a/vega/trainer/trainer_base.py b/vega/trainer/trainer_base.py index c910180..d0eff6f 100644 --- a/vega/trainer/trainer_base.py +++ b/vega/trainer/trainer_base.py @@ -1,12 +1,18 @@ # -*- coding: utf-8 -*- # Copyright (C) 2020. Huawei Technologies Co., Ltd. All rights reserved. -# This program is free software; you can redistribute it and/or modify -# it under the terms of the MIT License. -# This program is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# MIT License for more details. 
+# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. """Base Trainer.""" @@ -213,6 +219,20 @@ def _init_dataloader(self, mode, loader=None, transforms=None): """Init dataloader.""" if loader is not None: return loader + dataset = self._init_dataset(mode) + if transforms is not None: + dataset.transforms = transforms + if (self.hccl or self.horovod) and mode == "train": + dataset.set_distributed(self.num_workers, self.rank_id) + # adapt the dataset to specific backend + adapter = Adapter(dataset) + if (self.hccl or self.horovod) and mode == "train" and hasattr(adapter, "sampler"): + self.sampler = adapter.sampler + dataloader = adapter.loader + return dataloader + + def _init_dataset(self, mode): + """Init dataset.""" if mode == "train" and self.hps is not None and self.hps.get("dataset") is not None: if self.hps.get("dataset") and self.hps.get("dataset").get('type'): dataset_cls = ClassFactory.get_cls(ClassType.DATASET, self.hps.get("dataset").get('type')) @@ -229,16 +249,7 @@ def _init_dataloader(self, mode, loader=None, transforms=None): else: dataset_cls = ClassFactory.get_cls(ClassType.DATASET) dataset = dataset_cls(mode=mode) - if transforms is not None: - dataset.transforms = transforms - if (self.hccl or self.horovod) and mode == "train": - dataset.set_distributed(self.num_workers, self.rank_id) - # adapt the dataset to specific backend - adapter = Adapter(dataset) - if (self.hccl or self.horovod) and mode == "train" and hasattr(adapter, "sampler"): - self.sampler = adapter.sampler - dataloader = adapter.loader - return dataloader + return dataset def _train_loop(self): """Do the training with data, callbacks and step functions etc.""" diff --git a/vega/trainer/trainer_ms.py b/vega/trainer/trainer_ms.py index 4a33ec4..622f81e 100644 --- a/vega/trainer/trainer_ms.py +++ b/vega/trainer/trainer_ms.py @@ -1,29 +1,35 @@ # -*- coding: utf-8 -*- # Copyright (C) 2020. Huawei Technologies Co., Ltd. All rights reserved. -# This program is free software; you can redistribute it and/or modify -# it under the terms of the MIT License. -# This program is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# MIT License for more details. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
"""Mindspore Trainer.""" import os +import logging +import vega from mindspore import context from mindspore.train import Model as MsModel from mindspore.train.callback import ModelCheckpoint, CheckpointConfig, LossMonitor, TimeMonitor from mindspore.train.loss_scale_manager import FixedLossScaleManager from mindspore import save_checkpoint from vega.trainer.callbacks.ms_callbacks import EvalCallBack -import vega from vega.trainer.trainer_base import TrainerBase from vega.trainer.modules.optimizer import Optimizer from vega.trainer.modules.lr_schedulers import LrScheduler from vega.modules.loss import Loss from vega.common import ClassFactory, ClassType -import logging from vega.common.general import General @@ -44,7 +50,7 @@ def build(self): self.optimizer = Optimizer()(model=self.model, dynamic_lr=dynamic_lr, no_decay_params=no_decay_params) else: self.optimizer = Optimizer()(model=self.model, no_decay_params=no_decay_params) - logging.info(f"The optimizer is {self.optimizer}.") + logging.debug(f"The optimizer is {self.optimizer}.") if hasattr(self.model, 'add_loss'): loss_cls = Loss()() self.model.add_loss(loss_cls) diff --git a/vega/trainer/trainer_tf.py b/vega/trainer/trainer_tf.py index 1f32da4..880a89c 100644 --- a/vega/trainer/trainer_tf.py +++ b/vega/trainer/trainer_tf.py @@ -1,12 +1,18 @@ # -*- coding: utf-8 -*- # Copyright (C) 2020. Huawei Technologies Co., Ltd. All rights reserved. -# This program is free software; you can redistribute it and/or modify -# it under the terms of the MIT License. -# This program is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# MIT License for more details. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
"""TensorFlow Trainer.""" @@ -27,7 +33,6 @@ class TrainerTf(TrainerBase): def build(self): """Build the trainer by assembling the necessary components.""" super().build() - # Some trainer has different train batch size from valid batch self.train_metrics = None self.valid_metrics = self._init_metrics() @@ -106,7 +111,6 @@ def _default_model_fn(self, features, labels, mode): self.loss = self.model.overall_loss() else: self.loss = Loss()() - # loss if self.config.mixup and mode == tf.estimator.ModeKeys.TRAIN: loss = self._mixup_loss(self.loss, logits, y_a, y_b, mixup_ratio) else: @@ -209,13 +213,11 @@ def _init_npu_estimator(self, sess_config): def _init_gpu_session_config(self): sess_config = tf.compat.v1.ConfigProto() sess_config.gpu_options.allow_growth = True - # if self.horovod: - # import horovod.tensorflow as hvd - # sess_config.gpu_options.visible_device_list = str(hvd.local_rank()) return sess_config def _init_npu_session_config(self): from tensorflow.core.protobuf.rewriter_config_pb2 import RewriterConfig + from npu_bridge import npu_init sess_config = tf.ConfigProto() sess_config.graph_options.rewrite_options.remapping = RewriterConfig.OFF custom_op = sess_config.graph_options.rewrite_options.custom_optimizers.add() diff --git a/vega/trainer/trainer_torch.py b/vega/trainer/trainer_torch.py index fdd79ec..8c8689b 100644 --- a/vega/trainer/trainer_torch.py +++ b/vega/trainer/trainer_torch.py @@ -1,14 +1,21 @@ # -*- coding: utf-8 -*- # Copyright (C) 2020. Huawei Technologies Co., Ltd. All rights reserved. -# This program is free software; you can redistribute it and/or modify -# it under the terms of the MIT License. -# This program is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# MIT License for more details. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
"""Torch Trainer.""" + import torch import numpy as np import vega @@ -71,13 +78,13 @@ def init_env(self): def _init_setting(self): """Init CUDA setting.""" if vega.is_gpu_device(): - import torch.cuda + import torch.cuda as torch_cuda self.config.device = vega.is_gpu_device() if vega.is_gpu_device() is not True else 0 - torch.cuda.manual_seed(self.config.seed) + torch_cuda.manual_seed(self.config.seed) elif vega.is_npu_device(): - import torch.npu - torch.npu.set_device(vega.get_devices()) - torch.npu.manual_seed(self.config.seed) + import torch.npu as torch_npu + torch_npu.set_device(vega.get_devices()) + torch_npu.manual_seed(self.config.seed) elif vega.is_cpu_device(): self.config.device = -1 return @@ -156,17 +163,7 @@ def _default_train_step(self, batch): else: loss = self.loss(output, target) if self.use_amp: - from apex import amp - if vega.is_npu_device(): - with amp.scale_loss(loss, self.optimizer) as scaled_loss: - scaled_loss.backward() - self.optimizer.step() - else: - with amp.scale_loss(loss, self.optimizer) as scaled_loss: - scaled_loss.backward() - self.optimizer.synchronize() - with self.optimizer.skip_synchronize(): - self.optimizer.step() + self._set_amp_loss(loss) else: loss.backward() if self.config.grad_clip: @@ -177,6 +174,19 @@ def _default_train_step(self, batch): 'train_batch_output': output, 'lr': self.lr_scheduler.get_lr()} + def _set_amp_loss(self, loss): + from apex import amp + if vega.is_npu_device(): + with amp.scale_loss(loss, self.optimizer) as scaled_loss: + scaled_loss.backward() + self.optimizer.step() + else: + with amp.scale_loss(loss, self.optimizer) as scaled_loss: + scaled_loss.backward() + self.optimizer.synchronize() + with self.optimizer.skip_synchronize(): + self.optimizer.step() + def _multi_train_step(self, batch): train_batch_output = None for opt_name, sub_opt in self.optimizer.get_opts(): diff --git a/vega/trainer/trial_agent.py b/vega/trainer/trial_agent.py index 90d3070..1c1ab2d 100644 --- a/vega/trainer/trial_agent.py +++ b/vega/trainer/trial_agent.py @@ -1,21 +1,26 @@ # -*- coding: utf-8 -*- # Copyright (C) 2020. Huawei Technologies Co., Ltd. All rights reserved. -# This program is free software; you can redistribute it and/or modify -# it under the terms of the MIT License. -# This program is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# MIT License for more details. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
"""Base Trainer.""" import os -import logging -import pickle import vega from vega.common.general import General from vega.report.report_client import ReportClient +from vega.common import FileOps class TrialAgent(object): @@ -28,8 +33,7 @@ def __init__(self): def _load_config(self): _file = os.path.join(os.path.curdir, ".trial") - with open(_file, "rb") as f: - data = pickle.load(f) + data = FileOps.load_pickle(_file) self.worker_id = data["worker_id"] self.model_desc = data["model_desc"] self.hps = data["hps"] diff --git a/vega/trainer/tuner.py b/vega/trainer/tuner.py new file mode 100644 index 0000000..2627aa3 --- /dev/null +++ b/vega/trainer/tuner.py @@ -0,0 +1,34 @@ +# -*- coding: utf-8 -*- + +# Copyright (C) 2020. Huawei Technologies Co., Ltd. All rights reserved. +# This program is free software; you can redistribute it and/or modify +# it under the terms of the MIT License. +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# MIT License for more details. + +"""Base Trainer.""" +import logging +from vega.common.class_factory import ClassFactory, ClassType +from vega.common.wrappers import train_process_wrapper +from vega.trainer.trainer_base import TrainerBase +from vega.model_zoo.tuner import ModelTuner + + +@ClassFactory.register(ClassType.TRAINER) +class Tuner(TrainerBase): + """Tuner to call user function.""" + + def __init__(self, *args, **kwargs): + super().__init__(*args, **kwargs) + + @train_process_wrapper + def train_process(self): + """Define train process.""" + fn_name, fn_kwargs = ModelTuner.get_fn() + ModelTuner.setup(self.step_name, self._worker_id) + if hasattr(self.config, "params") and self.config.params: + fn_kwargs.update(self.config.params) + logging.info("function args: {}".format(fn_kwargs)) + return fn_name(**fn_kwargs) diff --git a/vega/trainer/utils.py b/vega/trainer/utils.py index 967df80..e3c2aa7 100644 --- a/vega/trainer/utils.py +++ b/vega/trainer/utils.py @@ -1,23 +1,24 @@ # -*- coding:utf-8 -*- # Copyright (C) 2020. Huawei Technologies Co., Ltd. All rights reserved. -# This program is free software; you can redistribute it and/or modify -# it under the terms of the MIT License. -# This program is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# MIT License for more details. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. """Utils functions that been used in pipeline.""" -import os import socket import logging -import signal -import psutil from enum import Enum -from vega.common import FileOps -from vega.common.task_ops import TaskOps class WorkerTypes(Enum): @@ -29,48 +30,6 @@ class WorkerTypes(Enum): DeviceEvaluator = 5 -# Here start the stand alone functions for master to use! -def clean_cuda_proc(master_pid, device_id): - """Short summary. 
- - :param type master_pid: Description of parameter `master_pid`. - :param type device_id: Description of parameter `device_id`. - """ - current_pid = os.getpid() - cuda_kill = "fuser -v /dev/nvidia{0} | " \ - "awk '{{for(i=1;i<=NF;i++)if($i!={1}&&$i!={2})" \ - "print \"kill -9 \" $i;}}' | sh".format(device_id, master_pid, current_pid) - os.system(cuda_kill) - return - - -def kill_proc_tree(pid, sig=signal.SIGKILL, include_parent=True, - timeout=None, on_terminate=None): - """Kill a process tree (including grandchildren) with signal. - - "sig" and return a (gone, still_alive) tuple. - "on_terminate", if specified, is a callabck function which is - called as soon as a child terminates. - """ - if pid == os.getpid(): - raise RuntimeError("I refuse to kill myself") - gone = None - alive = None - try: - parent = psutil.Process(pid) - children = parent.children(recursive=True) - if include_parent: - children.append(parent) - for p in children: - p.send_signal(sig) - gone, alive = psutil.wait_procs(children, timeout=timeout, - callback=on_terminate) - except Exception: - - pass - return (gone, alive) - - def get_master_address(args): """Get master address(ip, port) from `args.init_method`. @@ -84,7 +43,7 @@ def get_master_address(args): address = args.init_method[6:].split(":") ip = socket.gethostbyname(address[0]) port = address[-1] - logging.info("get master address, address={}, ip={}, port={}".format( + logging.debug("get master address, address={}, ip={}, port={}".format( address, ip, port )) return ip, port @@ -107,44 +66,6 @@ def get_local_address(): return ip -def save_master_ip(ip_address, port, args): - """Write the ip and port in a system path. - - :param str ip_address: The `ip_address` need to write. - :param str port: The `port` need to write. - :param argparse.ArgumentParser args: `args` is a argparse that should - contain `init_method`, `rank` and `world_size`. - - """ - temp_folder = TaskOps().temp_path - FileOps.make_dir(temp_folder) - file_path = os.path.join(temp_folder, 'ip_address.txt') - logging.info("write ip, file path={}".format(file_path)) - with open(file_path, 'w') as f: - f.write(ip_address + "\n") - f.write(port + "\n") - - -def load_master_ip(): - """Get the ip and port that write in a system path. - - here will not download anything from S3. - """ - temp_folder = TaskOps().temp_path - FileOps.make_dir(temp_folder) - file_path = os.path.join(temp_folder, 'ip_address.txt') - if os.path.isfile(file_path): - with open(file_path, 'r') as f: - ip = f.readline().strip() - port = f.readline().strip() - logging.info("get write ip, ip={}, port={}".format( - ip, port - )) - return ip, port - else: - return None, None - - def get_master_port(args): """Get master port from `args.init_method`. diff --git a/vega/visual/__init__.py b/vega/visual/__init__.py deleted file mode 100644 index 15324a1..0000000 --- a/vega/visual/__init__.py +++ /dev/null @@ -1 +0,0 @@ -from .train_process import dump_trainer_visual_info, dump_model_visual_info diff --git a/vega/visual/tensorboarder.py b/vega/visual/tensorboarder.py deleted file mode 100644 index eda0624..0000000 --- a/vega/visual/tensorboarder.py +++ /dev/null @@ -1,132 +0,0 @@ -# Copyright (C) 2020. Huawei Technologies Co., Ltd. All rights reserved. 
-# -# Permission is hereby granted, free of charge, to any person obtaining a copy -# of this software and associated documentation files (the "Software"), to deal -# in the Software without restriction, including without limitation the rights -# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -# copies of the Software, and to permit persons to whom the Software is -# furnished to do so, subject to the following conditions: -# -# The above copyright notice and this permission notice shall be included in -# all copies or substantial portions of the Software. -# -# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN -# THE SOFTWARE -""" -Get vision by tensorboard. - - usage: - 1. tensorboard --logdir=/tmp/.xt_data/tensorboard - 2. and then, open chrome with url: http://YOUR.SERVER.IP:6006 - 3. if multi-scaler, you NEED re-run step-1 above!!! bugs -""" -import os -from datetime import datetime -from time import sleep -import numpy as np -import shutil -from absl import logging -from tensorboardX import SummaryWriter - - -def is_board_running(pro_name="tensorboard"): - """Check if process running.""" - cmd = ('ps aux | grep "' + pro_name + '" | grep -v grep | grep -v tail | grep -v keepH5ssAlive') - try: - process_num = len(os.popen(cmd).readlines()) - if process_num >= 1: - return True - else: - return False - except BaseException as err: - logging.warning("check process failed with {}.".format(err)) - return False - - -def clean_board_dir(_to_deleta_dir): - """Re-clear tensorboard dir.""" - if os.path.isdir(_to_deleta_dir): - shutil.rmtree(_to_deleta_dir, ignore_errors=True) - print("will clean path: {} for board...".format(_to_deleta_dir)) - sleep(0.01) - - -class SummaryBoard(object): - """SummaryBoard used for the visual base the tensorboardX.""" - - def __init__(self, archive_root, fixed_path=None): - """Init the summaryBoard, fixed_path could refer to worker_id.""" - self._archive = archive_root - if not os.path.isdir(archive_root): - os.makedirs(archive_root) - if not fixed_path: - self.logdir = os.path.join( - archive_root, datetime.now().strftime("%Y%m%d-%H%M%S") - ) - else: - self.logdir = os.path.join(archive_root, str(fixed_path)) - self.writer = SummaryWriter(logdir=self.logdir) - - def insert_records(self, records): - """Insert records.""" - for record in records: - name, value, index = record - if np.isnan(value) or not value: - continue - self.writer.add_scalar(name, value, index) - self.writer.flush() - - def insert_epoch_logs(self, logs, epoch): - """Insert logs after epoch.""" - for k, v in logs: - if not v: - continue - self.add_scalar(k, v, epoch, flush=False) - self.writer.flush() - - def add_scalar(self, name, value, index, walltime=None, flush=False): - """Add scalar func.""" - if walltime is not None: - self.writer.add_scalar(name, value, index, walltime=walltime) - else: - self.writer.add_scalar(name, value, index) - - if flush: - self.writer.flush() - - def add_graph(self, model=None, graph=None, feed_data=None, backend=None): - """Add graph.""" - if backend == "tf": - self._add_tf_graph(graph) - - elif backend == "torch": - 
self._add_torch_graph(model, feed_data) - - elif backend == "ms": - self._add_ms_graph() - else: - print("Add graph failed with non-known backend!") - - def _add_tf_graph(self, graph): - import tensorflow as tf - with graph.as_default(): - writer = tf.summary.FileWriter( - logdir=os.path.join(self.logdir, "model_def"), graph=graph) - writer.flush() - writer.close() - - def _add_torch_graph(self, model, feed_data): - self.writer.add_graph(model, (feed_data,)) - - def _add_ms_graph(self): - pass - - def close(self): - """Close SummaryBoard, contains file.close and process shutdown.""" - self.writer.flush() - self.writer.close() diff --git a/vega/visual/train_process.py b/vega/visual/train_process.py deleted file mode 100644 index a68ee81..0000000 --- a/vega/visual/train_process.py +++ /dev/null @@ -1,82 +0,0 @@ -# -*- coding: utf-8 -*- - -# Copyright (C) 2020. Huawei Technologies Co., Ltd. All rights reserved. -# This program is free software; you can redistribute it and/or modify -# it under the terms of the MIT License. -# This program is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# MIT License for more details. - -"""Save Train processing info.""" -import logging -from tensorboardX import SummaryWriter -from vega.common import FileOps - - -def dump_trainer_visual_info(trainer, epoch, visual_data): - """Dump triner info to tensorboard event files. - - :param trainer: trainer. - :type worker: object that the class was inherited from DistributedWorker. - :param epoch: number of epoch. - :type epoch: int. - :param visual_data: train's visual data. - :type visual_data: ordered dictionary. - - """ - (visual, _, _, title, worker_id, output_path) = _get_trainer_info(trainer) - if visual is not True: - return - prefix_name = "{}".format(worker_id) - lines = {} - for _name, data in visual_data.items(): - line_name = "{}.{}".format(prefix_name, _name) - lines[line_name] = data - if len(lines) > 0: - writer = SummaryWriter(output_path, comment=title) - writer.add_scalars(title, lines, epoch) - writer.close() - - -def dump_model_visual_info(trainer, epoch, model, inputs): - """Dump model to tensorboard event files. - - :param trainer: trainer. - :type worker: object that the class was inherited from DistributedWorker. - :param model: model. - :type model: model. - :param inputs: input data. - :type inputs: data. 
- - """ - (_, visual, interval, title, worker_id, output_path) = _get_trainer_info(trainer) - if visual is not True: - return - if epoch % interval != 0: - return - title = str(worker_id) - _path = FileOps.join_path(output_path, title) - FileOps.make_dir(_path) - try: - with SummaryWriter(_path) as writer: - writer.add_graph(model, (inputs,)) - except Exception as e: - logging.error("Failed to dump model visual info, worker id: {}, epoch: {}, error: {}".format( - worker_id, epoch, str(e) - )) - - -def _get_trainer_info(trainer): - if "visualize" in trainer.cfg.keys(): - interval = trainer.cfg.visualize.model.interval - visual_process = trainer.cfg.visualize.train_process.visual - visual_model = trainer.cfg.visualize.model.visual - else: - interval = 10 - visual_process = True - visual_model = True - worker_id = trainer.worker_id - title = trainer.cfg.step_name - output_path = trainer.local_visual_path - return visual_process, visual_model, interval, title, worker_id, output_path diff --git a/vega/visual/visual_rewards.py b/vega/visual/visual_rewards.py deleted file mode 100644 index cfe9135..0000000 --- a/vega/visual/visual_rewards.py +++ /dev/null @@ -1,82 +0,0 @@ -#!/usr/bin/env python -"""Display reward and loss infomation into tensorboard.""" -import os - -from vega.visual.tensorboarder import clean_board_dir -from vega.visual.tensorboarder import SummaryBoard -from vega.common.util.default_xt import XtBenchmarkConf as bm_conf -from vega.common.util.get_xt_config import parse_xt_multi_case_paras -from vega.common.util.evaluate_xt import parse_benchmark_args -from vega.common.util.evaluate_xt import read_train_records, read_train_event_id - - -def display_rewards(args, stage): - """Create utils for display, support multi & single config file.""" - for _conf_file in args.config_file: - if not os.path.isfile(_conf_file): - print("config file: '{}' invalid, continue!".format(_conf_file)) - continue - - print("processing config file: '{}' ".format(_conf_file)) - multi_case_paras = parse_xt_multi_case_paras(_conf_file) - - for _once_paras in multi_case_paras: - handle_once_local_data_record(_once_paras, args.use_index, stage) - - -def parse_xt_train_config(yaml_obj): - """Create utils for parse xt config file.""" - env = yaml_obj.get("env_para") - alg = yaml_obj.get("alg_para") - _model = yaml_obj.get("model_para") - alg["model_info"] = _model - agent = yaml_obj.get("agent_para") - - return env, alg, agent - - -def handle_once_local_data_record(case_paras, use_index, stage="eval", - clear_tensorboard=True): - """Handle the record from local file.""" - env_info, alg_info, agent_info = parse_xt_train_config(case_paras) - # NOTE: model info will insert into alg_info, as "model_info" - benchmark_info = case_paras.get("benchmark", dict()) - - bm_args = parse_benchmark_args(env_info, alg_info, agent_info, benchmark_info) - - records = read_train_records(bm_args, use_index, stage) - - prefix_display_name = "_".join([env_info["env_name"], env_info["env_info"]["name"]]) - _train_event_id = read_train_event_id(bm_args) - case_tb_dir = "_".join( - [prefix_display_name, alg_info["alg_name"], str(_train_event_id)] - ) - print("case_tb_dir: ", case_tb_dir) - if clear_tensorboard: - clean_board_dir(os.path.join(bm_conf.default_tb_path, case_tb_dir)) - - write2board(stage, records, use_index, case_tb_dir) - - -def write2board(stage, record_dict, use_index, case_tb_dir): - """Write record into tensorboard, include, loss, reward etc.""" - if use_index == "step": - x_key = "sample_step" - elif use_index == 
"sec": - x_key = "elapsed_sec" - else: - raise KeyError("need in 'step' or 'sec', get: {}".format(use_index)) - - if stage == "eval": - display_list = ["eval_reward"] - elif stage == "both": - display_list = ["eval_reward", "train_reward"] - else: - raise KeyError("invalid stage para-{}".format(stage)) - - summary = SummaryBoard(archive_root=bm_conf.default_tb_path, fixed_path=case_tb_dir) - for name in display_list: - for x_val, value in zip(record_dict[x_key], record_dict[name]): - summary.add_scalar(name, value, x_val) - - del summary diff --git a/vega/visual/visualizer.ipynb b/vega/visual/visualizer.ipynb deleted file mode 100644 index 3ae8886..0000000 --- a/vega/visual/visualizer.ipynb +++ /dev/null @@ -1,496 +0,0 @@ -{ - "cells": [ - { - "cell_type": "code", - "execution_count": 1, - "metadata": {}, - "outputs": [], - "source": [ - "import os\n", - "import copy\n", - "import glob \n", - "import json\n", - "import numpy as np\n", - "import matplotlib as mpl\n", - "import matplotlib.pyplot as plt\n", - "import seaborn as sns\n", - "import pandas as pd\n", - "import logging\n", - "sns.set() # clear whole style\n", - "%matplotlib inline" - ] - }, - { - "cell_type": "code", - "execution_count": 2, - "metadata": {}, - "outputs": [ - { - "name": "stdout", - "output_type": "stream", - "text": [ - "check passed!\n" - ] - } - ], - "source": [ - "def make_col_readable(col_data, raw_key):\n", - " \"\"\"Make single column data readable.\"\"\"\n", - " k,m,g = 1024, 1024*1024, 1024*1024*1024\n", - " \n", - " col_mean = col_data.mean()\n", - " \n", - " if g <= col_mean:\n", - " new_col = col_data / g\n", - " new_key = \"{}({})\".format(raw_key, \"G\")\n", - " \n", - " if m <= col_mean < g:\n", - " new_col = col_data / m\n", - " new_key = \"{}({})\".format(raw_key, \"M\")\n", - " \n", - " elif k <= col_mean < m:\n", - " new_col = col_data / k\n", - " new_key = \"{}({})\".format(raw_key, \"K\")\n", - " else:\n", - " new_col, new_key = None, None\n", - " return new_col, new_key \n", - "\n", - "def make_human_readable(data, keys):\n", - " \"\"\"Make a pandas data readable.\"\"\"\n", - " updated_map = dict()\n", - " if not isinstance(keys, list):\n", - " keys = [keys]\n", - " for k in keys:\n", - " col_data = data[k]\n", - " updated_val, updated_key = make_col_readable(col_data, k)\n", - " if not updated_key: # \n", - " continue \n", - " \n", - " # add new column\n", - " data[updated_key] = updated_val\n", - " updated_map.update({k: updated_key})\n", - " \n", - " return data, updated_map\n", - "\n", - "\n", - "METRIC_MAP = {\n", - " \"Classification\": \"accuarcy\",\n", - " \"Super-Resolution\": {\"psnr\", \"ssim\"}, # and \n", - " \"Segmentation\": \"mIOU\",\n", - " \"Detection\": {\"F1 score\", \"LAMR\"}, # or \n", - " \"Click-Through Rate Prediction\": \"AUC\",\n", - "}\n", - "\n", - "PARAMS_SET = set([\"model_size\", \"metric\", \"params\", \"flops\", \"Inference Time\"])\n", - "\n", - "def get_params_set(case_type, default_params, metric_map):\n", - " \"\"\"Get params set.\"\"\"\n", - " ret_params = copy.deepcopy(default_params)\n", - " if \"metric\" in default_params:\n", - " ret_params.remove(\"metric\")\n", - " metric_keys = metric_map.get(case_type)\n", - " if not metric_keys:\n", - " raise KeyError(\"not found case: {} in map: {}.\".format(case_type, metric_map))\n", - " elif isinstance(metric_keys, str):\n", - " ret_params.add(metric_keys)\n", - " elif isinstance(metric_keys, set):\n", - " ret_params |= metric_keys\n", - " else:\n", - " raise ValueError(\"unkown value: 
{}\".format(metric_keys))\n", - " \n", - " return ret_params\n", - "\n", - "def test_get_params():\n", - " detec_target = {'params', 'F1 score', 'model_size', 'Inference Time', 'flops', 'LAMR'}\n", - " assert get_params_set(\"Detection\", PARAMS_SET, METRIC_MAP) == detec_target\n", - " \n", - " classif_target = {'params', 'accuarcy', 'model_size', 'Inference Time', 'flops'}\n", - " assert get_params_set(\"Classification\", PARAMS_SET, METRIC_MAP) == classif_target\n", - " print(\"check passed!\")\n", - " \n", - "if __name__ == \"__main__\":\n", - " test_get_params()" - ] - }, - { - "cell_type": "code", - "execution_count": 3, - "metadata": {}, - "outputs": [], - "source": [ - "def _get_json_val(json_file):\n", - " \"\"\"Get json value.\"\"\"\n", - " with open(json_file, \"r\") as json_obj:\n", - " single_val = json.load(json_obj)\n", - " return single_val\n", - "\n", - "def gather_metrics_from_dir(dir_path, case_type):\n", - " \"\"\"Collect the results under path.\"\"\"\n", - " json_files = glob.glob(os.path.join(dir_path, \"*.json\"))\n", - " need_keys = get_params_set(case_type, PARAMS_SET, METRIC_MAP) \n", - " # print(\"json_files: \",json_files)\n", - " ret_val_total = list()\n", - " \n", - " # try first json with keys check\n", - " single_val = _get_json_val(json_files[0])\n", - " unused_key = set([_k for _k in need_keys if _k not in single_val])\n", - " need_keys -= unused_key\n", - " \n", - " # move set to list, own to order\n", - " need_keys = list(need_keys)\n", - " for _json_file in json_files:\n", - " json_value = _get_json_val(_json_file)\n", - " single_val = list([float(json_value[k]) for k in need_keys])\n", - " \n", - " # add json_file_name, consider the type between json file and data \n", - " single_val.append(_json_file)\n", - " ret_val_total.append(single_val)\n", - " \n", - " ret_np = np.array(ret_val_total)\n", - " \n", - " ret_pd = pd.DataFrame(ret_np, columns=[*need_keys, \"json_name\"])\n", - " ret_pd[need_keys] = ret_pd[need_keys].apply(pd.to_numeric)\n", - " ret_pd.sort_values(\"model_size\", inplace=True)\n", - " return ret_pd" - ] - }, - { - "cell_type": "code", - "execution_count": 15, - "metadata": {}, - "outputs": [], - "source": [ - "def plot_pruning_case(pdf, x_label, y_label, to_png_file=\"defaut.png\", dpi=300):\n", - " sns.set()\n", - " sns.set(rc={\"figure.figsize\": (6.4, 3.6), \"figure.dpi\": 300})\n", - " # sns.set_style(\"white\")\n", - " # sns.set_context(\"paper\", font_scale=1.5, rc={\"lines.linewidth\":0.5})\n", - " sns.set_style(\"dark\")\n", - "\n", - " fig,axes = plt.subplots(nrows=2,ncols=2,figsize=(6.4,3.6))\n", - " fig.subplots_adjust(hspace=0.1, wspace=0.25)\n", - " plt.ticklabel_format(style='plain', axis='y')\n", - "\n", - " for count, (x, y, ax) in enumerate(zip(x_label, y_label, axes.reshape(-1))): \n", - " ax= sns.scatterplot(x=x, y=y,data=pdf,ax=ax)\n", - " ax.set_xlabel(x,fontsize=6)\n", - " ax.set_ylabel(y,fontsize=6)\n", - "\n", - " ax.tick_params(axis='y',labelsize=4, rotation=0)\n", - " ax.tick_params(axis='x',labelsize=4, rotation=45)\n", - " # ax.ticklabel_format(style='plain', axis='y',useOffset=False)\n", - "\n", - " if count < 2:\n", - " ax.set(xlabel=None)\n", - " ax.set(xticklabels=[])\n", - "\n", - " fig.savefig(to_png_file, dpi=dpi, bbox_inches='tight')\n", - " return to_png_file" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "# Parameter description\n", - "\n", - "- model_size (y) for y axis\t\n", - "- accuracy\t (y) metric types,support defined by user\n", - "- flops\t(y) \n", - "- params 
(y) \t\n", - "- latency_batch \t(y) batch = (1, 32), batch 32 defautl\n", - "- latency_batch(1) # use `Inference Time = latency_batch/batch` \n", - "- latency_batch(32)\n" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "### Collect the json file under special path\n", - "\n", - "**Input**\n", - "- dir_path: Directory to read json file\n", - "- case_type: case to analysis, choose from the keys of METRIC_MAP\n", - "\n", - "**Output**:\n", - "- pandas.DataFrame" - ] - }, - { - "cell_type": "code", - "execution_count": 10, - "metadata": {}, - "outputs": [ - { - "data": { - "text/html": [ - "
[HTML rendering of the gathered-metrics DataFrame omitted; the identical values appear in the "text/plain" output below]
" - ], - "text/plain": [ - " Inference Time accuarcy model_size flops params \\\n", - "0 51.28 95.923477 7.72 469.0 1.878 \n", - "4 69.00 96.584535 8.45 548.0 2.173 \n", - "3 71.62 96.744791 9.32 620.0 2.389 \n", - "1 82.72 97.055288 10.50 729.0 2.701 \n", - "2 88.98 97.245592 11.30 786.0 2.884 \n", - "\n", - " json_name \n", - "0 ./raw_metric1014/performance_104.json \n", - "4 ./raw_metric1014/performance_48.json \n", - "3 ./raw_metric1014/performance_69.json \n", - "1 ./raw_metric1014/performance_42.json \n", - "2 ./raw_metric1014/performance_52.json " - ] - }, - "execution_count": 10, - "metadata": {}, - "output_type": "execute_result" - } - ], - "source": [ - "# example data typo, accuracy , not accuarcy!!\n", - "ret_data = gather_metrics_from_dir(dir_path=\"./raw_metric1014\",case_type=\"Classification\")\n", - "ret_data" - ] - }, - { - "cell_type": "code", - "execution_count": 6, - "metadata": {}, - "outputs": [ - { - "data": { - "text/plain": [ - "numpy.float64" - ] - }, - "execution_count": 6, - "metadata": {}, - "output_type": "execute_result" - } - ], - "source": [ - "type(ret_data[\"Inference Time\"][1])" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "### Check single json file" - ] - }, - { - "cell_type": "code", - "execution_count": 7, - "metadata": {}, - "outputs": [ - { - "data": { - "text/plain": [ - "{'model_size': 7.72,\n", - " 'accuarcy': 95.923477,\n", - " 'params': 1.878,\n", - " 'flops': 469,\n", - " 'Inference Time': 51.28}" - ] - }, - "execution_count": 7, - "metadata": {}, - "output_type": "execute_result" - } - ], - "source": [ - "with open('./raw_metric1014/performance_104.json', \"r\") as f:\n", - " v = json.load(f)\n", - "v" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "### Show the Pruning case\n", - "\n", - "Input:\n", - "- data with pandas.DataFrame\n", - "- four label of x axis\n", - "- for label of y axis\n", - "- the name of image file to save" - ] - }, - { - "cell_type": "code", - "execution_count": 16, - "metadata": { - "scrolled": false - }, - "outputs": [ - { - "data": { - "text/plain": [ - "'./default_purning.png'" - ] - }, - "execution_count": 16, - "metadata": {}, - "output_type": "execute_result" - }, - { - "data": { - "image/png": 
"iVBORw0KGgoAAAANSUhEUgAABocAAAPpCAYAAADXRPT+AAAABHNCSVQICAgIfAhkiAAAAAlwSFlzAAAuIwAALiMBeKU/dgAAADl0RVh0U29mdHdhcmUAbWF0cGxvdGxpYiB2ZXJzaW9uIDMuMC4zLCBodHRwOi8vbWF0cGxvdGxpYi5vcmcvnQurowAAIABJREFUeJzs3X1wVPX5///X3mRPkk0wkBAixUC4i7QRCFMVFVpUKvEGFDqgoEUUBD6/ooVaa+tNb6zWVmnVMnwryo2lH1CKgKAF/XQgWgWhYoAQFCRREoWSkAAluyF7d87vj8jWSCAhJNlN9vmYYaaenHOd69CdgYvXvt/HZlmWJQAAAAAAAAAAAMQEe6QbAAAAAAAAAAAAQNshHAIAAAAAAAAAAIghhEMAAAAAAAAAAAAxhHAIAAAAAAAAAAAghhAOAQAAAAAAAAAAxBDCIQAAAAAAAAAAgBhCOAQAAAAAAAAAABBDCIcAAAAAAAAAAABiCOEQAAAAAAAAAABADCEcAgAAAAAAAAAAiCGEQwAAAAAAAAAAADGEcAgAAAAAAAAAACCGEA4BAAAAAAAAAADEEMIhAAAAAAAAAACAGEI4BAAAAAAAAAAAEEMIhwAAAAAAAAAAAGII4RAAAAAAAAAAAEAMIRwCAAAAAAAAAACIIYRDAAAAAAAAAAAAMYRwCAAAAAAAAAAAIIYQDgEAAAAAAAAAAMQQwiEAAAAAAAAAAIAYQjgEAAAAAAAAAAAQQwiHAAAAAAAAAAAAYgjhEAAAAAAAAAAAQAwhHAIAAAAAAAAAAIghhEMAAAAAAAAAAAAxhHAIAAAAAAAAAAAghhAOAQAAAAAAAAAAxBDCIQAAAAAAAAAAgBhCOAQAAAAAAAAAABBDCIcAAAAAAAAAAABiCOEQAAAAAAAAAABADCEcAgAAAAAAAAAAiCGEQwAAAAAAAAAAADGEcAgAAAAAAAAAACCGEA4BAAAAAAAAAADEEGekG0DHdOyYV6ZpRboNAAAAtAN2u02dO7sj3QbQJMw6AAAAaKponnUIh9AqTNNiYAIAAADQ4TDrAAAAoCMgHAIAAABinM0mGUac4uIccjrtcjj+u/t0KGQqGDQVCITk8wVk8W/iAAAAANoJZp0zIxwCAAAAYpTNZpPb7ZJhxMlutzV4jtPpkNPpUHx8nNxuQz5fQF6vX1asTU4AAAAA2g1mncYRDgEAAAAxyDCcSkoyZLfXfXOuqKRS7+46pOLPj6vs8An5AiEZcQ5lZnRS34tSNHxQd+X0SVNCgkuG4ZTH45PPF4zwUwAAAABAfcw6TWOzYiUGQ5uqqvKwDzcAAECUcrsNJSa6JEk79lVo4doilZVXN3pdZkaypo3JUW52uiSppsYvr9d33v3Y7Talpiaddx2gLTDrAAAARC9mnaYjHEKrYGACAACITqeGpWDQ1II1hXpza+k518gb2lMzxg6U02lvkaEpmgcm4OuYdQAAAKITs865sTd+CgAAAICOwDCc4WHpN4u3NWtYkqQ3t5bqN4u3KRg0lZhYt/UCAAAAAEQKs865IxwCAAAAYoDNZlNSkiFJWrCmUAX7Ks6rXsG+Ci1YUyhJSkoyZLM1/JJXAAAAAGhNzDrNQzgEAAAAxAC32yW73a6CfRXN/hbd1725tVQ79lXIbrfL7Xa1SE0AAAAAOBfMOs1DOAQAAAB0cDabZBhxkqRFa4tatPbCdXX1DCNOHfQLdQAAAACiFLNO8xEOAQAAAB2cYcTJbrepqKRSZeXVLVq77HC1ikoqZbfbwkMZAAAAALQFZp3mIxwCAAAAOri4OIck6d1dh1ql/ntf1j11HwAAAABoC8w6zUc4BAAAAHRwTmfdX/uLPz/eKvVP1T11HwAAAABoC8w6zdfxnggAAABAPQ5H3V/7yw6faJX6p7ZvOHUfAAAAAGgLzDrN1/GeCAAAAECDfIFQ69T1B1ulLgAAAAA0BbPOuSMcAgAAAGKE0Ur7ZBsuZ6vUBQAAAICmYNY5d4RDAAAAQAcXCpmSpMyMTq1SP7Nbcr37AAAAAEBbYNZpPsIhAAAAoIMLBusGmb4XpbRK/VN1T90HAAAAANoCs07zEQ4BAAAAHVzgy/23hw/q3ir1h31ZN9BK+3wDAAAAQEOYdZqPcAgAAADo4Hy+gEzTUk6ftPC2CC0lMyNZOX3SZJqWfL5Ai9YGAAAAgLNh1mk+wiEAAACgg7MshYeZaTfntGjtaWPq6vl8AVlWi5YGAAAAgLNi1mk+Z6QbaA+Kior0zDPPaMeOHbIsS7m5uXrggQc0YMCA8DlffPGFrr322jPWGD9+vB5//PEz/vzf//63Vq1apbffflulpaWy2+3q37+//ud//kdXXnlliz4PAAAAYo/X65dhOJWbna68oT315tbS866ZN7SncrPTZZqmvF5/C3QJAAAAAOeGWad5CIcasWfPHk2aNEkXXnihZs2aJdM0tXz5ct1xxx1auXKlevfuLUnq0qWLnnrqqdOuf/fdd/X666/rqquuOut9Nm7cqBdffFEjR47U2LFjFQwGtXbtWt1111367W9/q+9///ut8nwAAACIDZZlyePxqVOnBM0YO1AVx06qYF9Fs+sNyU7XjLEDJUkej09WR/wqHQAAAICox6zTPDaroz5ZC5k+fbp27typt956S507d5YkVVRUaNSoURo2bJjmzZt31uunTJmi3bt3a8uWLTIM44zn7d+/X6mpqerSpUv4mN/v180336yamhq98847LfNAbaSqyiPT5KMFAAAQbdxuQ4mJLgWDphasKWzWt+ryhvbUjLED5XTaVVPjl9frO6+e7HabUlOTzqsG0FaYdQAAAKITs865YeVQI7Zv367hw4eHgyFJSk9P12WXXab8/Hx5vV653e4Gr62oqNC2bdt0yy23nDUYkqR+/fqddszlcum73/2ulixZIo/Ho6Sk6PwQAQAAoP04NdwkJrr0w/GDdeXA7lq4rkhlh6sbvTYzI1nTxuQoNztdklpkWAIAAACAlsCsc24Ihxrh9/sVHx9/2vH4+HgFAgHt379fgwcPbvDa9evXyzRNjR49utn3P3LkiBISEpSQkNDsGgAAAMBXeb0+BYMhJSUZys1O1/wHrlFRSaXe23VIxZ8fV1l5tXz+oAyXU5ndktX3ohQNG9RdOX3SJEmmacrj8cnnC0b4SQAAAADgv5h1mo5wqBFZWVnauXOnQqGQHA6HpLrAqLCwUJJUXl5+xmvXrVunrl27aujQoc26d2lpqf7xj38oLy8vfG8AAACgJfh8Qfn9IbndLhlGnHL6pIUHojMxTUs+X0Ber7/D7rsNAAAAoH1j1mkae6QbiHaTJk3SgQMH9PDDD6u4uFiffPKJHnzwQR05ckSSVFtb2+B1n332mfbs2aMbb7xRdvu5/zafPHlSP/rRjxQfH6/777//vJ4BAAAAaMipF7cePepRdXWtamsDCgZDsiwr/CsYDKm2NqDq6lodPerp0C9kBQAAANAxMOs0jpVDjZg4caIOHz6sRYsWac2aNZKknJwcTZ06Vc8///wZ3z
f0+uuvS1KztpQLhUKaM2eOiouL9eKLL6pbt27NfwAAAACgEZYl1dYGVFsbiHQrAAAAANBimHXOjHCoCebMmaO7775b+/fvV3JysrKzs/XHP/5RktSrV68Gr3njjTeUlZWlnJycc77fI488orfffltz587VFVdccT6tAwAAAAAAAAAA1EM41EQXXHCBvv3tb4f/e8uWLcrIyFDv3r1PO3fXrl0qLS3Vfffdd873+f3vf6/Vq1froYce0k033XRePQMAAAAAAAAAAHwd7xxqhvXr12v37t268847G3yfUGNbyp08eVIlJSU6evRoveMLFy7U4sWLNXPmTN15550t3zgAAAAAAAAAAIh5rBxqxAcffKD58+frqquuUkpKinbt2qXVq1dr+PDhmjx58mnnh0IhbdiwQYMHD1ZmZmaDNQsLCzV58mTNmjVL9957ryTpH//4h55++mn16tVLvXv31tq1a+tdc9VVVyktLa3lHxAAAAAAAAAAAMQUwqFGdOvWTQ6HQ4sWLZLX61WPHj00e/ZsTZkyRU7n6b99W7ZsUWVlpWbOnHlO99m7d68k6cCBA/rpT3962s+XLl1KOAQAAAAAAAAAAM6bzbIsK9JNoOOpqvLINPloAQAAoHF2u02pqUmRbgNoEmYdAAAANFU0zzq8cwgAAAAAAAAAACCGEA4BAAAAAAAAAADEEMIhAAAAAAAAAACAGOKMdAMAAAAAgOhVWFio1157Tdu2bdPBgweVkpKiQYMGafbs2crKymr0+qKiIs2bN09FRUWqqalRjx49NH78eN1+++1yOBxt8AQAAAAAvo5wCAAAAABwRgsXLlRBQYHy8vKUnZ2tI0eOaNmyZRo3bpxWrFih/v37n/HaoqIi3XbbberVq5fuuecexcfH65///KeeeOIJlZWV6ZFHHmnDJwEAAABwis2yLCvSTaDjqaryyDT5aAEAAKBxdrtNqalJkW4DZ1BQUKCcnBy5XK7wsQMHDmj06NEaNWqU5s6de8ZrH330Ua1Zs0bvvfeeUlJSwsfvuOMOffzxx/rwww9btffWwKwDAACApormWYd3DgEAAAAAzmjIkCH1giFJ6tWrl/r166dPP/30rNd6PB4ZhqFOnTrVO961a1fFx8e3eK8AAAAAmoZwCAAAAABwTizLUmVlpTp37nzW8y677DJ5PB794he/UElJiQ4ePKiXX35Z//jHPzR9+vQ26hYAAADA1/HOIQAAAADAOVm3bp3Ky8t13333nfW8CRMmqLi4WCtWrNDKlSslSQ6HQ48++qgmTpzYFq0CAAAAaADhEAAAAACgyUpKSvTYY48pNzdXY8eOPeu5DodDF110kYYNG6a8vDy5XC79/e9/1+OPP66uXbtq5MiRbdQ1AAAAgK+yWZbFmzTR4nhJKwAAAJoqml/SivqOHDmiiRMnKhgMasWKFerWrdtZz3/hhRe0dOlSvfXWW3K73eHjP/jBD3TgwAHl5+fL6Wxf31lk1gEAAEBTRfOswzuHAAAAAACNqq6u1j333KPq6motXLiw0WBIkpYvX67LL7+8XjAkSddee60qKip08ODB1moXAAAAwFm0r69oAQAAAADanM/n08yZM3XgwAEtWbJEffv2bdJ1lZWVMk3ztOOBQECSFAwGW7RPAAAAAE3DyiEAAAAAwBmFQiHNnj1bO3fu1HPPPafc3NwGz6uoqFBJSUk4+JGkrKwsbdmyRceOHatXb8OGDXK73crMzGz1/gEAAACcjpVDAAAAAIAz+t3vfqdNmzbp6quv1vHjx7V27dp6P7/55pslSX/84x+1Zs0abdy4UT169JAk3XPPPXrggQc0YcIETZgwQfHx8fr73/+uPXv2aPbs2YqLi2vz5wEAAABAOAQAAAAAOIu9e/dKkvLz85Wfn3/az0+FQw0ZM2aMOnfurBdeeEGLFi2Sx+NRVlaWfv3rX+u2225rtZ4BAAAAnJ3Nsiwr0k2g46mq8sg0+WgBAACgcXa7TampSZFuA2gSZh0AAAA0VTTPOqwcAgAAiCE2m2QYcYqLc8jptMvh+O8rKEMhU8GgqUAgJJ8vIL5CBAAAAABAx0Q4BAAAEANsNpvcbpcMI052u63Bc5xOh5xOh+Lj4+R2G/L5AvJ6/WKhOQAAAAAAHQvhEAAAQAdnGE4lJRmy2+tWCRWVVOrdXYdU/PlxlR0+IV8gJCPOocyMTup7UYqGD+qunD5pSkhwyTCc8nh88vmCEX4KAAAAAADQUnjnEFoF+3ADABAd3G5DiYkuSdKOfRVauLZIZeXVjV6XmZGsaWNylJudLkmqqfHL6/W1aq+IXdG8Dzfwdcw6AAAAaKponnUIh9AqGJgAAIi8U8FQMGhqwZpCvbm19Jxr5A3tqRljB8rptBMQodVE88AEfB2zDgAAAJoqmmcde+OnAAAAoL0xDGc4GPrN4m3NCoYk6c2tpfrN4m0KBk0lJtZtMwcAAAAAANo3wiEAAIAOxmazKSnJkCQtWFOogn0V51WvYF+FFqwplCQlJRmy2Wzn3SMAAAAAAIgcwiEAAIAOxu12yW63q2BfRbNXDH3dm1tLtWNfhex2u9xuV4vUBAAAAAAAkUE4BAAA0IHYbJJhxEmSFq0tatHaC9fV1TOMOLF4CAAAAACA9otwCAAAoAMxjDjZ7TYVlVSqrLy6RWuXHa5WUUml7HZbOIACAAAAAADtD+EQAABABxIX55AkvbvrUKvUf+/LuqfuAwAAAAAA2h/CIQAAgA7E6az7613x58dbpf6puqfuAwAAAAAA2h+megAAgA7E4aj7613Z4ROtUv/UVnWn7gMAAAAAANofpnoAAIAOyBcItU5df7BV6gIAAAAAgLbjjHQDAAAAaHlGnEO1/pYPiAwXf30EAAAAEH1sNskw4hQX55DTaa+320EoZCoYNBUIhOTzBWRZEWwUiBJM9wAAAB1IKGTK6XQoM6OTPik71uL1M7slh+8DAAAAAJFms9nkdrtkGHGy220NnuN0OuR0OhQfHye325DPF5DX65dFSoQYRjgEAADQgQSDdeFQ34tSWiUc6ntRSvg+AAAAABBJhuFUUpIhu71ulVBRSaXe3XVIxZ8fV9nhE/IFQjLi6r481/eiFA0f1F05fdKUkOCSYTjl8fjk87F1NmIT4RAAAEAHEgiEFB8fp+GDumv95s9avP6wQd3D9wEAAACASHG7DSUmuiRJO/ZVaOHaIpWVV592Xq0/pE/KjumTsmNav/kzZWYka9qYHOVmp6tTpwTV1Pjl9fraun0g4uyNnwIAAID2wucLyDQt5fRJC28B11IyM5KV0ydNpmnJ5wu0aG0AAAAAaKpTwVAwaGr+yp36xQvvNxgMNaTscLV+8cL7mr9yp4JBU4mJLrndRit3DEQfwiEAAIAOxLIUDm6m3ZzTorWnjamrxwtcAQAAAESKYTjDwdBvFm/Tm1tLm1Xnza2l+s3ibeGAyDDYZAuxhXAIAACgg/F6/TJNU7nZ6cob2rNFauYN7anc7HSZpimv198iNQEAAADgXNhsNiUl1a3yWbCmUAX7Ks6rXsG+Ci1YUyhJSkoyZ
LPZzrtHoL0gHAIAAOhgLMuSx1O3Z/aMsQM1JDv9vOoNyU7XjLEDJUkej08Wy4YAAAAARIDb7ZLdblfBvopmrxj6uje3lmrHvgrZ7Xa53a4WqQm0B4RDTVBUVKSpU6dqyJAhys3N1d13362PP/643jlffPGFsrOzz/jrkUceadK9Vq5cqeuvv16XXHKJrrvuOv31r39tjUcCAAAdnM8XVE2NX06nXY/efXmzVxDlDe2pR+++XE6nXTU1fvl8wRbuFAAAAAAaZ7NJhhEnSVq0tqhFay9cV1fPMOLE4iHECjZSbMSePXs0adIkXXjhhZo1a5ZM09Ty5ct1xx13aOXKlerdu7ckqUuXLnrqqadOu/7dd9/V66+/rquuuqrRe73yyiv65S9/qVGjRumuu+7S9u3b9fjjj+vkyZOaPn16iz8bAADo2LzeutVDiYku/XD8YF05sLsWritS2eHGX9SamZGsaWNylPvlqqOaGn+4HgAAAAC0NcOIk91uU1FJpcrKG59pzkXZ4WoVlVQqp0+aDCNOtbWBFq0PRCObxb4gZzV9+nTt3LlTb731ljp37ixJqqio0KhRozRs2DDNmzfvrNdPmTJFu3fv1pYtW2QYxhnPq62t1Xe/+10NHjxYCxYsCB//yU9+oo0bN+rtt9/WBRdc0DIP1QaqqjwyTT5aAABEA8NwKinJkN1et2i8qKRS7+06pOLPj6usvFo+f1CGy6nMbsnqe1GKhg3qrpw+aZIk0zTl8fhYMYRWZbfblJqaFOk2gCZh1gEAIDKSk+MVHx+nP68u1PrNn7V4/RuvytLMcQNVWxtQdXVti9dHbIrmWYeVQ43Yvn27hg8fHg6GJCk9PV2XXXaZ8vPz5fV65Xa7G7y2oqJC27Zt0y233HLWYEiStm3bpuPHj2vSpEn1jt9+++16/fXX9fbbb+vmm28+/wcCAAAxx+cLyu8Pye12yTDilNMnLRz+nIlpWvL5AvJ6/bxjCAAAAEDEOZ11X3Yr/vx4q9Q/VffUfYCOjk96I/x+v+Lj4087Hh8fr0AgoP3795/x2vXr18s0TY0ePbrR+3z00UeSpJycnHrHv/Wtb8lut5/2jiMAAIBzYVmWPB6fjh71qLq6VrW1AQWDIVmWFf4VDIbC35I7etQjj8dHMAQAAAAgKjgcdf+UXXb4RKvUP7VV3an7AB0dK4cakZWVpZ07dyoUCsnhcEiqC4wKCwslSeXl5We8dt26deratauGDh3a6H2OHDkih8Oh1NTUesddLpdSUlJUUVFxHk8BAABQx7Kk2toAe2gDAAAAaJd8gVDr1PWzlTZiCzFoIyZNmqQDBw7o4YcfVnFxsT755BM9+OCDOnLkiKS6dwU15LPPPtOePXt04403hvf3P5va2lrFxcU1+DPDMM54HwAAAAAAAACIFUaco3XqulhHgdhCONSIiRMnaubMmXrjjTd04403avTo0SorK9PUqVMl6YzvG3r99dclqUlbykn/3aauIT6fr8Gt7QAAAAAAAAAgFoRCpiQpM6NTq9TP7JZc7z5AR0c41ARz5szR5s2btWzZMq1bt06rVq0K77/fq1evBq954403lJWVddo7hM6ka9euCoVCqqqqqnfc7/fr+PHjSk9PP69nAAAAAAAAAID2KhisC236XpTSKvVP1T11H6CjIxxqogsuuEDf/va3lZ2dLUnasmWLMjIy1Lt379PO3bVrl0pLS5u8akiSBgwYIEkqKiqqd7yoqEimaeriiy8+j+4BAAAAAAAAoP0KfPmuoeGDurdK/WFf1g200juNgGhDONQM69ev1+7du3XnnXc2+D6hxraUO3nypEpKSnT06NHwsaFDhyolJUUvv/xyvXNffvllJSQkaMSIES33AAAAAAAAAADQjvh8AZmmpZw+aeEt4FpKZkaycvqkyTQt+XwNv/oD6GgIhxrxwQcfaMqUKXrxxRe1cuVKPfLII/rJT36i4cOHa/LkyaedHwqFtGHDBg0ePFiZmZkN1iwsLNQNN9ygZcuWhY/Fx8frvvvuU35+vu677z6tXLlSDz74oNatW6eZM2cqJaV1lksCAAAAAAAAQLSzLIWDm2k3N+1VHk01bUxdPZ8voC/fJgJ0eM5INxDtunXrJofDoUWLFsnr9apHjx6aPXu2pkyZIqfz9N++LVu2qLKyUjNnzjzne91+++2Ki4vT4sWLtWnTJl144YX6+c9/rjvvvLMlHgUAAAAAAAAA2i2v1y/DcCo3O115Q3vqza2l510zb2hP5WanyzRNeb3+FugSaB9slkUWipZXVeWRafLRAgAAQOPsdptSU5Mi3QbQJMw6AABElmE41alTgoJBU79ZvE0F+yqaXWtIdroevftyOZ12nThxUj5fsAU7BaJ71mFbOQAAAAAAAABAu+DzBVVT45fTadejd1+uvKE9m1Unb2jPcDBUU+MnGELMYeUQWgXfpgMAAEBTRfO36YCvY9YBACA6uN2GEhNdkqQd+yq0cF2Ryg5XN3pdZkaypo3JUW52uiSppsYvr9fXqr0idkXzrEM4hFbBwAQAAICmiuaBCfg6Zh0AAKKHYTiVlGTIbq/bIKuopFLv7Tqk4s+Pq6y8Wj5/UIbLqcxuyep7UYqGDequnD5pkiTTNOXx+FgxhFYVzbOOM9INAAAAAAAAAABwrny+oPz+kNxulwwjTjl90sLhz5mYpiWfLyCv1y/WTSCWEQ4BAAAAAAAAANoly7Lk8fjk9fpkGHGKi3PI6bTL4bCHzwmFTAWDpgKBkHy+gMiEAMIhAAAAAAAAAEA7Z1lSbW1AtbWBSLcCtAv2xk8BAAAAAAAAAABAR0E4BAAAAAAAAAAAEEMIhwAAAAAAAAAAAGII4RAAAAAAAAAAAEAMIRwCAAAAAAAAAACIIYRDAAAAAAAAAAAAMYRwCAAAAAAAAAAAIIYQDgEAAAAAAAAAAMQQwiEAAAAAAAAAAIAYQjgEAAAAAAAAAAAQQwiHAAAAAAAAAAAAYgjhEAAAAAAAAAAAQAwhHAIAAAAAAAAAAIghhEMAAAAAAAAAAAAxxBnpBgAAAAAA0auwsFCvvfaatm3bpoMHDyolJUWDBg3S7NmzlZWV1aQaW7Zs0fPPP689e/bINE1lZWVp2rRpuuGGG1q5ewAAAAANIRwCAAAAAJzRwoULVVBQoLy8PGVnZ+vIkSNatmyZxo0bpxUrVqh///5nvX7VqlV6+OGHddVVV+nHP/6x7Ha7PvvsM/373/9uoycAAAAA8HU2y7KsSDeBjqeqyiPT5KMFAACAxtntNqWmJkW6DZxBQUGBcnJy5HK5wscOHDig0aNHa9SoUZo7d+4Zr/3iiy904403avz48XrkkUfaot1Wx6wDAACApormWYd3DgEAAAAAzmjIkCH1giFJ6tWrl/r166dPP/30rNe+8sorCoVC+tGPfiRJ8nq94vuJAAAAQOQRDgEAAAAAzollWaqsrFTnzp3Pet6WLVvUu3dvvfPOO/rOd76jIUOG6PLLL9ezzz4r0zTbqFsAAAAAX8c7hwAAAAAA52TdunUqLy/Xfffdd9bzSktL5XA4
9POf/1zTpk3TxRdfrP/7v//Tn//8Z4VCId1///1t1DEAAACAr+KdQ2gV7MMNAACApormfbhxupKSEk2YMEH9+vXTsmXL5HA4znjugAEDZJqm7r//fk2fPj18fNq0afrggw+0efNmJSW1r//vmXUAAADQVNE867CtHAAAAACgSY4cOaIZM2YoOTlZzz333FmDIUmKj4+XJN100031jt90002qra3Vxx9/3Gq9AgAAADgzwiEAAAAAQKOqq6t1zz33qLq6WgsXLlS3bt0avSY9PV2SlJaWVu94ly5dJEn/+c9/Wr5RAAAAAI0iHAIAAAAAnJXP59PMmTN14MABPf/88+rbt2+TrvvWt74lSSovL693vKKiQtJ/QyIAAAAAbYtwCAAAAABwRqFQSLNnz9bOnTv13HPPKTc3t8HzKioqVFJSokAgED52ww03SJJeffXV8DHTNLVDpduDAAAgAElEQVR69WqlpKQoJyendZsHAAAA0CBnpBsAAAAAAESv3/3ud9q0aZOuvvpqHT9+XGvXrq3385tvvlmS9Mc//lFr1qzRxo0b1aNHD0nStddeqyuuuEILFizQsWPHlJ2drY0bN+rDDz/UY489JpfL1ebPAwAAAIBwCAAAAABwFnv37pUk5efnKz8//7SfnwqHGmKz2TR//nw9++yz2rBhg1avXq2srCw9/fTTGjNmTKv1DAAAAODsbJZlWZFuAh1PVZVHpslHCwAAAI2z221KTU2KdBtAkzDrAAAAoKmiedbhnUMAAAAAAAAAAAAxhHAIAAAAAAAAAAAghhAOAQAAAAAAAAAAxBDCIQAAAAAAAAAAgBhCOAQAAAAAAAAAABBDCIcAAAAAAAAAAABiCOFQExQVFWnq1KkaMmSIcnNzdffdd+vjjz9u8Fy/36/nn39eeXl5uuSSS3TllVdq+vTpOnz4cKP3qa6u1lNPPaXrrrtOAwcO1NVXX62HHnpIhw4daulHAgAAAAAAAAAAMcoZ6Qai3Z49ezRp0iRdeOGFmjVrlkzT1PLly3XHHXdo5cqV6t27d/jcQCCgGTNmaMeOHRo/fryys7N14sQJ7dq1S9XV1crIyDjjfUzT1F133aWSkhJNnDhRWVlZKi0t1fLly/Xee+9p/fr1SkpKaotHBgAAAAAAAAAAHRjhUCOee+45xcfH65VXXlHnzp0lSWPGjNGoUaP0zDPPaN68eeFzX3rpJX3wwQdavny5Bg4ceE732blzp3bv3q1f/OIXuv3228PHs7Ky9NBDD+n999/X9773vZZ5KAAAAAAAAAAAELPYVq4R27dv1xVXXBEOhiQpPT1dl112mfLz8+X1eiXVrfxZunSpRo4cqYEDByoYDOrkyZNNvo/H45Ekpaam1jvetWtXSZJhGOf7KAAAAAAAAAAAAIRDjfH7/YqPjz/teHx8vAKBgPbv3y9JKi4uVkVFhbKzs/Xoo49q8ODBGjx4sEaPHq2tW7c2ep+cnBwlJibqueee0/vvv6/y8nL961//0tNPPx1+dxEAAAAAAAAAAMD5IhxqRFZWlnbu3KlQKBQ+5vf7VVhYKEkqLy+XJJWWlkqq21ruX//6lx577DE9+eST8vv9mjZtmvbu3XvW+3Tp0kXPPPOMqqurNWXKFH3nO9/RD37wA6Wnp+svf/mLnE52AAQAAAAAAAAAAOePcKgRkyZN0oEDB/Twww+ruLhYn3zyiR588EEdOXJEklRbWytJ4e3lvF6vXnrpJY0bN07jxo3TkiVLJEkLFy5s9F5dunTRN7/5Tc2ZM0fz58/Xvffeqw8//FA///nPW+npAAAAAAAAAABArGE5SiMmTpyow4cPa9GiRVqzZo2kui3gpk6dqueff15ut1uSwlvPDRkyRBdeeGH4+u7du2vIkCHasWPHWe/z+eefa/Lkyfr973+vUaNGSZJGjhypb3zjG/rZz36md955R9/97ndb4xEBoMOy2STDiFNcnENOp10Ox3+/ExEKmQoGTQUCIfl8AVlWBBsFAAAAAAAA2hArh5pgzpw52rx5s5YtW6Z169Zp1apVsr78V8RevXpJktLT0yVJaWlpp12fmpqqEydOnPUeq1evls/n09VXX13v+DXXXCNJKigoON/HAICYYbPZlJRkqEuXJCUnxys+Pk5Op0M2my38y+l0KD4+TsnJ8erSJUlJSYZsNlukWwcAAAAAAABaHSuHmuiCCy7Qt7/97fB/b9myRRkZGerdu7ckqX///oqLiwu/g+irKioq1KVLl7PWr6qqkmVZ9d5tJEnBYFCSTjsOAGiYYTiVlGTIbq/7/kNRSaXe3XVIxZ8fV9nhE/IFQjLiHMrM6KS+F6Vo+KDuyumTpoQElwzDKY/HJ58vGOGnAAAAAAAAAFoP4VAzrF+/Xrt379aDDz4Y/sfHpKQkfec739Hbb7+tkpIS9enTR5JUUlKiHTt26NZbbw1ff/LkSR06dEidO3cOh0a9evWSZVnasGGDxo0bFz73jTfekCR985vfbKvHA4B2y+02lJjokiTt2FehhWuLVFZefdp5tf6QPik7pk/Kjmn95s+UmZGsaWNylJudrk6dElRT45fX62vr9gEAAAAAAIA2YbOs6HnLwve+9z2NHz9e3//+95WamhrpdiRJH3zwgebPn6+rrrpKKSkp2rVrl1avXq0rr7xSzz//vJzO/+ZrxcXFGj9+vNxutyZPnixJWrp0qUKhkF577TV169ZNkrRt2zZNnjxZs2bN0r333itJOnbsmEaPHq3jx4/rtttuU79+/bRnzx69+uqr6t27t1avXi2Xy9X2vwHNVFXlkWlGzUcLQAw4FQwFg6YWrCnUm1tLz7lG3tCemjF2oJxOOwERALQhu92m1NSkSLcBNAmzDgAAAJoqmmedqAqHLr74YtlsNjkcDl177bW69dZbdeWVV0a0p7KyMv3617/Wnj175PV61aNHD40dO1ZTpkxpMKzZs2eP5s6dq507d8pms2no0KH66U9/Gn43kdRwOCRJ5eXleu6557Rt2zaVl5crJSVFV199tebMmdPotnTRhoEJQFsyDKc6dUpQMGjqN4u3qWBfRbNrDclO16N3Xy6n064TJ06yxRwAtIFoHpiAr2PWAQAAQFNF86wTVeHQs88+qzVr1oTf22Oz2dSjRw9NmDBB48aNi5rVRGgcAxOAtmKz2dSlS6Lsdrvmr9zZrBVDX5c3tKd+OH6wTNPU0aM1iqI/KgGgQ4rmgQn4OmYdAAAANFU0zzpRFQ5Jkmmaeuedd7RixQq9++67CoVC4dVEI0eO1IQJEyK+mgiNY2AC0FaSkgwlJLhUsK9Cv3zh/Rar+9j0K5Sbna6TJ/3yeNheDgBaUzQPTMDXMesAAACgqaJ51om6cOirysvL9eqrr2rVqlU6dOiQJFYTtRcMTADags0mdemSJLvdph8+tUll5dUtVjszI1nzH7hGpmnp6FGPovdPSwBo/6J5YAK+jlkHAAAATRXNs05Uh0OnWJal9957T3/729+Un5+vYDDIaqIox8AEoC3Ex8cpOTleRSWV+vn/29zi9Z/8/65STp80VVfXqrY
20OL1AQB1onlgAr6OWQcAAABNFc2zjj3SDTSFzWbT8OHDNW/ePG3cuFGXXnqpLMtSMBjUW2+9palTp2rUqFFasWKFQqFQpNsFALSRuDiHJOndXYdapf57X9Y9dR8AAAAAAACgI2gX4ZAkHTp0SH/60580YcIEbd++XVJdaDRgwAA5HA6VlpbqV7/6lSZMmKCjR49GuFsAQFtwOuv+GCv+/Hir1D9V99R9AAAAAAAAgI7AGekGziYUCik/P18rVqzQli1bZJqmLMtSSkqKxo4dq4kTJyozM1OVlZV6+eWXtWTJEn300Uf6wx/+oCeeeCLS7QMAWpnDURfalB0+0Sr1T73D6NR9AAAAAAAAgI4gKsOhL774QitXrtTq1atVWVmpU69Fys3N1cSJE5WXlyeXyxU+Py0tTffee69GjBih8ePH65///GekWgcARIAv0Dpbivr8wVapCwAAAAAAAERSVIVDb775pv72t79p69atsixLlmXJ7XZrzJgxmjhxovr373/W6y+55BKlpaWpsrKyjToGAEQDI86hWn/LB0SGK6r+mAQAxIgTJ05o3759crvd+uY3v1nvZxUVFXr88ce1efNm2e12jRgxQj/72c+UmpoaoW4BAAAAtEdR9a9es2fPDv/vAQMGaOLEibrpppuUmJjY5BpfXVEEAOjYQiFTTqdDmRmd9EnZsRavn9ktOXwfAADayquvvqqnn35akyZNqhcOBYNBTZ06VcXFxeHdFd544w3t3btXq1atYhYCAAAA0GRRFQ4ZhqEbbrhBEydO1MCBA5tVY9OmTS3cFQAgWgWDdeFQ34tSWiUc6ntRSvg+AAC0lc2bN0uSbrzxxnrH169fr/379ys+Pl5TpkxRfHy8Fi1apOLiYv3tb3/THXfcEYl2AQAAALRDURUOvfvuu+rUqVOk2wAAtBOBQEjx8XEaPqi71m/+rMXrDxvUPXwfAADaSmlpqSSdtq32hg0bZLPZdO+992rq1KmSpMzMTP34xz/WW2+9RTgEAAAAoMnskW7gqwiGAADnwucLyDQt5fRJC28B11IyM5KV0ydNpmnJ5wu0aG0AAM7m2LFjSkxMVFJSUr3j27dvlySNHj06fGzkyJGy2Wzav39/m/YIAAAAoH2LqnCoqqpKS5cu1RtvvNHouevWrdPSpUt19OjRNugMABCNLEvh4GbazTktWnvamLp6Pl9AX77WAQCANuHz+WSa9bc0/fTTT1VdXa2ePXsqPT09fNzlcqlTp07yeDxt3SYAAACAdiyqwqF169bpySefDG+jcDZ79+7Vk08+2aQgCQDQcXm9fpmmqdzsdOUN7dkiNfOG9lRudrpM05TX62+RmgAANFVqaqpqa2t15MiR8LH3339fkpSbm3va+T6fT8nJLbuCFgAAAEDHFlXh0KZNmyRJeXl5jZ57yy23yLIsbdy4sbXbAgBEMcuy5PH4JEkzxg7UkOz0Rq44uyHZ6ZoxdqAkyePxyWLZEACgjV1yySWSpCVLlkiSTp48qVdeeUU2m01XXHFFvXPLy8tVW1urrl27tnmfAAAAANqvqAqHysrK5HK51KdPn0bP7d+/vwzD0Oeff94GnQEAopnPF1RNjV9Op12P3n15s1cQ5Q3tqUfvvlxOp101NX75fMEW7hQAgMbdeuutsixLS5Ys0fXXX69Ro0Zp//796tKli6677rp6527dulVS3XwEAAAAAE0VVeFQVVWVEhISmnx+QkKCKisrW7EjAEB74fX6wgHRD8cP1mPTr1BmRtO22MnMSNZj06/QD8cPDgdDXq+vlTsGAKBhw4cP16xZs2Sz2fTZZ5+poqJCnTt31ty5cxUfH1/v3FPbbF9++eWRaBUAAABAO+WMdANflZSUpOrqavl8PhmGcdZzfT6fqqurlZSU1EbdAQCindfrUzAYUlKSodzsdM1/4BoVlVTqvV2HVPz5cZWVV8vnD8pwOZXZLVl9L0rRsEHdldMnTZJkmqY8Hh8rhgAAETdr1iyNGzdOu3btUqdOnTRw4MDT3ivk9/s1ePBgDRo0SCNGjIhMowAAAADapagKh/r166ft27crPz+/0fcObdq0SaFQSFlZWW3UHQCgPfD5gvL7Q3K7XTKMOOX0SQuHP2dimpZ8voC8Xj/vGAIARI3u3bure/fuZ/y5y+XSD3/4wzbsCAAAAEBHEVXbyl1zzTWyLEtPPfWUysvLz3heeXm5nnrqKdlsNo0cObINOwQAtAeWZcnj8enoUY+qq2tVWxtQMBiSZVnhX8FgSLW1AVVX1+roUY88Hh/BEAAAAAAAAGKCzYqifwk7efKkrr/+epWXlyslJUUzZszQiBEjwt+WO3TokPLz8/XCCy/o2LFjysjI0Pr165WYmBjhzvF1VVUemWbUfLQAAAAQxex2m1JT2S4a7QOzDgAAAJoqmmedqAqHJGnPnj2aNm2ajh07JpvN1uA5lmWpc+fOWrx4sQYMGNDGHaIpGJgAAADQVNE8MEWKZVlatWqV1q9fr3379uk///mPQqHQGc+32Wz66KOP2rDD2MWsAwAAgKaK5lknqt45JEnf+ta3tGbNGv3hD3/Qhg0bFAzWfyl4XFycbrzxRs2ZM0fdunWLUJcAAAAA0Dq8Xq+mT5+ugoICtjwFAAAA0CqibuXQV508eVJFRUU6cuSIbDabunbtqpycHMXHx0e6NTSCb9MBAACgqaL523SR8Pvf/15LliyRw+HQTTfdpGHDhiktLU0Oh+Os11122WVt1GFsY9YBAABAU0XzrBNVK4cOHTokSUpNTZVhGEpISNCll14a4a4AAAAAoO28+eabstlseuihh3T77bdHuh0AAAAAHZA90g181TXXXKORI0fq+PHjkW4FAAAAACKiqqpKDodD48ePj3QrAAAAADqoqFo5lJiYqLi4ON4lBAAAACBmde3aVSdOnJDL5Yp0KwAAAAA6qKhaOfSNb3xDJ0+eVCgUinQrAAAAABARw4YNk8fjUUlJSaRbAQAAANBBRVU4NHLkSAUCAb3zzjuRbgUAAAAAImLGjBm64IIL9MQTTygQCES6HQAAAAAdkM2yLCvSTZxSU1OjsWPHqqamRi+++KIuvvjiSLeEZqqq8sg0o+ajBQAAgChmt9uUmpoU6TaiSmFhoWbPni232627775bOTk5crvdZ72me/fubdRdbGPWAQAAQFNF86wTVeHQa6+9pmPHjmnevHny+/0aNmyYhgwZotTUVDkcjjNed8stt7Rhl2gKBiYAAAA0VTQPTJHi8Xj07LPP6n//939ls9kaPd9ms+mjjz5qg87ArAMAAICmiuZZJ6rCoYsvvjg8+FiWxRDUjjEwAQAAoKmieWCKhKNHj2ry5MkqKSnRuYxre/fubcWucAqzDgAAAJoqmmcdZ6Qb+Cq2QQAAAAAQ6+bPn6/i4mIlJCTorrvu0rBhw5SWlnbW3RQAAAAA4FxEVTi0adOmSLcAAAAAABGVn58vm82mJ554QjfccEOk2wEARCGbTTKMOMXFOeR02uVw2MM/C4VMBYOmAoGQfL6AomfPIABANImqcAgAAAAAYl1VVZ
Xi4uI0atSoSLcCAIgyNptNbrdLhhEnu73h1zE4nQ45nQ7Fx8fJ7Tbk8wXk9frPaatSAEDHRzgEAAAAAFEkPT1dVVVVbCMHAKjHMJxKSjJkt9etEioqqdS7uw6p+PPjKjt8Qr5ASEacQ5kZndT3ohQNH9RdOX3SlJDgkmE45fH45PMFI/wUAIBoQTgEICqxRB4AAMSqa665RkuXLtXu3bt1ySWXRLodFRYW6rXXXtO2bdt08OBBpaSkaNCgQZo9e7aysrLOqdYjjzyilStXasSIEVqwYEErdQwAHY/bbSgx0SVJ2rGvQgvXFqmsvPq082r9IX1SdkyflB3T+s2fKTMjWdPG5Cg3O12dOiWopsYvr9fX1u0DAKKQzYrSNaUFBQX68MMPVV5erpqamjMufbXZbPrtb3/bxt2hMVVVHplmVH60EOWaskT+q0zTYok8AADtnN1uU2pqUqTbiBrHjx/XzTffrNTUVL300kvq1KlTRPu57777VFBQoLy8PGVnZ+vIkSNatmyZampqtGLFCvXv379JdXbv3q3bbrtNDodDV1xxRbsNh5h1ALS1U8FQMGhqwZpCvbm19Jxr5A3tqRljB8rptBMQAUAbiuZZJ+rCoQMHDuj+++/XRx99VO+4ZVmy2WwNHvv444/bskU0AQMTmqO5S+QlyTRNlsgDANBORfPAFAkffPCBDh48qN/+9rdyuVyaMGGCBg4cKLfbfdbrLr300lbpp6CgQDk5OXK5XOFjBw4c0OjRozVq1CjNnTu30RqWZWnixInq3bu3tm7dqn79+hEOAUATGIZTnTolKBg09ZvF21Swr6LZtYZkp+vRuy+X02nXiRMnmZ8BoA1E86wTVdvKHTt2THfeeafKy8uVlpamSy+9VBs2bFB8fLyuu+46VVZWateuXfJ6vercubNGjBjRJn0VFRXpmWee0Y4dO2RZlnJzc/XAAw9owIABp53r9/u1ePFivfbaazp48KCSk5OVk5Ojxx57TBkZGY3eq7KyUn/605+Un5+v48ePq2vXrho6dCiro9DhsUQeAACgzg9+8IN6X4z785//3Og1NpvttC/YtZQhQ4acdqxXr17q16+fPv300ybVWLt2rT755BPNmzdPt956a0u3CAAdks1mU1KSIUlasKbwvIIhSSrYV6EFawr1w/GDlZRkyO8PsQMHAMSwqAqH/vKXv6i8vFyDBg3SSy+9pISEBG3YsEFJSUl66qmnJEk1NTWaP3++Fi1aJMMw9Ktf/apVe9qzZ48mTZqkCy+8ULNmzZJpmlq+fLnuuOMOrVy5Ur179w6fGwgENGPGDO3YsUPjx49Xdna2Tpw4oV27dqm6urrRcOjf//63Jk6cKEm67bbb1K1bN1VUVKiwsLBVnxGItPNZIl92uFq/eOH98BL5UwETAREAAGjPzvUf69r6H/csy1JlZaX69evX6Lkej0dz587VzJkz1bVr1zboDgA6BrfbJbvdroJ9Fc3aSq4hb24t1ZUDuys3O11ut0seD7MzAMSqqAqH3nnnHdlsNs2ZM0cJCQkNnpOYmKgHHnhAgUBAf/3rX3X55Zfr+uuv///Zu/foqMp7/+OfPZnJEJIAISEJCMg9UAOSHMpNQGw5EmsRoaVWtICAQHvQglaoP6THohZqKcpSBBEQUbCVIpdjqZQKtF6QiwnEqFwS5SKQCwFKMknmkpnfH5ipSEgGnMnskPdrLVdX9jz72d/NmlX45rOfZ4espoULF6pRo0b605/+pLi4OEnSHXfcoaFDh+qZZ57Rc8895x+7cuVK7dmzR2vWrFGPHj2u+Fq/+c1vFBERob/85S/+awHXOrvd6g+Gvs0S+bc/PKrCs+WaPb7PV/NVskQeAADUSwcOHAh3CbXatGmTCgoK9OCDD9Y6dtGiRbLb7Ro3blzoCwOAa4RhSHa7TZK0fGNOUOdetilHix75nux2mxwOp1g8BAANkyXcBXzdsWPHZBiGevXqddFxt9t9ydhJkyZJkt54442Q1rR3717169fvorAmMTFRvXv31vbt2+VwOCRdeN/JqlWrNGTIEPXo0UMej0fl5eUBXycvL0//+te/NGHCBMXFxcnpdFZ738C1JFRL5CUpJsZ+yXvKAAAA8O3l5eVpzpw5SktL04gRI2oc+8UXX+jVV1/VjBkzLnpnEQCgZna7TRaLoZy809Vuuf5tHMsvUU7eaVkshj+AAgA0PKYKhzwej5o0aSKr9T8LmqKiovwBzNclJCQoNjZWBw8eDGlNLpdLjRo1uuR4o0aN5Ha7dfjwYUlSbm6uCgsLlZKSotmzZ6tnz57q2bOnhg0bpg8//LDW6+zcuVPShfsaO3asevTooRtvvFETJ07Ul19+GdybAkwiVEvksw4WymKxKDqaX0AAAAAEU1FRkSZPnqzY2FgtXLhQERERNY5/6qmnlJaWpqFDh9ZRhQBwbbDZLvz/67v7T4Zk/ve+mrfqOgCAhsdU4VBiYqIqKiouOhYfH6/KykodP378ouNut1ulpaUqKQnu0xPf1L59e+3bt0+VlZX+Yy6Xy/8eoIKCAknS0aMXfrG9cuVK7d69W3PmzNHcuXPlcrk0ceLEWreGOHLkiCRp9uzZstlseuaZZ/Twww/ro48+0n333XdFq5CA+iDUS+SlC/OzeAgAACA4SkpKdP/996ukpETLli1TUlJSjeN37typd999V2PGjNGXX37p/8/j8aiiokJffvmlSktL66h6AKhfrNYLv7LLPX4uJPNXzVt1HQBAw2Oqdw61atVKX375pfLz85WcnCxJ6t69u06cOKGNGzdq6tSp/rHr16+X1+tVq1atQlrT6NGj9fjjj2vWrFmaOHGivF6vFi9erKKiIknyh1lVq5scDoc2bNigli1bSpL69u2rW2+9VcuWLdP8+fMve52ysjJJUosWLbR06VJZLBf+ck5OTtZDDz2kt956S6NGjQrZfQJ1rS6WyKd2TJDdblNFBVs0AgCA+ik/P1+ZmZkqKChQWVmZfDW8GOLr/VKwOZ1OTZkyRUeOHNHLL7+sTp061XrOqVOnLltXQUGBvv/97+vRRx/lXUQAUI2IiAu/FzqWfz4k81f14VXXAQA0PKYKh3r16qXdu3dr165dGj58uCRp+PDh+tvf/qYlS5aouLhY3bp104EDB/TGG2/IMAwNGTIkpDXdfffdys/P1/Lly7V+/XpJUmpqqiZMmKAlS5YoOjpakvxbz6Wnp/uDIelC4JWenq6srKwar1N1fkZGhj8Yqvp5xowZyszMJBzCNaUulsindkyQzRZBOAQAAOqdM2fO6PHHH9c//vGPGgMhSfL5fDIMI2ThUGVlpaZNm6Z9+/bphRdeUFpaWrXjCgsLVVJSorZt28pms6lv375atGjRJeNmz56tVq1a6ec//7m6dOkSkpoB4FrhdFfWPuhq5nV5QjIvAKD+MFU4lJGRofXr1+vDDz/0h0ODBw/W7bffrr/+9a/605/+5B/r8/nUsWNH/c///E/I65o+fbrGjx+vw4cPKzY2VikpKVqwYIEkqV27dpIubIknXXhn0DfFx8frs88+q
/Ealzs/IiJCzZo10/nzoXlSBAgXlsgDAABUr6ysTGPGjFFeXp5sNpu6du2q7Oxs2Ww29ejRQ6dPn/Zva920adOQByzz5s3Ttm3bdMstt+jcuXPauHHjRZ9X9W4LFizQ+vXr9c4776h169Zq1apVtTs9/O53v1NCQkLIH/QDgGuB3RahClfwAyJ7pKl+JQgACANT/U3QuXNnbdu27ZLj8+fPV58+fbR582adOnVKsbGxGjhwoMaPH6/Y2Ng6qa1p06bq1auX/+cPPvhAycnJ6tChgySpS5custls/ncQfV1hYaGaN29e4/w33HCDJF1yvsvl0tmzZ2s9H6hvWCIPAABQvdWrVys3N1cdOnTQypUrlZiYqK5du6pp06ZavXq1JOnEiROaP3++tmzZooEDB2rSpEkhq6fq/anbt2/X9u3bL/m8KhwCAARPZaVXVmuE2iY30aFjZ4M+f9ukWP91AAANk6nCocsxDEM/+clP9JOf/CTcpUiSNm/erI8//lgzZ870bwEXExOjQYMGaceOHcrLy1PHjh0lSXl5ecrKytJdd93lP7+8vFwnT55UXFycP/Tp06eP4uPj9X//93+aMmWK7Ha7pAvvVqqsrFT//v3r+C6BusESeQAAgIv94x//kGEYeuihh/w7DHzTddddp2eeeUYPP/ywnnnmGXXv3l39+vULST2vvvpqQOPmzZunefPm1TquugcCAQAX83guhEOd2qBzMl4AACAASURBVDQLSTjUqU0z/3UAAA0Tj9TXYs+ePRo3bpxeeuklrV27Vo899ph+9atfaeDAgRozZsxFYx966CHZ7XaNHTtWS5cu1dKlSzV27Fg1bdpUU6ZM8Y/Lzs7WD37wA/9Tf5IUGRmpGTNm6Pjx47rnnnv06quv6ve//72eeOIJ9erVS7feemud3TNQl+xfvXso6POyRB4AANRTn3/+uSRp0KBBFx33eC59+GXatGny+XwBBzgAgPrB/dWDlANvvHR7zmAY8NW87hA9sAkAMD9+e1qLpKQkRUREaPny5XI4HGrdurWmTZumcePGyWq9+I+vU6dOeu211zR//nwtXrxYhmGob9++mjFjhpKSkmq91p133imbzaalS5fq6aefVpMmTXTXXXdp+vTpiogIzS/QgXBhiTwAAED1nE6nmjRposjISP8xu92usrKyS8a2adNGsbGxys7OrssSAQAh5nS6FR1tV2rHBLVNivVvnR4MbZNjldoxQV6vT06nO2jzAgDqF1OFQxs2bLiq8+68884gV/Ifbdu21fLlywMef8MNN+jll1+ucUyfPn108ODBaj+7/fbbdfvtt19RjUB9xBJ5AACA6iUkJOjs2Yv/fdS8eXPl5+crPz9fycnJ/uOVlZUqLy9XeXl5XZcJAAghn+9CQBQVFamJw1P1m6U7gzb3xDtSJV2Y3+cL2rQAgHrGVOHQr3/9axmGcUXnGIYR0nAIQGi43ZVq1MimgTe20ub3vwj6/CyRBwAA9VXLli116tQpFRcXKz4+XpLUtWtX5efna+vWrfrZz37mH7tt2zZ5PJ6AdioAANQvDodLdrtVaSmJyuh7vd7+8Oi3njOj7/VKS0mU1+uVw+EKQpUAgPrKVOFQq1Y176NaWlqq8+fPS5KioqIUFxdXF2UBCAGWyAMAAFSvZ8+eyszM1N69ezV06FBJ0g9+8ANt375dCxYskNPpVLdu3XTgwAH/dtbffD8RAKD+8/l8Ki11qkmTKE0e0UOFZ8uVebDwqudLT0nU5BE9JEmlpU75WDYEAA2a4atnfxMcOXJEixcv1ttvv625c+fqBz/4QbhLQjWKi0vl9darrxbCICbGrqioSGUdLAzqEvk5k/opLSVR5eUulZY6gzYvAAAIDYvFUHx8TLjLMI39+/frrrvu0ve+9z298MILki78gnDs2LHavXv3Rbst+Hw+JSQkaN26daweqiP0OgDqWnS0XY0bR8rj8erF9dlXtYIoo+/1mjyih6xWi8rKXHI46JUBoC6Yudepd+FQlVmzZmnTpk1644031K1bt3CXg2+gYUIgDMNQ8+aNZbFYtGjtvqAtkf+fUT3l9Xp15kwZT0IBAFAPmLlhMpOKigotXrxYmzdv1qlTpxQbG6uBAwdq2rRpte7CgOCh1wEQDlUBkSRlHSzUsk05OpZf+w4cbZNjNfGOVKWlJEoSwRAA1DEz9zr1NhzKz8/X4MGDlZGRoWeffTbc5eAbaJgQKLvdqiZNouTxePXEil3feon87PF9ZLVadP58uZxOTxArBQAAoWLmhgn4JnodAOFit1sVE2OXxWKRJOXkndZ7+08q9/g5HSsokdPlkT3SqrZJserUppkG3NhKqR0TJEler1elpU76ZACoY2budeptOCRJvXv3ls1m0/vvvx/uUvANNEy4EiyRBwCgYTNzwxQOGzZskCQNGDBACQkJYa4G30SvAyCcDMNQdHSk7HabLBaj1vFV7+J1OFzsrAEAYWDmXqfehkNOp1NpaWmyWq3Kzs4Odzn4BhomXCmWyAMA0HCZuWEKh65du8pqtWrPnj2KiooKdzn4BnodAGZgGJLdbpPNFiGr1aKICIv/s8pKrzwer9zuSjmdbtXP3/wBwLXBzL2ONdwFXK1169bJ6/Xy0lXgGuFwOOXxVComxq60lEQteuR7LJEHAAANUtOmTSWJYAgAcFk+n1RR4VZFhTvcpQAA6ilThUMnT56s8XOn06n8/Hxt2bJF69atk2EYGjJkSB1VByDUnE6PXK5K/xL51I4J/vDnclgiDwAArjUdOnRQdna2HA6HoqOjw10OAAAAgGuQqcKh73//+wGP9fl86ty5s37xi1+EsCIAdc3n86m01CmHw8kSeQAA0CCNHDlSWVlZWrt2rcaNGxfucgAAAABcg0wVDgX61H/btm11++236/7771fjxo1DXBWAcGCJPAAAaKhGjRqld999V/Pnz5fNZtNdd90lq9VUrRsAAACAes7wmWgfphMnTtT4udVqVZMmTdh7ux7gJa0AAAAIlJlf0hoOjz76qCRpy5YtKi8vV5MmTdS9e3fFx8fLYrFUe45hGPrd735Xl2U2WPQ6AAAACJSZex1ThUO4dtAwAQAAIFBmbpjCoWvXrjIMI6CdFarGGYahzz77rA6qA70OAAAAAmXmXoe9CQAAAADARO68804ZhhHuMgAAAABcw0y1csjlcunzzz+XzWZTx44daxybl5cnt9utjh07ymaz1VGFCBRP0wEAACBQZn6aDvgmeh0AAAAEysy9TvUbVofJ5s2bNWLECL3yyiu1jl2yZIlGjBihLVu21EFlAAAAAAAAAAAA1wZThUN///vfJV3YRqE2P/7xj+Xz+QiHAAAAAAAAAAAAroCpwqHDhw8rIiJCPXr0qHVsenq6rFarDh06VAeVAQAAAAAAAAAAXBus4S7g6woLCxUbGyurtfaybDabYmJiVFhYWAeVAQAAAEDdKioq0rp16/TRRx8pPz9f5eXlutwrYw3D0D/+8Y86rhAAAABAfWWqcMhms8nhcAQ01ufzqaysLKAgCQAAAADqk61bt2rm
zJm1BkJVnxmGUZflAQAAAKjnTJWstG7dWgcPHlRWVpbS0tJqHJuZmSmXy6Xrr7++jqoDAAAAgNDLzc3Vww8/LJfLpcGDB+vmm2/Wb3/7W8XGxmrmzJk6ffq0PvjgA+3evVtxcXGaOnWqGjduHO6yAQAAANQjpnrnUP/+/eXz+fTHP/5RHo/nsuM8Ho8WLFggwzB000031WGFAAAAABBaK1eulMvl0h133KElS5bo7rvvliTZ7Xb9+Mc/1pQpU7Rq1Sq9+OKLqqio0IYNG/TDH/4wzFUDAAAAqE9MFQ6NGTNGdrtdH330ke677z59+umnl4z55JNPNG7cOH300UeKjIzUmDFjwlApAAAAAITG7t27ZRiGJk+eXOO4m2++WTNnztTHH3+sV155pY6qAwAAAHAtMHyX28A6TDZs2KBHH33U/3NCQoKuu+46SdKJEyd0+vRp+Xw+GYahefPmafjw4eEqFTUoLi6V12uqrxYAAABMymIxFB8fE+4yTOPGG29UZWWlcnJy/Me6deumJk2aaNeuXReNLS8vV69evZSSkqI333yzrkttkOh1AAAAECgz9zqmeueQJN15551q1qyZnnjiCZ04cUJFRUUqKiq6aEybNm00e/ZsDRo0KExVAgAAAEBo2Gw2RUVFXXSscePGKikpkcfjkdX6nzYuKipK0dHROnbsWF2XCQAAAKAeM104JEmDBw/WwIEDtWvXLmVmZur06dMyDEMJCQlKT09Xnz59ZLGYakc8AAAAAAiKxMREHTt2TF6v19/3XHfddTp8+LAOHDig1NRU/9h///vfOn/+vOx2e7jKBQAAAFAPmTIckqSIiAj1799f/fv3D3cpAAAAAFBn2rVrpy+++EKff/65OnXqJElKT0/XoUOHtGLFCi1YsMA/9tlnn5UktW/fPiy1AgAAAKifTBsOAQAAAEBD1K9fP23btk3vvvuuPxz66U9/qjfeeEN/+9vfdOjQIaWkpOjQoUPKzc2VYRj60Y9+FOaqAQAAANQnEY8//vjj4S6iSnFxsf7yl7/o+PHj6tKlS41jN23apF27dqlNmzaX7MeN8Csvd8nHO1oBAAAQAMMw1LhxZLjLMI3rrrtOZ8+eVUxMjHr16iVJSkhIUNOmTfX++++ruLhYhw8f1pkzZyRJt99+ux555JFwltyg0OsAAAAgUGbudUy1cmjTpk16+umnNXXq1FrHHjhwQC+//LIkacyYMaEuDQAAAADqREJCgubOnXvJ8XvvvVf9+vXTli1blJ+fr5iYGA0cOFD9+vULQ5UAAAAA6jPD5zPPM08/+9nPtHfvXr311lvq2LFjjWMPHTqkO+64Q3369NErr7xSRxUiUMXFpfJ6TfPVAgAAgIlZLIbi42PCXQYQEHodAAAABMrMvY6pVg4dO3ZMkZGRtQZDktSlSxfZ7XYdP368DioDAAAAgPA4fPiwcnJyVFxcLEmKj49XamqqOnfuHObKAAAAANRXpgqHiouLFRMTeIoWFRWl06dPh7AiAAAAAAiP7du3a8GCBcrNza32806dOmnatGn6/ve/X8eVAQAAAKjvLOEu4OtiYmJUUlIip9NZ61in06mSkhJFRUXVQWUAAAAAUHeef/55/eIXv9Dhw4fl8/kUERGh+Ph4xcfHKyIiQj6fT4cPH9bUqVP13HPPhbtcAAAAAPWMqcKhzp07y+v1avv27bWO3bZtmyorK9W+ffs6qAwAAAAA6sa//vUvPf/88/L5fPrud7+rFStWKDMzU++9957ee+89ZWZmasWKFerdu7d8Pp9eeOEFvfvuu+EuGwAAAEA9Yqpw6Hvf+558Pp+efvppFRQUXHZcQUGBnn76aRmGoSFDhtRhhQAAAAAQWitXrpQkZWRkaNWqVerfv78iIyP9n0dGRqp///565ZVXlJGRIZ/P5z8HAAAAAAJh+Hw+X7iLqFJeXq7bbrtNBQUFatasmSZPnqzBgwerVatWkqSTJ09q+/btWrp0qc6ePavk5GRt3rxZjRs3DnPl+Kbi4lJ5vab5agEAAMDELBZD8fGBv3v0Wte7d2+VlJRox44dSkpKqnFsfn6+Bg8erCZNmmj37t11VGHDRq8DAACAQJm51zFVOCRJn3zyiSZOnKizZ8/KMIxqx/h8PsXFxWnFihXq1q1bHVeIQNAwAQAAIFBmbpjCIS0tTZGRkdq1a1dA4/v06SOXy6WsrKwQVwaJXgcAAACBM3OvY6pt5STphhtu0Pr16zVs2DD/i1a//p/VatWdd96pDRs2EAwBAAAAuOa0bt1aDodDLper1rEul0sOh0Nt2rSpg8oAAAAAXCus4S6gOsnJyfrDH/6gOXPmKCcnR0VFRTIMQy1atFBqaqoaNWpUp/Xk5OTomWeeUVZWlnw+n9LS0vTII49UG065XC6tWLFCGzZs0IkTJxQbG6vU1FTNmTNHycnJAV9z7969uueeeyRJO3fuVPPmzYN2PwAAAADMa9iwYVqwYIE2btyoUaNG1Th248aN8ng8+uEPf1hH1QEAAAC4FpgyHKoSFRWl7373u2Gt4ZNPPtHo0aPVsmVLTZ06VV6vV2vWrNG9996rtWvXqkOHDv6xbrdbkydPVlZWlkaNGqWUlBSdP39e+/fvV0lJScDhkNfr1ZNPPqnGjRurrKwsVLcGAAAAwITuu+8+/fOf/9STTz4pq9WqESNGVDtuw4YNevLJJ9WrVy+NHz++jqsEAAAAUJ+Z7p1DZjNp0iTt27dPW7ZsUVxcnCSpsLBQQ4cO1YABA/Tcc8/5x7700ktauHCh1qxZox49elz1NV9//XU9++yzuuOOO7Rq1ap6uXKIfbgBAAAQKDPvwx0Ozz//vNxut9asWaPS0lK1bNlSvXv3VlJSkiSpoKBAu3fv1qlTpxQbG6u7775bNput2rmmTp1al6U3CPQ6AAAACJSZex1Trhw6cOCAVq9erY8++kj5+fkqLy+/7FjDMPTpp5+GrJa9e/dq4MCB/mBIkhITE9W7d29t375dDodD0dHR8nq9WrVqlYYMGaIePXrI4/HI7XYrKirqiq537tw5Pfvss3rwwQd15syZYN8OAAAAAJN7/vnnZRiGJMnn8+nkyZPauHHjRWOqnvErKSnR0qVLLzsX4RAAAACA6pguHHrttdc0b948VVZWygyLmlwuV7XvOGrUqJHcbrcOHz6snj17Kjc3V4WFhUpJSdHs2bO1fv16ud1udenSRbNmzVLfvn0Dut7ChQvVokUL/fSnP9ULL7wQ7NsBAAAAYHLh3lobAAAAwLXPVOHQ/v379dRTT0mSRo8erZtvvlmTJk1S06ZN9eyzz+r06dP64IMP9NZbbykmJkaPPfaYWrRoEdKa2rdvr3379qmyslIRERGSLgRG2dnZki5s6SBJR48elSStXLlSzZo105w5cyRJL774oiZOnKi//OUv6tq1a43XOnDggP785z9r6dKl/msBAAAAaFheffXVcJcAAAAA4BpnCXcBX7dq1Sr5fD6NGTNGs2fP1qBBgyRJNptN/fr107BhwzR37ly98cYbMgxDCxcu1He+852Q1jR69Gg
dOXJEs2bNUm5urg4dOqSZM2eqqKhIklRRUSFJcjgc/v9duXKlRo4cqZEjR+rll1+WJC1btqzWaz311FMaNGiQBgwYEKK7AQAAAAAAAAAADZ2pwqGsrCwZhqExY8bUOK5bt2567LHHdOzYMS1fvjykNd19992aMmWK3nrrLd1+++0aNmyYjh07pgkTJkiSoqOjJcm/9Vx6erpatmzpP79Vq1ZKT09XVlZWjdfZvHmzsrKyNHPmzBDdCQAAAAAAAAAAgMnCodOnTysyMlLXXXed/5jFYpHT6bxk7H//93/LarVq69atIa9r+vTpev/997V69Wpt2rRJ69at878PqV27dpKkxMRESVJCQsIl58fHx+v8+fM1XuPpp5/W0KFDZbPZ9OWXX+rLL7/0n5Ofn+/fvg4AAAAAAAAAAODbMNU7h6Kioi45Fh0drdLSUrlcLkVGRvqP22w2RUVF6cSJE3VSW9OmTdWrVy//zx988IGSk5PVoUMHSVKXLl1ks9mqDXEKCwvVvHnzGuc/deqU3nrrLb311luXfDZixAh17dpVGzdu/JZ3AQAAAAAAAAAAGjpThUOJiYn64osv5PF4ZLVeKK1Nmzb67LPPlJ2dfVE4U1BQoJKSkmoDpVDbvHmzPv74Y82cOVMWy4XFVzExMRo0aJB27NihvLw8dezYUZKUl5enrKws3XXXXf7zy8vLdfLkScXFxflDo0WLFl1ynb/+9a/avHmzfv/73ys5ObkO7gwAAAAAAAAAAFzrTBUOdezYUbm5uTp06JC+853vSJL69OmjTz/9VC+88IIWL14su90ul8ulp556StKFFTuhtGfPHi1atEg33XSTmjVrpv379+vNN9/UwIEDL3k30kMPPaSdO3dq7Nix/s9WrVqlpk2basqUKf5x2dnZGjNmjKZOnaoHHnhAkjRkyJBLrv3ZZ59JkgYNGlTryiMAAAAAAAAAAIBAmCocuummm/T2229r27Zt/nBo9OjRWr16tXbu3KlBgwapffv2OnLkiP7973/LMAzdc889Ia0pKSlJERERWr58uRwOh1q3bq1p06Zp3Lhx/tVNVTp16qTXXntN8+fP1+LFi2UYhvr27asZM2YoKSkppHUCAAAAAAAAAAAEwvD5fL5wF1Hl/PnzWrVqlZKSkjRq1Cj/8a1bt+rRRx9VaWmp/5jFYtGECRP08MMPh6NU1KK4uFRer2m+WgAAADAxi8VQfHxMuMsAAkKvAwAAgECZudcxVThUk3Pnzumf//yn8vPzFRMTowEDBuj6668Pd1m4DBomAAAABMrMDRPwTfQ6AAAACJSZe516Ew6hfqFhAgAAQKDM3DDhwjtTN2zYoF27dunEiRNq1qyZbrzxRk2bNk3t27ev8dydO3dq06ZNyszMVH5+vhISEtS3b1/98pe/VGJiYh3dQXDR6wAAACBQZu51CIcQEjRMAAAACJSZGyZIDz74oDIzM5WRkaGUlBQVFRVp9erVKisr05///Gd16dLlsueOHDlS//73v5WRkaF27drp+PHjeu211xQVFaUNGzaoRYsWdXgnwUGvg2/DMCS73SabLUJWq0URERb/Z5WVXnk8XrndlXI63eK3NQAA1H9m7nUIhxASNEwAAAAIlJkbJkiZmZlKTU1VZGSk/9iRI0c0bNgwDR06VPPnz7/suXv27NF//dd/yWKxXHTs3nvv1ZQpUzR9+vSQ1h4K9Dq4GoZhKDo6Una7TRaLUet4r9cnp9Mth8Mlfm0DAED9ZeZexxruAgAAAAAA5pWenn7JsXbt2qlz5876/PPPazz3u9/9brXHmjVrVuu5wLXCbrcqJsbuD0lz8k7r3f0nlXv8nI7ln5fTXSm7LUJtk5uoU5tmGnhjK6V2TFBUVKTsdqtKS51yOj1hvgsAAHCtIRwCAAAAAFwRn8+n06dPq3Pnzld8rsPhkMPhUFxcXAgqA8wlOtquxo0vrLrLOlioZRtzdKyg5JJxFa5KHTp2VoeOndXm979Q2+RYTbwjVWkpiWrSJEplZS45HM66Lh8AAFzDLLUPAQAAAADgPzZt2qSCggLddtttV3zuK6+8IrfbfVXnAvVJVTDk8Xi1aO0+/WbpzmqDoeocyy/Rb5bu1KK1++TxeNW4caSio+0hrhgAADQkhEMAAAAAgIDl5eVpzpw5SktL04gRI67o3D179mjRokW67bbb1K9fvxBVCISf3W71B0NPrNiltz88elXzvP3hUT2xYpc/ILLb2QAGAAAEB+EQAAAAACAgRUVFmjx5smJjY7Vw4UJFREQEfG5eXp6mTp2qzp0768knnwxhlUB4GYahmJgLq3xeXJ+tzIOF32q+zIOFenF9tiQpJsYuwzC+dY0AAACEQwAAAACAWpWUlOj+++9XSUmJli1bpqSkpIDPPXXqlCZMmKCYmBgtXbpUMTExIawUCK/o6EhZLBZlHiy86hVD3/T2h0eVdbBQFotF0dGRQZkTAAA0bIRDAAAAAIAaOZ1OTZkyRUeOHNGSJUvUqVOngM89e/asxo8fL5fLpeXLlysxMTGElQLhZRiS3W6TJC3fmBPUuZdtujCf3W4Ti4cAAMC3RTgEAAAAALisyspKTZs2Tfv27dPChQuVlpZW7bjCwkLl5eXJ7Xb7j5WVlWnSpEkqKCjQ0qVL1a5duzqqGggPu90mi8VQTt5pHSsoCercx/JLlJN3WhaL4Q+gAAAArhZvMgQAAAAAXNa8efO0bds23XLLLTp37pw2btx40efDhw+XJC1YsEDr16/XO++8o9atW0uSfvWrXyk7O1s/+tGPlJeXp7y8PP950dHRGjJkSN3dCFAHbLYL7+F6d//JkMz/3v6TSu2YIJstQhUV7tpPAAAAuAzCIQAAAADAZR04cECStH37dm3fvv2Sz6vCoZrOXbdundatW3fRZ9dddx3hEK45VuuFDVpyj58LyfxV81ZdBwAA4GoZPp/PF+4icO0pLi6V18tXCwAAALWzWAzFx8eEuwwgIPQ6qElCQowMw9CoR99Shasy6PNH2a1643e3y+fz6fTp0qDPDwAAgsvMvQ6PmgAAAAAAAASR0x38YEiSnC5PSOYFAAAND+EQAAAAAABAENm/evdQ0OeN5O0AAAAgOAiHAAAAAAAAgqCy0itJapvcJCTzt02Kveg6AAAAV4twCAAAAAAAIAg8nguhTac2zUIyf9W8VdcBAAC4WoRDAAAAAAAAQeD+6l1DA29sFZL5B3w1rztE7zQCAAANB+EQAAAAAABAEDidbnm9PqV2TPBvARcsbZNjldoxQV6vT06nO6hzAwCAhodwCAAAAAAAIAh8PvmDm4nDU4M698Q7LszndLrl8wV1agAA0AARDgEAAAAAAASJw+GS1+tVWkqiMvpeH5Q5M/per7SURHm9XjkcrqDMCQAAGjbCIQAAAAAAgCDx+XwqLXVKkiaP6KH0lMRvNV96SqImj+ghSSotdcrHsiEAABAEhEMAAAAAAABB5HR6VFbmktVq0ezxfa56BVFG3+s1e3wfWa0WlZW55HR6glwpAABoqAwfj5wgBIqLS+X18tUCAABA7S
wWQ/HxMeEuAwgIvQ6uRHS0XY0bR0qSsg4WatmmHB3LL6n1vLbJsZp4R6rSvlp1VFbmksPhDGmtAAAg+Mzc6xAOISRomAAAABAoMzdMwDfR6+BK2e1WxcTYZbFc2LwlJ++03tt/UrnHz+lYQYmcLo/skVa1TYpVpzbNNODGVkrtmCBJ8nq9Ki11smIIAIB6ysy9jjXcBQAAAAAAAFyrnE6PXK5KRUdHym63KbVjgj/8uRyv1yen0y2Hw8U7hgAAQEgQDgEAAAAAAISQz+dTaalTDodTdrtNNluErFaLIiL+8yroykqvPB6v3O5KOZ1ukQkBAIBQIhwCAAAAAACoAz6fVFHhVkWFO9ylAACABs5S+xAAAAAAAAAAAABcKwiHAAAAAAAAAAAAGhDCIQAAAAAAAAAAgAaEcAgAAAAAAAAAAKABIRwCAAAAAAAAAABoQAiHAAAAAAAAAAAAGhDCIQAAAAAAAAAAgAaEcAgAAAAAAAAAAKABIRwCAAAAAAAAAABoQAiHAAAAAAAAAAAAGhBruAuoD3JycvTMM88oKytLPp9PaWlpeuSRR9StW7dLxrpcLq1YsUIbNmzQiRMnFBsbq9TUVM2ZM0fJycmXvcapU6e0bt067dixQ0ePHpXFYlGXLl3085//XP379w/l7QEAAAAAAAAAgAaEcKgWn3zyiUaPHq2WLVtq6tSp8nq9WrNmje69916tXbtWHTp08I91u92aPHmysrKyNGrUKKWkpOj8+fPav3+/SkpKagyH3nnnHb300ksaMmSIRowYIY/Ho40bN+q+++7T7373O/3oRz+qi9sFAAAAAAAAAADXOMPn8/nCXYSZTZo0Sfv27dOWLVsUFxcnSSosLNTQoUM1YMAAPffcc/6xL730khYuXKg1a9aoR48eV3Sdw4cPKz4+Xs2bN/cfc7lcGj58uMrKyvTPf/4zODdUR4qLS+X18tUCAABA7SwWQ/HxMeEuAwgIvQ4AAAACZeZeh3cO1WLvYMmO1AAAIABJREFU3r3q16+fPxiSpMTERPXu3Vvbt2+Xw+GQJHm9Xq1atUpDhgxRjx495PF4VF5eHvB1OnfufFEwJEmRkZG6+eablZ+fr9LS0uDcEAAAAAAAAAAAaNAIh2rhcrnUqFGjS443atRIbrdbhw8fliTl5uaqsLBQKSkpmj17tnr27KmePXtq2LBh+vDDD6/6+kVFRYqKilJUVNRVzwEAAAAAAAAAAFCFcKgW7du31759+1RZWek/5nK5lJ2dLUkqKCiQJB09elSStHLlSu3evVtz5szR3Llz5XK5NHHiRB04cOCKr3306FFt3bpVt956qyIiIoJwNwAAAAAAAAAAoKEjHKrF6NGjdeTIEc2aNUu5ubk6dOiQZs6cqaKiIklSRUWFJPm3l3M4HFq5cqVGjhypkSNH6uWXX5YkLVu27IquW15erl/+8pdq1KiRHn744SDeEQAAAAAAAAAAaMis4S7A7O6++27l5+dr+fLlWr9+vSQpNTVVEyZM0JIlSxQdHS1J/q3n0tPT1bJlS//5rVq1Unp6urKysgK+ZmVlpaZPn67c3Fy99NJLSkpKCuIdAQAAAAAAAACAhoyVQwGYPn263n//fa1evVqbNm3SunXr5PP5JEnt2rWTJCUmJkqSEhISLjk/Pj5e58+fD/h6jz32mHbs2KF58+apX79+3/4GAAAAAAAAAAAAvsLKoQA1bdpUvXr18v/8wQcfKDk5WR06dJAkdenSRTabzf8Ooq8rLCxU8+bNA7rO73//e7355pv6f//v/+mHP/xhcIoHAAAAAAAAAAD4CiuHrsLmzZv18ccfa+zYsbJYLvwRxsTEaNCgQcrKylJeXp5/bF5enrKystS/f3//sfLycuXl5enMmTMXzbts2TKtWLFCU6ZM0dixY+vmZgAAAAAAAAAAQINi+Kr2R0O19uzZo0WLFummm25Ss2bNtH//fr355pvq37+/lixZIqv1P4uvcnNzNWrUKEVHR2vMmDGSpFWrVqmyslIbNmzwvzto165dGjNmjKZOnaoHHnhAkrR161ZNnTpV7dq10y9+8YtL6rjpppuq3bLOrIqLS+X18tUCAABA7SwWQ/HxMeEuAwgIvQ4AAAACZeZeh23lapGUlKSIiAgtX75cDodDrVu31rRp0zRu3LiLgiFJ6tSpk1577TXNnz9fixcvlmEY6tu3r2bMmOEPhi7nwIEDkqQjR45oxowZl3y+atWqehUOAQAAAAAAAAAAc2LlEEKCp+kAAAAQKDM/TQd8E70OAAAAAmXmXod3DgEAAAAAAAAAADQghEMAAAAAAAAAAAANCOEQAAAAAAAAAABAA0I4BAAAAAAAAAAA0IAQDgEAAAAAAAAAADQghEMAAAAAAAAAAAANCOEQAAAAAAAAAABAA0I4BAAAAAAAAAAA0IAQDgEAAAAAAAAAADQghEMAAAAAAAAAAAANCOEQAAAAAAAAAABAA0I4BAAAAAAAAAAA0IAQDgEAAAAAAAAAADQghEMAAAAAAAAAAAANCOEQAAAAAAAAAABAA0I4BAAAAAAAAAAA0IAQDgEAAAAAAAAAADQg1nAXAAAAAAAwr+zsbG3YsEG7du3SiRMn1KxZM914442aNm2a2rdvX+v558+f1x/+8Adt3bpVFRUV6t69u37961/rhhtuqIPqAQAAAFTH8Pl8vnAXgWtPcXGpvF6+WgAAAKidxWIoPj4m3GXgMh588EFlZmYqIyNDKSkpKioq0urVq1VWVqY///nP6tKly2XP9Xq9Gj16tA4ePKgJEyYoLi5Oa9as0alTp/Tmm2+qXbt2dXcjQUKvAwAAgECZudchHEJI0DABAAAgUGZumCBlZmYqNTVVkZGR/mNHjhzRsGHDNHToUM2fP/+y527evFnTp0/XwoULlZGRIUk6c+aMhg4dqkGDBumPf/xjyOsPNnodAAAABMrMvQ7vHAIAAAAAXFZ6evpFwZAktWvXTp07d9bnn39e47lbtmxRQkKCbr31Vv+x5s2b67bbbtM777wjl8sVkpoBAAAA1IxwCAAAAABwRXw+n06fPq24uLgax3322Wf6zne+I4vl4taze/fuKi8v1xdffBHKMgEAAABcBuEQAAAAAOCKbNq0SQUFBbrttttqHFdUVKQWLVpccjwxMVGSVFhYGJL6AAAAANSMcAgAAAAAELC8vDzNmTNHaWlpGjFiRI1jKyoqLtmSTpL/mNPpDEmNAAAAAGpGOAQAAAAACEhRUZEmT56s2NhYLVy4UBERETWOb9SoUbXvFao6ZrfbQ1InAAAAgJpZw10AAAAAAMD8SkpKdP/996ukpESrV69WUlJSree0aNFCRUVFlxyv2k6uans5AAAAAHWLlUMAAAAAgBo5nU5NmTJFR44c0ZIlS9SpU6eAzuvatas+/fRTeb3ei45nZ2crKipK7du3D0W5AAAAAGpBOAQAAAAAuKzKykpNmzZN+/bt08KFC5WWllbtuMLCQuXl5cntdvuPZWRk6PTp0/r73//uP3bmzBm9/fbbuuWWW6p9HxEAAACA0GNbOQAAAADAZ
c2bN0/btm3TLbfconPnzmnjxo0XfT58+HBJ0oIFC7R+/Xq98847at26tSRp6NCh6tmzpx599FHl5uYqLi5Or7/+uiorK/XAAw/U+b0AAAAAuIBwCAAAAABwWQcOHJAkbd++Xdu3b7/k86pwqDoRERFaunSpnn76ab366qtyOp3q3r275s6dqw4dOoSsZgAAAAA1M3w+ny/cReDaU1xcKq+XrxYAAABqZ7EYio+PCXcZQEDodQAAABAoM/c6vHMIAAAAAAAAAACgASEcAgAAAAAAAAAAaEAIhwAAAAAAAAAAABoQwiEAAAAAAAAAAIAGhHAIAAAAAAAAAACgASEcAgAAAAAAAAAAaEAIhwAAAAAAAAAAABoQwiEAAAAAAAAAAIAGhHAoADk5OZowYYLS09OVlpam8ePH67PPPqt2rMvl0pIlS5SRkaHu3burf//+mjRpkvLz8wO61tq1a3Xbbbepe/fuuvXWW/Xqq68G81YAAAAAAAAAAEADZw13AWb3ySefaPTo0WrZsqWmTp0qr9erNWvW6N5779XatWvVoUMH/1i3263JkycrKytLo0aNUkpKis6fP6/9+/erpKREycnJNV7rT3/6k/73f/9XQ4cO1X333ae9e/fqySefVHl5uSZNmhTqWwUAAAAAAAAAAA2A4fP5fOEuwswmTZqkffv2acuWLYqLi5MkFRYWaujQoRowYICee+45/9iXXnpJCxcu1Jo1a9SjR48ruk5FRYVuvvlm9ezZUy+++KL/+K9+9Su988472rFjh5o2bRqcm6oDxcWl8nr5agEAAKB2Fouh+PiYcJcBBIReBwAAAIEyc6/DtnK12Lt3r/r16+cPhiQpMTFRvXv31vbt2+VwOCRJXq9Xq1at0pAhQ9SjRw95PB6Vl5cHfJ1du3bp3LlzGj169EXH77nnHpWVlWnHjh1BuR8AAAAAAAAAANCwEQ7VwuVyqVGjRpccb9Sokdxutw4fPixJys3NVWFhoVJSUjR79mz17NlTPXv21LBhw/Thhx/Wep1PP/1UkpSamnrR8RtuuEEWi+Wy7zgCAAAAAAAAAAC4EoRDtWjfvr327dunyspK/zGXy6Xs7GxJUkFBgSTp6NGjkqSVK1dq9+7dmjNnjubOnSuXy6WJEyfqwIEDNV6nqKhIERERio+Pv+h4ZGSkmjVrpsLCwmDeFgAAAAAAAAAAaKAIh2oxevRoHTlyRLNmzVJubq4OHTqkmTNnqqioSNKFdwVJ8m8v53A4tHLlSo0cOVIjR47Uyy+/LElatmxZjdepqKiQzWar9jO73e6/DgAAAAAAAAAAwLdhDXcBZnf33XcrPz9fy5cv1/r16yVd2PptwoQJWrJkiaKjoyXJv/Vcenq6WrZs6T+/VatWSk9PV1ZWVo3XqdqmrjpOp7Pare0AAAAAAAAAAACuFCuHAjB9+nS9//77Wr16tTZt2qR169bJ5/NJktq1aydJSkxMlCQlJCRccn58fLzOnz9f4zVatGihyspKFRcXX3Tc5XLp3Llz/vkBAAAAAAAAAAC+DcKhADVt2lS9evVSSkqKJOmDDz5QcnKyOnToIEnq0qWLbDab/x1EX1dYWKjmzZvXOH+3bt0kSTk5ORcdz8nJkdfrVdeuXYNxGwAAAAAAAAAAoIEjHLoKmzdv1scff6yxY8fKYrnwRxgTE6NBgwYpKytLeXl5/rF5eXnKyspS//79/cfKy8uVl5enM2fO+I/17dtXzZo10+uvv37RtV5//XVFRUVp8ODBob0pAAAAAAAAAADQIEQ8/vjjj4e7CDPbs2ePZs2apaKiIn3++ed6/fXXtXDhQg0YMECzZs3yh0OSlJKSojfffFN//etf5fF49NFHH+m3v/2tIiMjNX/+fMXExEiSMjMz9eMf/1hRUVHq06ePJMlqtapx48Z65ZVXdPjwYZWWlmrVqlXauHGjHnjgAQ0YMCAs93+1ystd+mrnPQAAAKBGhmGocePIcJcBBIReBwAAAIEyc69jDXcBZpeUlKSIiAgtX75cDodDrVu31rRp0zRu3DhZrRf/8XXq1Emvvfaa5s+fr8WLF8swDPXt21czZsxQUlJSrde65557ZLPZtGLFCm3btk0tW7bUo48+qrFjx4bq9gAAAAAAAAAAQANj+Hw884TgKy4uldcb/K+WYUh2u002W4SsVosiIv6zcquy0iuPxyu3u1JOp5un+QAAAOoJi8VQfHxMuMsAAkKvAwAAgECZuddh5RDqBcMwFB0dKbvdJovFqHaM1RohqzVCjRrZFB1tl9PplsPhEvknAAAAALOi1wEAAEA4EA7B9Ox2q2Ji7P73O+Xknda7+08q9/g5Hcs/L6e7UnZbhNomN1GnNs008MZWSu2YoKioSNntVpWWOuV0esJ8FwAAAABwMXodAAAAhAvbyiEkgrXVQnS03f/CrqyDhVq2MUfHCkpqPa9tcqwm3pGqtJRESVJZmUsOh/Nb1wMAAIDgM/NWC8A30esAAAAgUGbudQiHEBLBaJiqmiWPx6sX12fr7Q+PXvEcGX2v1+QRPWS1WmiaAAAATMrMDRPwTfQ6AAAACJSZex1L7UOAume3W/3N0hMrdl1VsyRJb394VE+s2CWPx6vGjS9svQAAAAAA4UKvAwAAADMgHILpGIahmBi7JOnF9dnKPFj4rebLPFioF9dnS5JiYuwyjOpf8goAAAAAoUSvAwAAALMgHILpREdHymKxKPNg4VU/RfdNb394VFkHC2WxWBQdHRmUOQEAAADgStDrAAAAwCwIh2AqhiHZ7TZJ0vKNOUGde9mmC/PZ7TbxQB0AAACAukSvAwAAADMhHIKp2O02WSyGcvJO61hBSVDnPpZfopy807JYDH9TBgAAAAB1gV4HAAAAZkI4BFOx2SIkSe/uPxmS+d/7at6q6wAAAABAXaDXAQAAgJkQDsFUrNYLX8nc4+dCMn/VvFXXAQAAAIC6QK8DAAAAM+FfjTCViIgLX8lj+edDMn/V9g1V1wEAAACAukCvAwAAADPhX40wJae7MjTzujwhmRcAAAAAAkGvAwAAADMgHIIp2UO0T7Y90hqSeQEAAAAgEPQ6AAAAMAPCIZhKZaVXktQ2uUlI5m+bFHvRdQAAAACgLtDrAAAAwEwIh2AqHs+FRqZTm2Yhmb9q3qrrAAAAAEBdoNcBAACAmRAOwVTcX+2/PfDGViGZf8BX87pDtM83AAAAAFSHXgcAAABmQjgEU3E63fJ6fUrtmODfFiFY2ibHKrVjgrxen5xOd1DnBgAAAICa0OsAAADATAiHYCo+n/zNzMThqUGde+IdF+ZzOt3y+YI6NQAAAADUiF4HAAAAZkI4BNNxOFzyer1KS0lURt/rgzJnRt/rlZaSKK/XK4fDFZQ5AQAAAOBK0OsAAADALAiHYDo+n0+lpU5J0uQRPZSekvit5ktPSdTkET0kSaWlTvl4lA4AAABAGNDrAAAAwCwIh2BKTqdHZWUuWa0WzR7f56qfqsvoe71mj+8jq9WisjKXnE5PkCsFAAAAgMDR6wAAAMAMDB+PFiEEiotL5fV++69WdLRd
jRtHSpKyDhZq2aYcHcsvqfW8tsmxmnhHqtK+ehKvrMwlh8P5resB/j979x4dZX3ncfzzzCVPyEy4J4Q7iiWgOYS0CFIjZ5duNRQEPAWii1AUq672VIOnylZxtfWyu6UUaj2UyuW47iJ4IdwvK14WIgiFIBBIs9UawyohJEhgJslcMrN/YKYdEiRAJjOZeb/O8Q9+z+/5zfepOTVfPvP8fgAAoO1ZLIZ69HBGuwygVeh1AAAA0Fqx3OsQDiEi2qphkiTTtMnpNGWxnH/RreTTahUd+lKfHD+jipPn5PH6ZSbZNKBXqq7r31W52X2UNbinJCkQCMjl8vAtOgAAgBgWyw0TcCF6HQAAALRWLPc6hEOIiLZsmCTJMAw5HEkyTbssFuOS8wOBoDwen9xuL/tuAwAAxLhYbpiAC9HrAAAAoLViudexRbsAoDWaDm51uz0yTbvsdqtsNous1r8em9XYGJDfH5DP1yiPxyf6JAAAAACxjl4HAAAA0UA4hA4lGJQaGnxqaPBFuxQAAAAAaDP0OgAAAGhPhEOIiNZshwAAAABI/O6IjoWfVwAAALRWLP/uyJlDAAAAAAAAAAAACcRy6SkAAAAAAAAAAACIF4RDAAAAAAAAAAAACYRwCAAAAAAAAAAAIIEQDgEAAAAAAAAAACQQwiEAAAAAAAAAAIAEQjgEAAAAAAAAAACQQAiHAAAAAAAAAAAAEgjhEAAAAAAAAAAAQAIhHAIAAAAAAAAAAEgghEMAAAAAAAAAAAAJhHAIAAAAAAAAAAAggRAOAQAAAAAAAAAAJBDCIQAAAAAAAAAAgARCOAQAAAAAAAAAAJBACIcAAAAAAAAAAAASCOEQAAAAAAAAAABAAiEcAgAAAAAAAAAASCCEQwAAAAAAAAAAAAmEcAgAAAAAAAAAACCBEA4BAAAAAAAAAAAkEMIhAAAAAAAAAACABEI4BAAAAAAAAAAAkEAIhwAAAAAAAAAAABII4RAAAAAAAAAAAEACIRwCAAAAAAAAAABIIIRDAAAAAAAAAAAACYRwCAAAAAAAAAAAIIEQDgEAAAAAAAAAACQQwiEAAAAAAAAAAIAEQjgEAAAAAAAAAACQQAiHAAAAAAAAAAAAEgjhEAAAAAAAAAAAQAIhHAIAAAAAAAAAAEgghEMAAAAAAAAAAAAJhHAIAAAAAAAAAAAggRAOAQAAAAAAAAAAJBDCIQAAAABASHl5uQoKCjR27FhlZ2crLy9Pv/vd71RfXx82r7i4WHfddZeys7N1880367nnnpPb7W62ntfr1a9+9Svl5uZq+PDhmjZtmj788MP2ehwAAAAALTCCwWAw2kUAAAAAAKLvxIkTmjRpklJTU3XnnXeqS5cu+vjjj7V27VqNGzdOS5YskSSVlpYqPz9fgwcP1vTp01VZWakVK1Zo9OjRWrZsWdiac+fO1fbt2zVr1iwNGjRIhYWFOnLkiF599VWNHDkyGo8JAAAAJDxbtAsAAAAAAMSG9evX6+zZs1q1apW+9a1vSZLy8/MVCAS0bt061dbWqkuXLlq4cKE6d+6s1157TU6nU5LUr18/PfXUUyoqKlJubq4k6fDhw9q8ebMef/xxzZkzR5I0ZcoUTZw4UQsWLNDq1auj86AAAABAgmNbOQAAAACAJMnlckmSevToETaelpYmi8Uiu90ul8ul3bt3a9KkSaFgSJImT56slJQUbd26NTS2bds2Wa1W5efnh8ZM09TUqVN18OBBnThxIsJPBAAAAKAlhEMAAAAAAEnSqFGjJElPPvmkSktLdeLECW3ZskWvv/66Zs6cqZSUFJWVlcnv9ysrKyvs3qSkJA0bNkylpaWhsdLSUg0aNCgsRJKk4cOHh64DAAAAaH9sKwcAAAAAkCSNHTtWjzzyiJYuXar33nsvNP7ggw+qoKBAknTq1ClJUnp6erP709LSdODAgdCfT506pbS0tBbnSVJVVVWb1g8AAACgdQiH2kFJSYl+85vf6ODBgwoGg8rJydHPfvYzDRs2rNnc4uJi/epXv9KxY8fkdDo1fvx4FRQUyOFwRKFyAAAAAImmb9++GjlypG677TZ17dpVH3zwgZYuXaq0tDTdfffdamhokHT+TaELmaYZui5JDQ0NF53XdB0AAABA+yMcirCjR4/qH//xH9W7d2/95Cc/USAQ0KpVq3T33XfrzTff1LXXXhuaW1paqtmzZ2vw4MGaN2+eKisrtWLFCpWXl2vZsmVRfIrL99VXbgUCwWiXAQAAgA7AYjHUrRtfhooFmzdv1tNPP63t27crIyNDknTrrbcqGAxqwYIFmjBhgpKTkyVJXq+32f0ejyd0XZKSk5MvOq/pekdDrwMAAIDWiuVeh3AowhYvXqzk5GStXr1a3bp1kyRNmjRJt912m37zm9/opZdeCs1duHChOnfurNdeey20J3e/fv301FNPqaioSLm5uVF5hisRCARpmAAAADoIw5BM0y673SqbzSKr9a9HkzY2BuT3B+TzNcrj8SnIr3hxbdWqVRo2bFgoGGoybtw4rV27VqWlpd+4JdypU6fCtptLS0vTyZMnW5wntbw1Xayj1wEAAOg46HUuznLpKbga+/fv15gxY0LBkHS+ARo1apTef/99ud1uSZLL5dLu3bs1adKksMNaJ0+erJSUFG3durXdawcAAEB8MwxDTqep7t2dSk1NVnKyXTabVYZhhP6x2axKTrYrNTVZ3bs75XSaMgwj2qUjQqqrqxUIBJqN+3w+SZLf79eQIUNks9lUUlISNsfr9aq0tFRDhw4NjQ0dOlTl5eVyuVxhcw8dOiRJLW61DQAAAFwtep1L482hCPN6vS1ulZCcnCyfz6c///nPGjFihMrKyuT3+5WVlRU2LykpScOGDVNpaWl7lQwAAIAEYJo2OZ2mLJbz3xcr+bRauw59qU+On1FF5Vl5fI0y7VYNyOis6/p31S3ZfZQ1uKc6dUqSadrkcnnk8fij/BRoa9dcc42Kior02Wef6ZprrgmNb968WRaLRZmZmUpNTdWYMWO0YcMGPfTQQ6Evt61fv151dXXKy8sL3ZeXl6cVK1ZozZo1mjNnjqTzPdLatWuVnZ2t3r17t+8DAgAAIO7R67QO4VCEXXPNNfr444/V2Ngoq9Uq6XwzdPjwYUkKbbHwTdsqpKWl6cCBA+1UMQAAAOKdw2EqJSVJknSwrErL1peo4uS5ZvMavI3634qv9L8VX2nLh59pQEaq7puUpZzMdHXu3El1dV653Z72Lh8RNGfOHO3cuVMzZszQjBkz1LVrV33wwQfauXOnpk2bpl69ekmSCgoKdOedd2rmzJmaPn26KisrtXLlSuXm5mrs2LGh9bKzs5WXl6eFCxeqpqZGAwcOVGFhob744gs9//zz0XpMAAAAxCl6ndazPvPMM89Eu4h4ZhiG1q1bpy+//FKDBg1SdXW1XnzxRR05ckSBQEB///d/r6FDh6qkpEQ7duzQXXfdFWq4mrz//vv685//rPvvvz9KT3H56uu9CbdHIwAAQEfQ1Cz5/QEtefuQXllfolq3t1X31rq8ev/A/+mrsw36dma
6TNMmwzDk8zVeVU2GYYQaOERX3759dcstt+izzz7T+++/rx07digYDOqee+7R3LlzQ9++bNoqe9++fSosLNSf//xnTZ48Wb/4xS+UlBT+73LcuHGqr6/Xxo0b9c4776h79+567rnnNGbMmGg84lWj1wEAAIhN9DqXhzeHIuyuu+5SZWWlli9frsLCQklSVlaW5syZo9///vdyOBySFNp6zutt/sPq8Xha3JoOAAAAuBymaQs1S79csVfFZVVXtM62jz5X1Vf1mn/v6K/Xa0yIbRcSxfDhw/XKK69cct7IkSO1evXqS84zTVNPPPGEnnjiibYoDwAAAGiGXufyWaJdQCIoKCjQhx9+qP/6r//Shg0b9Pbbbyv49VfNBg0aJOn81nGSVFXV/If21KlTLW43BwAAALRW04GskrS08PAVN0tNisuqtLTw/FbJiXZwKwAAAIDYQa9zZQiH2kmXLl00cuRIZWZmSpJ2796tjIwMXXvttZKkIUOGyGazqaSkJOw+r9er0tJSDR06tN1rBgAAQPxwOJJksVhUXFalbR993iZrbvvocx0sq5LFYpHDEZtbJQAAAACIb/Q6V4ZwKAq2bNmiI0eO6Ec/+lFoz+7U1FSNGTNGGzZskMvlCs1dv3696urqlJeXF61yAQAA0MEZhmSadknS8vUll5h9eZZtOL+eadoVp1+oAwAAABCj6HWuHGcORdgf//hHvfzyy7r55pvVtWtXHTp0SGvXrtUtt9yiWbNmhc0tKCjQnXfeqZkzZ2r69OmqrKzUypUrlZubq7Fjx0bpCQAAANDRmaZdFouhkk+rVXHyXJuuXVF5TiWfVitrcE+Zpl0NDb42XR8AAAAALoZe58oRDkVYr169ZLVatXz5crndbvXr10+PPvqoZs+eLZst/H/+G264QStXrtSCBQv04osvyuFwaOrUqZo7d26UqgcAAEA8sNutkqRdh76MyPpFh75U1uCestutcdcwAQAAAIhd9DpXjnAowgYMGKDly5e3ev7IkSO1evXqCFYEAACARGOznd/K+JPjZyKyftO6TZ8DAAAAAO2BXufKxd8TAQAAAAhjtZ7/tb+i8mxE1m/avqHpcwAAAACgPdDrXLn4eyIAAAAALfL4GiOzrtcfkXUBAAAAoDXodS4f4RAAAACQIMyv9+Nu83WT2K0aAAAAQPTQ61w+wiEAAAAgzjU2BiRJAzI6R2T9Ab1Swz4HAAAAANoDvc6VIxwCAAAA4pzff76Rua5/14is37Ru0+cAAAAAQHug17n4dBpjAAAgAElEQVRyhEMAAABAnPN9vf/2Ldl9IrJ+7tfr+iK0zzcAAAAAtIRe58oRDgEAAABxzuPxKRAIKmtwz9C2CG1lQEaqsgb3VCAQlMfja9O1AQAAAOCb0OtcOcIhAAAAIM4Fgwo1M/dNzmrTte+bdH49j8enYLBNlwYAAACAb0Svc+UIhwAAAIAE4HZ7FQgElJOZrrybBrbJmnk3DVROZroCgYDcbm+brAkAAAAAl4Ne58oQDgEAAAAJIBgMyuXySJIeuGO4vp2ZflXrfTszXQ/cMVyS5HJ5FIzHr9IBAAAAiHn0OleGcAgAAABIEB6PX3V1XtlsFs2/d/QVf6su76aBmn/vaNlsFtXVeeXx+Nu4UgAAAABoPXqdy2cE4zX2QlTV1LgUCPCjBQAAEIscDlMpKUmSpINlVVq2oUQVlecued+AjFTdNylLOV9/E6+uziu323PV9Vgshnr0cF71OkB7oNcBAACIXfQ6rUc4hIigYQIAAIhtpmmT02nKYjm/mUDJp9UqOvSlPjl+RhUnz8nj9ctMsmlAr1Rd17+rcrP7KGtwT0lSIBCQy+Vps2/RxXLDBFyIXgcAACC20eu0DuEQIoKGCQAAIPYZhiGHI0mmaZfFYlxyfiAQlMfjk9vtbdN9t2O5YQIuRK8DAAAQ++h1Ls0W7QIAAAAAREfTwa1ut0emaZfdbpXNZpHV+tejSRsbA/L7A/L5GuXx+MRXywAAAADEOnqdSyMcAgAAABJcMCg1NPjU0OCLdikAAAAA0GbodS7OcukpAAAAAAAAAAAAiBeEQwAAAAAAAAAAAAmEcAgAAAAAAAAAACCBEA4BAAAAAAAAAAAkEMIhAAAAAAAAAACABEI4BAAAAAAAAAAAkEAIhwAAAAAAAAAAABII4RAAAAAAAAAAAEACIRwCAAAAAAAAAABIIIRDAAAAAAAAAAAACYRwCAAAAAAAAAAAIIEQDgEAAAAAAAAAACQQwiEAAAAAAAAAAIAEQjgEAAAAAAAAAACQQAiHAAAAAAAAAAAAEgjhEAAAAAAAAAAAQAIhHAIAAAAAAAAAAEggtmgXkAjKy8u1ePFiHThwQLW1terdu7cmTpyoOXPmqFOnTpKkmTNnat++fc3uzc3N1fLly9u7ZAAAAAAAAAAAEKcIhyLsxIkTmjZtmlJTU3X33XerS5cu+vjjj/XSSy/p6NGjWrJkSWhuRkaG5s6dG3Z/enp6e5cMAAAAAAAAAADiGOFQhK1fv15nz57VqlWr9K1vfUuSlJ+fr0AgoHXr1qm2tlZdunSRJKWmpmry5MnRLBcAAAAAAAAAAMQ5wqEIc7lckqQePXqEjaelpclischut4eN+/1+eTweORyOdqsRAAAAACRp3rx5KiwsvOj1nTt3qlevXpe1LbbX69XixYtDX5zLzMzUo48+qptvvrnN6wcAAADQOoRDETZq1Ci98sorevLJJ/XTn/5UXbt21cGDB/X6669r5syZSklJCc0tLy/XiBEj5PP51LNnT02bNk0PP/xwswAJAAAAACIhPz9fY8aMCRsLBoN65pln1LdvX/Xq1Ss03tptsefNm6ft27dr1qxZGjRokAoLC3X//ffr1Vdf1ciRIyPzIAAAAAC+EeFQhI0dO1aPPPKIli5dqvfeey80/uCDD6qgoCD05/79+2v06NEaMmSI6urqtH37di1ZskTl5eVatGhRNEoHAAAAkGBycnKUk5MTNrZ//37V19fr9ttvDxtvzbbYhw8f1ubNm/X4449rzpw5kqQpU6Zo4sSJWrBggVavXt22DwAAAACgVQiH2kHfvn01cuRI3Xbbberatas++OADLV26VGlpabr77rslSS+88ELYPVOmTNH8+fP1xhtvaPbs2RoxYkQ0SgcAAACQ4DZt2iTDMDRx4sRm1y61Lfa2bdtktVqVn58fGjNNU1OnTtXChQt14sQJ9e7dO2K1AwAAAGgZ4VCEbd68WU8//bS2b9+ujIwMSdKtt96qYDCoBQsWaMKECerWrVuL995zzz164403tHv3bsIhAAAAAO3O5/Np69atysnJUb9+/cKutWZb7NLSUg0aNEhOpzPs3uHDh4euEw4BAAAA7Y9wKMJWrVqlYcOGhYKhJuPGjdPatWtVWlqq7373uy3e29Qk1dbWRrxOAAAAALhQUVGRzpw502xLudZui33q1CmlpaU1W7dprKqqKrIPAAAAAKBFhEMRVl1drS5dujQb9/l8ks5vw3Axx48flyR17949MsUBAAAAwDfYtGmT7Ha7xo8fHz
be2m2xGxoalJSU1Gxd0zRD1wEAAAC0P0u0C4h311xzjY4dO6bPPvssbHzz5s2yWCzKzMyUy+WS1+sNux4MBrVkyRJJUm5ubrvVCwAAAACS5Ha79e677yo3N/eiW2H/rXvuuUeStHv37tBYcnJys15HkjweT+g6AAAAgPbHm0MRNmfOHO3cuVMzZszQjBkz1LVrV33wwQfauXOnpk2bpl69emnv3r167LHHNGHCBA0YMEAej0fvvPOOiouLlZ+frxtuuCHajwEAAAAgwezYsUP19fXNtpS7mJa2xU5LS9PJkyebzT116pQkKT09vQ0qBQAAAHC5CIci7MYbb9Tq1av10ksv6fXXX9eZM2fUt29fFRQU6L777pMk9enTR9/5znf0zjvvqLq6WhaLRddee62effZZ5efnR/kJAAAAACSijRs3KiUlRePGjWvV/Ja2xR46dKj27t0rl8slp9MZGj906JAkadiwYW1YMQAAAIDWIhxqB8OHD9crr7xy0ev9+/fX4sWL27EiAAAAALi406dPa8+ePZowYYI6deoUds3lcikpKSnsLKGLbYudl5enFStWaM2aNZozZ44kyev1au3atcrOzg69bQQAAACgfREOAQAAJBDDkEzTLrvdKpvNIqv1r0dQNjYG5PcH5PM1yuPxKRiMYqEAomrLli3y+/0tbil39OjRVm+LnZ2drby8PC1cuFA1NTUaOHCgCgsL9cUXX+j5559vz0cCAAAA8DeMYJC2H22vpsalQIAfLQAAYoVhGHI4kmSadlksxiXnBwJBeTw+ud1e8esiIs1iMdSjh/PSE9Fu8vPzdfz4ce3atUtWqzXs2vHjx7VgwQIdOXIkbFvs6dOnKz8/X4YR/v8xHo9HixYt0saNG1VbW6vMzEw98sgjuuWWW9rzkdoMvQ4AAABaK5Z7HcIhRAQNEwAAscM0bXI6TVks598SKvm0WrsOfalPjp9RReVZeXyNMu1WDcjorOv6d9Ut2X2UNbinJCkQCMjl8sjj8UfzERDnYrlhAi5ErwMAAIDWiuVeh3AIEUHDBABAbHA4TKWknD8X5GBZlZatL1HFyXOXvG9ARqrum5SlnMx0SVJdnVdutyeitSJxxXLDBFyIXgcAAACtFcu9DuEQIoKGCQCA6GsKhvz+gJYWHta2jz6/7DXybhqoB+4YLpvNQkCEiInlhgm4EL0OAAAAWiuWex1btAsAAABA2zNNWygY+uWKvSouq7qidbZ99LmqvqrX/HtHf71eI1vMAQAAAIg5hiGZpl12u1U2m0VWqyV0rbExIL8/IJ+vUR6PT7wuAUiWS08BAABAR2IYhpxOU5K0tPDwFQdDTYrLqrS08LAkyek0mx02DwAAAADR0tT/dO/uVGpqspKT7bLZrDIMI/SPzWZVcrJdqanJ6t7dSV8DiDeHAAAA4o7DkSSLxaLisqor2kquJds++lzfHd5HOZnpcjiS5HKxvRwAAACA6DJNm5xOUxbL+XcgSj6t1q5DX+qT42dUUXlWHl+jTLtVAzI667r+XXVLdh9lDe6pTp2SZJo2uVwedkZAwuLMIUQE+3ADABAdhiF17+6UxWLo4X9/TxUnz7XZ2gMyUvXyz8YpEAjq9GkXWzGgzcTyPtzAheh1AACIDU1nrErSwbIqLVtf0qr+Z0BGqu6blKWczHRJ4mxVRFQs9zpsKwcAABBHTNMui8VQyafVbRoMSVJF5TmVfFoti8WQadrbdG0AAAAAaK2mYMjvD+jlNz/W03/Y0+r+p6LynJ7+wx69/ObH8vsDSklJksNhRrhiIPYQDgEAAMQRu90qSdp16MuIrF/09bpNnwMAAAAA7ck0baFg6Jcr9l7xVtrbPvpcv1yxNxQQmSYnsCCxEA4BAADEEZvt/K93nxw/E5H1m9Zt+hwAAAAAaC+GYcjpPP+Wz9LCwyouq7qq9YrLqrS08LAkyek0ZRjGVdcIdBR09QAAAHHEaj3/611F5dmIrN+0VUPT5wAAAABAe3E4kmSxWFRcVnXFbwxdaNtHn+tgWZUsFoscjqQ2WRPoCOjqAQAA4pDH1xiZdb3+iKwLAAAAAN/EMBQ6+3T5+pI2XXvZhvPrmaZdvDyEREE4BAAAEIfMCJ0JZCaxDzcAAACA9meadlkshko+rQ7taNBWKirPqeTTalksRiiAAuId4RAAAEAcaWwMSJIGZHSOyPoDeqWGfQ4AAAAAtAf711+A23Xoy4isX/T1uvYIfdEOiDWEQwAAAHHE7z8f2lzXv2tE1m9at+lzAAAAAKA92Gzn/yr7k+NnIrJ+07pNnwPEO37SAQAA4ojv67OGbsnuE5H1c79e1xehM40AAAAAoCVW6/m/yq6oPBuR9Zu2qmv6HCDe8ZMOAAAQRzwenwKBoLIG9wxtAddWBmSkKmtwTwUCQXk8vjZdGwAAAABawxOhL6p5vP6IrAvEKsIhAACAOBIMKhTc3Dc5q03Xvm/S+fU8Hp+CwTZdGgAAAABaxYzQmUBmki0i6wKxinAIAAAgzrjdXgUCAeVkpivvpoFtsmbeTQOVk5muQCAgt9vbJmsCAAAAQGs1Np4/93RARueIrN+080LT5wDxjnAIAAAgzgSDQblcHknSA3cM17cz069qvW9npuuBO4ZLklwuj4K8NgQAAACgnfn950Ob6/p3jcj6Tes2fQ4Q7wiHAAAA4pDH41ddnVc2m0Xz7x19xW8Q5d00UPPvHS2bzaK6Oq88HvbhBgAAAND+fF+fNXRLdp+IrJ/79bq+CJ1pBMQaNlIEAACIU273+beHUlKS9PC0Efru8D5atqFEFZXnLnnvgIxU3TcpSzlfv3VUV+cNrQcAAAAA7c3j8cnhMJU1uKcG9EpVxclL9zWtNSAjVVmDeyoQCIbOcAXinRFkXxBEQE2NS4EAP1oAAMQC07TJ6TRlsZx/abzk02oVHfpSnxw/o4qT5+Tx+mUm2TSgV6qu699Vudl9lDW4pyQpEAjI5fLwxhAiymIx1KOHM9plAK1CrwMAQPQ4naY6dUrSwbIqPf2HPW227i/uH6OczHTV13tDW3QDbSGWex3eHAIAAIhzHo9fXm+jHI4kmaZdWYN7hsKfi2n6xpzb7eWMIQAAAAAxwe32yjRtyslMV95NA7Xto8+ves28mwYqJzNdgUBAbre3DaoEOgbCIQAAgAQQDAblcnnkdntkmnbZ7VbZbBZZrX89grKxMSC/PyCfr1Eej09kQgAAAABiSVNf07lzJz1wx3BVfVWv4rKqK17v25npeuCO4ZIkl8vDF+OQUNhWDhHBVgsAAABorVjeagG4EL0OAADR53CYSklJkt8f0NLCw1f0BlHeTQP1wB3DZbNZOGMVERPLvQ7hECKChgkAAACtFcsNE3Aheh0AAGJDU0AkSQfLqrRsQ4kqKs9d8r4BGam6b1KWcjLTJYlgCBEVy70O4RAigoYJAAAArRXLDRNwIXodAABih2na5HSasljOb5dd8mm1ig59qU+On1HFyXPyeP0yk2wa0CtV1/XvqtzsPqHzVwOBgFwujzwefzQfAXEulnsdzhwCAAAAAAAAAHQ4H
o9fXm+jHI4kmaZdWYN7hsKfiwkEgvJ4fHK7vZwxhIRGOAQAAAAAAAAA6JCCwaBcLo/cbo9M0y673SqbzSKr1RKa09gYkN8fkM/XKI/HJzIhgHAIAAAAAAAAANDBBYNSQ4NPDQ2+aJcCdAiWS0/B1SovL1dBQYHGjh2r7Oxs5eXl6Xe/+53q6+vD5hUXF+uuu+5Sdna2br75Zj333HNyu91RqhoAAAAAAAAAAMQj3hyKsBMnTmjatGlKTU3V3XffrS5duujjjz/WSy+9pKNHj2rJkiWSpNLSUs2ePVuDBw/WvHnzVFlZqRUrVqi8vFzLli2L8lMAAAAAAAAAAIB4QTgUYevXr9fZs2e1atUqfetb35Ik5efnKxAIaN26daqtrVWXLl20cOFCde7cWa+99pqcTqckqV+/fnrqqadUVFSk3NzcaD4GAAAAAAAAAACIE2wrF2Eul0uS1KNHj7DxtLQ0WSwW2e12uVwu7d69W5MmTQoFQ5I0efJkpaSkaOvWre1aMwAAAAAAAAAAiF+EQxE2atQoSdKTTz6p0tJSnThxQlu2bNHrr7+umTNnKiUlRWVlZfL7/crKygq7NykpScOGDVNpaWk0SgcAAAAAAAAAAHGIbeUibOzYsXrkkUe0dOlSvffee6HxBx98UAUFBZKkU6dOSZLS09Ob3Z+WlqYDBw60T7EAAAAAAAAAACDuxW04FAwG9dVXX6mhoUF9+vSJai19+/bVyJEjddttt6lr16764IMPtHTpUqWlpenuu+9WQ0ODpPNvCl3INM3QdQAAAAAAAAAAgKsVd+HQ0aNHtWTJEu3evVv19fUyDEPHjh0LXa+trdWvf/1rSdLPf/5zJScnR7SezZs36+mnn9b27duVkZEhSbr11lsVDAa1YMECTZgwIVSD1+ttdr/H44l4jQAAAAAAAAAAIHHE1ZlD69atU35+vnbs2KG6ujoFg0EFg8GwOV26dFFFRYXefPNNvfvuuxGvadWqVRo2bFgoGGoybtw41dfXq7S0VGlpaZKkqqqqZvefOnWqxe3mAAAAAAAAAAAArkTchEOffPKJ5s+fL7/fr5kzZ+rtt99Wt27dWpw7ZcoUBYNB7dy5M+J1VVdXKxAINBv3+XySJL/fryFDhshms6mkpCRsjtfrVWlpqYYOHRrxOgEAAAAAAAAAQGKIm3Bo5cqV8vl8mjFjhp588kndcMMNslqtLc4dM2aMpPNb0EXaNddco2PHjumzzz4LG9+8ebMsFosyMzOVmpqqMWPGaMOGDXK5XKE569evV11dnfLy8iJeJwAAAAAAAAAASAxxc+bQ3r17ZRiGfvzjH19ybq9evZScnKwTJ05EvK45c+Zo586dmjFjhmbMmKGuXbvqgw8+0M6dOzVt2jT16tVLklRQUKA777xTM2fO1PTp01VZWamVK1cqNzdXY8eOjXidAAAAAAAAAAAgMRjBCw/l6aCGDx8um82m4uLi0Fhubq5qampUWlrabP5NN90kl8vVbCu3SDh8+LBeeukllZaW6syZM+rbt6/uuOMO3XfffbLZ/prP7d+/XwsWLNCxY8fkcDg0fvx4zZ07V06nM+I1trWaGpcCgbj40QIAAECEWSyGevToeL/zxqN58+apsLDwotd37twZ+oJbcXGxfvWrX+nYsWNyOp0aP368CgoK5HA4wu7xer1avHix1q9fr7NnzyozM1OPPvqobr755og+S6TQ6wAAAKC1YrnXiZtwaOTIkfJ4PDp8+LAMw5B08XDI6/UqJydHnTt31p49e6JRbtyjYQIAAEBrxXLDlGgOHjyoioqKsLFgMKhnnnlGffv21ebNmyVJpaWlys/P1+DBg0M7H6xYsUKjR4/WsmXLwu6fO3eutm/frlmzZmnQoEEqLCzUkSNH9Oqrr2rkyJHt9mxthV4HAAAArRXLvU7cbCvXv39//elPf9Jnn32ma6+99hvn7tq1S42NjbruuuvaqToAAAAAiH05OTnKyckJG9u/f7/q6+t1++23h8YWLlyozp0767XXXgvtdNCvXz899dRTKioqUm5urqTzuyhs3rxZjz/+uObMmSNJmjJliiZOnKgFCxZo9erV7fRkAAAAAP6WJdoFtJWxY8cqGAzq1Vdf/cZ5LpdLv/71r2UYhr73ve+1U3UAAAAA0DFt2rRJhmFo4sSJks73VLt379akSZPCtsCePHmyUlJStHXr1tDYtm3bZLValZ+fHxozTVNTp07VwYMH2+UcWAAAAADNxU049KMf/Uipqal64403tGjRIp09ezbsekNDg/77v/9b06ZN01/+8hf17NlT06dPj1K1AAAAABD7fD6ftm7dqpycHPXr10+SVFZWJr/fr6ysrLC5SUlJGjZsWNi23qWlpRo0aFCzc1SHDx8eug4AAACg/cXNtnLdu3fX4sWL9dBDD2np0qVatmyZmo5Tys3N1ZkzZ9TY2KhgMKiUlBT99re/VUpKSpSrBgAAAIDYVVRUpDNnzoRtKXfq1ClJUnp6erP5aWlpOnDgQNjctLS0FudJUlVVVVuXDAAAAKAV4ubNIUn67ne/qzVr1mjUqFHy+/2hMKi6ulp+v1/BYFCjRo3SmjVrmu2jDQAAAAAIt2nTJtntdo0fPz401tDQIOn8m0IXMk0zdL1p7sXm/e1aAAAAANpX3Lw51CQzM1OvvvqqvvjiCxUXF6uqqkqNjY1KS0vTt7/9bQ0cODDaJQIAAABAzHO73Xr33XeVm5urbt26hcaTk5MlSV6vt9k9Ho8ndL1p7sXm/e1aAAAAANpX3IVDTfr27au+fftGuwwAAAAA6JB27Nih+vr6sC3lpG/eEu7UqVNh282lpaXp5MmTLc6TWt6aDgAAAEDkxdW2cgAAAACAtrFx40alpKRo3LhxYeNDhgyRzWZTSUlJ2LjX61VpaamGDh0aGhs6dKjKy8vlcrnC5h46dEiSNGzYsAhVDwAAAOCbEA4BAAAAAMKcPn1ae/bs0fe//3116tQp7FpqaqrGjBmjDRs2hIU+69evV11dnfLy8kJjeXl5amxs1Jo1a0JjXq9Xa9euVXZ2tnr37h35hwEAAADQTFxtKxcMBvX2229ry5YtKisrU21trRobGy863zAMHTt2rB0rBAAAAIDYt2XLFvn9/mZbyjUpKCjQnXfeqZkzZ2r69OmqrKzUypUrlZubq7Fjx4bmZWdnKy8vTwsXLlRNTY0GDhyowsJCffHFF3r++efb63EAAAAAXMAIBoPBaBfRFtxut+6//34VFxertY9kGIZKS0sjXFliqqlxKRCIix8tAAAARJjFYqhHD2e0y8DfyM/P1/Hjx7Vr1y5ZrdYW5+zfv18LFizQsWPH5HA4NH78eM2dO1dOZ/i/S4/Ho0WLFmnjxo2qra1VZmamHnnkEd1yyy3t8Shtjl4HAAAArRXLvU7chEP/9m//ppUrV8pqtWrixInKzc1Vz549L9rINBk1alQ7VZhYaJgAAADQWrHcMAEXotcBAABAa8VyrxM328pt27ZNhmHo5z//uWbMmBHtcgAAAAAAAAAAAGKSJdoFtJWamhpZrVZN
mzYt2qUAAAAAAAAAAADErLgJh9LS0tSpUyclJSVFuxQAAAAAAAAAAICYFTfhUG5urlwulz799NNolwIAAAAAAAAAABCz4iYceuCBB9SlSxc9//zz8vl80S4HAAAAAAAAAAAgJhnBYDAY7SLayuHDh/Xoo4/K4XDo3nvvVVZWlhwOxzfe06dPn3aqLrHU1LgUCMTNjxYAAAAiyGIx1KOHM9plAK1CrwMAAIDWiuVeJ67CIZfLpUWLFuk///M/ZRjGJecbhqFjx461Q2WJh4YJAAAArRXLDRNwIXodAAAAtFYs9zq2aBfQVk6fPq1Zs2aFzhxqTeYVR7kYAAAAAAAAAABAq8RNOPTyyy/rk08+UadOnXTPPfcoNzdXPXv2lNVqjXZpAAAAAAAAAAAAMSNuwqH3339fhmHo+eef1w9+8INolwMAAAAAAAAAABCTLNEuoK3U1NTIbrfrtttui3YpAAAAAAAAAAAAMStuwqH09HTZbDa2kQMAAAAAAAAAAPgGcRMOjRs3TvX19Tpy5Ei0SwEAAAAAAAAAAIhZcRMO/dM//ZPS09P1L//yLzp79my0ywEAAAAAAAAAAIhJRjAYDEa7iLbwxz/+UV988YVeeOEFJSUlafr06Ro+fLgcDsc33nfjjTe2U4WJpabGpUAgLn60AAAAEGEWi6EePZzRLgNoFXodAAAAtFYs9zpxEw4NHTpUhmFc1j2GYejYsWMRqiix0TABAACgtWK5YQIuRK8DAACA1orlXscW7QLa0uXmXHGSiwEAAAAAAAAAALRa3IRDf/rTn6JdAgAAAAAAAAAAQMyzRLsAAAAAAAAAAAAAtB/CIQAAAAAAAAAAgARCOAQAAAAAAAAAAJBAOuSZQ+vWrZMkOZ1O/cM//EPY2OWaMmVKm9XVknnz5qmwsPCi13fu3KlevXpp5syZ2rdvX7Prubm5Wr58eSRLBAAAAAAAAAAACaRDhkPz5s2TYRi65pprQuFQ09jlMAwj4uFQfn6+xowZEzYWDAb1zDPPqG/fvurVq1doPCMjQ3Pnzg2bm56eHtH6AAAAAAAAAABAYumQ4VCfPn0khQcnTWOxJicnRzk5OWFj+/fvV319vW6//faw8dTUVE2ePLk9ywMAAAAAAAAAAAmmQ4ZD7733XqvGYtWmTZtkGIYmTpzY7Jrf75fH45HD4YhCZQAAAAAAAAAAIN51yHCoI/P5fNq6datycnLUr1+/sGvl5eUaMWKEfD6fevbsqWnTpunhhx+W3W6PUrUAAAAAAAAAACDedNhwaNasWeratat++9vfRruUy1JUVKQzZ84021Kuf//+Gj16tIYMGaK6ujpt375dS5YsUXl5uRYtWhSlagEAAAAAAAAAQLzpsOHQvn371LNnz2iXcdk2bdoku92u8ePHh42/8MILYX+eMmWK5s+frzfeeEOzZ8/WiBEj2rNMAAAAAAAAAAAQpyzRLiCRuN1uvfvuu8rNzVW3bt0uOf+ee+6RJO3evTvSpQEAAAAAAAAAgARBONSOduzYofr6+mZbyl1M7969JUm1tbWRLBvhAQUAACAASURBVAsAAAAAAAAAACQQwqF2tHHjRqWkpGjcuHGtmn/8+HFJUvfu3SNZFgAAAAAAAAAASCCEQ+3k9OnT2rNnj77//e+rU6dOYddcLpe8Xm/YWDAY1JIlSyRJubm57VYnAAAAAAAAAACIb7ZoF3A1PB6P1q1bd1VrTJkypY2q+WZbtmyR3+9vcUu5o0eP6rHHHtOECRM0YMAAeTwevfPOOyouLlZ+fr5uuOGGdqkRAAAAAAAAAADEPyMYDAajXcSVGDp0qAzDuKo1DMPQsWPH2qiib5afn6/jx49r165dslqtYdeOHz+uBQsW6MiRI6qurpbFYtG1116r6dOnKz8//6qfMxpqalwKBDrkjxYAAADamcViqEcPZ7TLAFqFXgcAAACtFcu9TocPh66mfMMwVFpa2oZVoQkNEwAAAForlhsm4EL0OgAAAGitWO51OvS2ct26ddObb74Z7TIAAAAAAAAAAAA6jA4dDlksFvXt2zfaZQAAAAAAAAAAAHQYlmgXAAAAAAAAAAAAgPZDOAQAAAAAAAAAAJBACIcAAAAAAAAAAAASCOEQAAAAAAAAAABAArFFu4ArNWXKFKWmpka7DAAAAACIO0ePHtVLL72k4uJieTwe9e/fX9OnT9esWbMkSTNnztS+ffua3Zebm6vly5eHjXm9Xi1evFjr16/X2bNnlZmZqUcffVQ333xzuzwLAAAAgOY6bDj0r//6r9EuAQAAAADiTlFRkR588EFdf/31euihh5SSkqKKigpVVlaGzcvIyNDcuXPDxtLT05utN2/ePG3fvl2zZs3SoEGDVFhYqPvvv1+vvvqqRo4cGdFnAQAAANAyIxgMBqNdBOJPTY1LgQA/WgAAALg0i8VQjx7OaJcBSS6XS7fddptycnL029/+VhZLyzuRz5w5U1999ZU2bdr0jesdPnxY06ZN0+OPP645c+ZIkjwejyZOnKgePXpo9erVbf4MkUavAwAAgNaK5V6HM4cAAAAAAJKkjRs3qrq6WgUFBbJYLKqrq1MgELjofL/fL7fbfdHr27Ztk9VqVX5+fmjMNE1NnTpVBw8e1IkTJ9q0fgAAAACt02G3lQMA4FIMQzJNu+x2q2w2i6zWv34norExIL8/IJ+vUR6PT7xHCwBob16vV9XV1bLb7UpLSwu75na79bvf/U4ffvihLBaL/u7v/k4PPvigkpOTI1rTnj175HQ6dfLkST300EMqLy9XSkqKJk2apJ///OcyTTM0t7y8XCNGjJDP51PPnj01bdo0Pfzww7Lb7aE5paWlGjRokJzO8G9LDh8+PHS9d+/eEX0mAAAAAM0RDgEA4o5hGHI4kmSadlksRotzbDarbDarkpPtcjhMeTw+ud1esdsqAKC9vPnmm3ruuec0ZcoUvfjii2HXHnjgAR04cCD036WysjIdOHBA//Ef/yHDaPm/bW2hvLxcjY2NeuihhzR16lQ99thj2rdvn1577TWdO3dOCxculCT1799fo0eP1pAhQ1RXV6ft27dryZIlKi8v16JFi0LrnTp1qlnwJSk0VlVVFbFnAQAAAHBxhEMAgLhimjY5nWbojISST6u169CX+uT4GVVUnpXH1yjTbtWAjM66rn9X3ZLdR1mDe6pTpySZpk0ul0cejz/KTwEASARFRUWSpNtvvz1s/N1339X+/ftlsVg0ceJEJScna926ddq/f7/Wr1+vKVOmRKymuro61dfX684779RTTz0lSbr11lvl9Xq1Zs0a/fSnP9WgQYP0wgsvhN03ZcoUzZ8/X2+88YZmz56tESNGSJIaGhqUlJTU7HOa3kBqaGiI2LMAAAAAuDjCIQBA3HA4TKWknP8LqINlVVq2vkQVJ881m9fgbdT/Vnyl/634Sls+/EwDMlJ136Qs5WSmq3PnTqqr88rt9rR3+QCABPOXv/xFknTDDTeEjW/atEmGYejHP/6xCgoKJEnXX3+9nn32WW3cuDGi4VDTtnUTJ04MG7/99tu1Zs0affz
xxxo0aFCL995zzz164403tHv37lA4lJycLK/X22yux+MJ+zwAAAAA7cty6SkAAMS+pmDI7w/o5Tc/1tN/2NNiMNSSispzevoPe/Tymx/L7w8oJSVJDod56RsBALgKp0+fVnJysrp06RI2vnfvXknS1KlTQ2OTJ0+WdH57uUhKT0+XJPXo0SNsvHv37pKk2trai97bdHbQ385JS0vTqVOnms1tGmv6PAAAAADti3AIANDhmaYtFAz9csVebfvo8ytaZ9tHn+uXK/aGAiLT5AVbAEDk1NfXh7ZBbfJ///d/On36tHr37q3+/fuHxlNSUtS5c2edOXMmojU1vcV08uTJsPGms4GaQqKWHD9+vNmcoUOHqry8XC6XK2zuoUOHJEnDhg27+qIBAAAAXLa4C4cqKyv14osvasKECcrJydH1118fdr22tla///3vtXTpUvn9nCkBAB2dYRhyOs+/5bO08LCKy67uYOvisiotLTwsSXI6zYge+g0ASGxdunRRXV2dzp49Gxr76KOPJEk5OTnN5vv9fjkcjojWNH78eEnSW2+9FTb+1ltvyWazadSoUXK5XM22igsGg1qyZIkkKTc3NzSel5enxsZGrVmzJjTm9Xq1du1aZWdnh942AgAAANC+4uor0R9++KEeffRRuVwuBYNBSWr2l3pdunTRjh07dPToUV133XX63ve+F41SAQBtxOFIksViUXFZ1RW/MXShbR99ru8O76OczHQ5HElyuTh/CADQ9q6//noVFRXprbfe0r333qtAIKC33npLhmFo9OjRYXNPnz6turo6DR48OOI1/fCHP9Tbb7+txsZG3Xjjjdq3b5+2bdumBx54QL169dLevXv12GOPacKECRowYIA8Ho/eeecdFRcXKz8/P+wMpezsbOXl5WnhwoWqqanRwIEDVVhYqC+++ELPP/98RJ8FAAAAwMXFTTh04sQJ/fSnP5Xb7da4ceM0ZcoUzZ8/P+xbeE1++MMfqqSkRP/zP/9DOAQAHZhhSKZplyQtX1/Spmsv21Cil382TqZpl9vt0dffOQAAoM3ccccd2rVrl379619r9+7dOn36tI4dOyaHw6G8vLywufv375ekiIdDkvTss8+qT58+Wrt2rXbs2KE+ffron//5nzV79mxJUp8+ffSd73xH77zzjqqrq2WxWHTttdfq2WefVX5+frP1/v3f/12LFi3Shg0bVFtbq8zMTP3+97/XjTfeGPFnAQAAANCyuAmHVqxYIbfbrfHjx+s3v/mNJOkXv/hFi3Obtjk4cuRIu9UHAGh7pmmXxWKo5NNqVZw816ZrV1SeU8mn1coa3FOmaVdDg69N1wcA4Ac/+IF27dqlwsJCFRUVSZJM09Szzz6rzp07h83dsmVLi28URYLdbtdPfvIT/eQnP2nxev/+/bV48eJWr2eapp544gk98cQTbVUiAAAAgKsUN+FQUVGRDMPQI488csm5/fv3V1JS0v+zd+/RUdXn/sc/ezKTCZkEhJCAURIUDzdDwh2RQLug1aAYsCKgNgjFCgddKtha9KeuahUrUkSrUpSLYtsj3pCLFSq1p0dEQYygkYiVc2JQzIUggZkkc//9kWbqQCABMpfMvF9rZS3z3c9897N1XJlnnu/+bn399ddhyAwAECoWS4Ik6d09B0My/7Y9B5XTq6sslgSaQwCAkHjkkUc0efJkffzxx+rYsaNGjhypHj16BMW4XC6lpqZq0qRJGjNmTIQyBQAAABBLYqY59O233yopKUk9e/ZsVXxycrLsdntokwIAhJTZbJIkfXngSEjmb5q36TwAAITCkCFDNGTIkJMeT0xM1G9+85swZgQAAAAg1sXMt12GYcjn87Uq1uPxyG63y2azhTgrAEAoJSQ0/hkrrzjx+XJtoWmruqbzAAAAAAAAALEgZu4cOu+887R//34dPHhQmZmZp4z98MMP5fF4Wn2XEQAgujnd3tDM6/KEZF4AAI7ncrm0fft2lZSUqKamRpKUlpamnJwcXXrppUpMTIxwhgAAAABiScw0h0aOHKn9+/frpZde0vz5808a53a7tXTpUhmGodGjR4cxQwBAqFgtCWpwtX2DyJoYM38mAQBR7I9//KOeeuop1dbWNnu8U6dOuuWWW1RUVBTmzAAAAADEqpjZJ2fGjBmyWCxatWqVXnnllWZjPvvsM82cOVN79uyRzWbT9ddfH+YsAQBtyett3E40q3vHkMyf1S016DwAALS1//f//p8efvhhHTlyRH6/X926dVNubq5yc3PVrVs3+f1+HTlyRAsXLtTdd98d6XQBAAAAxIiYWRJ93nnn6aGHHtKCBQt0//336/HHH9exY43Pipg2bZq++eYbHTp0SH6/X2azWY8++qi6dOkS4awBAGfD4/HJbE7QRT3O0Rfl37X5/Bf1OCdwHgAA2tqmTZv02muvSZIKCwt1yy23KDs7OyimvLxczzzzjN544w298cYbuvTSS3XVVVdFIl0AAAAAMSRm7hySGguq5557TllZWTp8+LDcbrf8fr92796t6upq+f1+ZWdn67nnntO4ceMinS4A4Cy5//WsodF5p37W3JnK/9e87hA90wgAEN/+/Oc/yzAM/fSnP9WiRYtOaAxJUlZWln7729/qpz/9qfx+v/785z9HIFMAAAAAscbw+/3+SCfR1vx+vz788EMVFxerqqpKXq9X6enpGjx4sEaMGKGEhIRIpxjzamrs8vli7q0FIMoYhtSlS4pMJkO3LHpH5ZXH2mzurO6pevqXY+Xz+XX4sF2x99cSAKKHyWQoLS0l0mmE3ZAhQ1RfX6/33ntPnTt3PmXsd999p0svvVTJycn66KOPwpQhmkOtAwAAgNaK5lonZraV+z7DMDR8+HANHz480qkAAELI75ecTrc6dEjUTRNzdP+z77fZ3DcV5khqnJ/GEAAgVFJTU1tsDElS586d1bFjR3m93M0KAAAA4OzF1LZyAID443C45PP5NKhPhgouOXE7njNRcEm2BvXJkM/nk8PhapM5AQA43gUXXCC73S6Hw9FirMPhkN1u1wUXXBCGzAAAAADEuphpDtXU1GjNmjXatGlTi7EbNmzQmjVrdPjw4TBkBgAIJb/fL7vdKUmafXWuBvfJOKv5BvfJ0OyrcyVJdrtTMbj7KgAgSlxzzTXyer364x//2GLsn/70J3m9Xl1zzTVhyAwAAABArIuZbeU2bNigRYsW6dZbb20x9vPPP9fq1aslSdOnTw9pXgsWLNC6detOevx//ud/1K1bN0lScXGxHnvsMe3du1cpKSkaP3685s2bJ5vNFtIcAaC9czo9qqtzKTk5Uff9bISWr/tEmz/46rTnKbgkW7OvzpXZbFJdnUtOpycE2QIA0Oi6667Thx9+qCeeeEJut1szZ8484bN/fX29Vq5cqWXLlunKK6/UtGnTIpQtACCaGIZktVpksSTIbDYpIeHf67+9Xp88Hp/cbi/bZAMATsrwx8iS6KKiIu3atUubNm1Sr169Thn7xRdfqLCwUCNGjNALL7wQ0rw+/vhjlZeXB435/X79+te/1nnnnac333
xTklRaWqqpU6eqV69emjJliioqKrRq1SqNGDFCK1asCGmOocBDWgFEgs1mVXJyoiTp431VWrGhROUVx1p8XVb3VN1UmKNB/7rrqK7OJYfDGdJcAQD/Fs0PaQ2lu+++W5K0detW2e12JSUlKScnRxkZjX+PqqqqVFJSooaGBqWmpmrcuHHNzmMYhhYuXBi2vOMdtQ6ASDIMQzZboqxWi0wmo8V4n88vp9Mth8PFrggAEAHRXOvETHPoBz/4gY4cOaI9e/a0Kj4vL09paWl65513QpzZiXbt2qUbbrhB8+bN05w5cyRJP//5z1VaWqrNmzcrJaXxzfLKK6/o3nvv1cqVK5Wfnx/2PM8GBRPOFqugcKasVrNSUqwymRrfMyX7D2nbnoP68sARlVcek9PlkTXRrKxuqbqoxznKz8tUTq+ukiSfzye73ckdQwAQZtFcMIVS3759ZRhGq7+sOz626XfDMFRaWhqqNHEcah0AkdJcrfNuU61TcVROt1dWS4KyunfURT3O0WhqHQCIuGiudWJmW7mamppAU6U1OnTooEOHDoUwo5PbtGmTDMPQhAkTJEl2u13bt2/XjTfeGHQNEydO1MKFC/XWW2+1u+YQcKZaswrKbE6Q2ZygpCSLbDYrq6AQxOn0yOXyBt5HOb26Bgqik2E1HQAgEiZNmiTDaHnVNwAAJ+ySsL5E5ZUn7pLQ4PLqi/Lv9EX5d/rLe/8XtEtCx44d2CUBABAQM82hlJQUHTt2TE6nU1ar9ZSxTqdTx44dO61mUltxu9166623NGjQIJ1//vmSpH379snj8SgnJycoNjExUf369WMVIOLGma6C6tAhUVarmVVQCPD7/bLbnXI4nNyBBgCIWr/97W8jnQIAoB1oagx5PL7Tfr5qecUx3f/s+4HnqzY1mGgQAQBipjn0H//xH9q1a5f+/ve/q6Cg4JSx77zzjrxery644IIwZfdv27Zt05EjR3TVVVcFxqqrqyUpsLf496Wnp+ujjz4KW35ApLAKCqHg90sNDW41NLgjnQoAAAAAnDar1RxoDP1m1Q4V76s6o3k2f/CVqr6r130/G/Gv+bwsrgSAOGdqOaR9GDt2rPx+vxYtWqTKysqTxlVWVmrRokUyDEM/+tGPwphho02bNslisWj8+PGBsYaGBkmNdwodz2q1Bo4Dser7q6CefmW37n/2/WYbQ81pWgX19Cu75fH4lJycKJvt1HcPAgAAAAAQ7QzDUEpKY327fN0nZ9wYalK8r0rL130iSUpJsbK1KQDEuZi5c2jatGl64YUX9O2332rSpEmaPXu2fvjDHyozM1OSdPDgQf3973/Xs88+q++++07du3fX9ddfH9YcHQ6H/va3vyk/P1+dO3cOjCclJUmSXC7XCa9xOp2B40AsYhUUAACAdOjQIW3ZskUlJSWqqamRJKWlpSknJ0eXX365unY99fPzAACxx2ZLlMlkUvG+qtPaSu5UNn/wlS7NzdSgPhmy2RJlt7PzBgDEq5hpDnXo0EFPP/20brrpJn333Xd69NFH9eijj54Q5/f71blzZy1btkzJyclhzXHr1q2qr68P2lJOatw6TpKqqk78Ury6urrZ7eaAWBCqVVC3XDtQKSlWuVxe+XmQDAAAiGJer1dPPPGEVq9eLY+ncWFL0+cXwzD0xhtv6Le//a1+9rOf6bbbblNCQkIk0wUAhIlhSFarRZK0cn1Jm869YkOJnv7lWFmtFjkcTp6/CgBxKma2lZOkiy++WOvWrdNVV12lhIQE+f3+oB+z2axJkybpjTfeUL9+/cKe38aNG5WcnKyxY8cGjffu3Vtms1klJcF/7F0ul0pLS9W3b99wpgmETahWQX28r0omk0k224lbNQIAAESTu+66S88995zcbrcsFosGDRqkK664QldccYUGDRoki8Uit9utZ599VgsWLIh0ugCAMLFaLTKZDJXsP9Tqbddbq7zimEr2H5LJZAQaUACA+BMzdw416d69ux577DE9+OCDKikpUXV1tQzDUHp6unJyciK2Rdvhw4f1/vvv68orr1SHDh2CjqWmpmrkyJHasGGD5s6dq5SUFEnS+vXrVVdXp4KCgkikDIQUq6AAAEC827p1q958801J0syZM/Wf//mf6tixY1DMsWPHtGzZMq1atUqbNm1SQUGBxo0bF4l0AQBhZLE03in67p6DIZl/256DyunVVRZLghoa3CE5BwAgusVcc6hJhw4dNGzYsEinEfCXv/xFHo/nhC3lmsybN0/Tpk1TUVGRpkyZooqKCq1evVr5+fkaM2ZMmLMFQi8cq6ByenWV1Wrhgy4AAIhKr776qgzD0OzZs3XHHXc0G5Oamqq77rpLiYmJ+sMf/qBXXnmF5hAAxAGzuXGzny8PHAnJ/E3zNp0HABB/+AsQJhs3blRaWpouvfTSZo9ffPHFWr16taxWqx555BG9/PLLmjx5sp544okwZwqERzhWQX3/PAAAANHm008/lclk0qxZs1qMnTVrlkwmkz799NMwZAYAiLSEhMav7MorjoZk/qZFmk3nAQDEn5i9c8jpdKq2tjbwUNeTyczMDEs+a9eubTFm6NCheumll8KQDRB5rIICAADxrra2VikpKUpNTW0xNjU1VampqaqtrQ1DZgCAaOF0e0Mzr+vU35cBAGJfTDWH6uvrtWLFCm3atEnl5eUtxhuGob1794YhMwDHYxUUAACId506ddKRI0dkt9sDzx09mWPHjunYsWPq3LlzmLIDAEQDqyVBDa62bxBZE2PqK0EAwBmImW9Njx49qilTpuiZZ57RV199Jb/f3+KPz+eLdNpA3GMVFAAAiFcDBgyQz+fT888/32Ls888/L5/Pp5ycnNAnBgCIOK+38TurrO4dQzJ/VrfUoPMAAOJPzCwTeOaZZ/TPf/5TZrNZRUVFGjdunDIyMpSQwPNGgGjGKigAABCvfvKTn+i///u/9cwzz8jj8ejnP/+5bDZbUIzdbtdzzz2nZ599VoZhaPLkyRHKFgAQTh6PT2Zzgi7qcY6+KP+uzee/qMc5gfMAAOJTzHx7unXrVhmGoXvuuUfXX399pNMB0AKvt/GDblb3jiH5oMsqKAAAEO0uu+wyjR8/Xm+99ZaWL1+u559/XgMGDFBGRoYkqbKyUiUlJXI6nfL7/briiiv04x//OMJZAwDCwe32KinJotF5mfrLe//X5vPn52UGzgMAiE8x0xyqrKyUyWTSNddcE+lUALQCq6AAAACkRYsWqXv37nrxxRfV0NCgDz/8UIZhSJL8fr8kBXZHmD9/fiRTBQCEkdPpls1mVU6vrsrqlhp4rm5byOqeqpxeXeXz+eV0uttsXgBA+xIzzaFOnTrJ5XLJarVGOhUArcAqKAAAAMlisehXv/qVZsyYob/+9a8qKSlRTU2NJCktLU05OTm67LLL1K1btwhnCgAIJ7+/sUHUoUOibpqYo/uffb/N5r6psPH5dU6nW/9ahwAAiEMx0xwaMmSI/vrXv6qyspLCCWgHWAUFAADwb926dVNRUVGk0wAARBGHwyWr1axBfTJUcEm2Nn/w1VnPW
XBJtgb1yZDP55PD4WqDLAEA7ZUp0gm0lZ///OdKSEjQ008/HelUALRC0yooSbppYk6bzs0qKAAAAABAe+f3+2W3OyVJs6/O1eA+GWc13+A+GZp9da4kyW53BrYvBQDEp4Rf//rXv450Em0hIyNDWVlZevrpp3XgwAH17t1bnTp1inRacau+3sWX8miR2+1TUpJZmekp+u5og778uvas5yy4JFuFY3rJ5/Pp6NGGNsgSAACEmmEYSk5OjHQaQKtQ6wAIJ6/XJ8MwZLWaNTrvPNXanWdUOxdckq351w+R2WxSXZ1L9fXssgEA4RDNtY7hj5FlAuPGjZMkHT58WA0NjV8Id+rUSTab7aSvMQxDW7duDUt+8aamxi6fLybeWggxq9Wsjh07yOPx6Terdqh4X9UZzzW4T4bu+9kImc0mHT1aL6fT04aZAgCAUDGZDKWlpUQ6jZB66qmn2myuW2+9tc3mwumj1gEQCTabNfDl4sf7qrRiQ4nKK1renj2re6puKszRoH/ddVRX55LD4QxprgCAf4vmWidmmkN9+/Y97dcYhqHS0tIQZAMKJpyOpg+5Ho9Py9d9ckb7KBdckq3ZV+cGVkHxYRcAgPYjmgumttK3b18ZhtEmc1HDRBa1DoBIsVrNSkmxymRqfEpEyf5D2rbnoL48cETllcfkdHlkTTQrq1uqLupxjvLzMpXTq6skyefzyW53sogSAMIsmmsdc6QTaCuPPPJIpFMAcIaaGjnJyYm65dqBujQ3k1VQAAAgpgwbNizSKQAA2jmn0yOXyyubLVFWq0U5vboGmj8n4/P55XS65XC4eMYQACBIzNw5hOjCajqcCVZBAQAQn6J5NV1b2bp1q5KSkpSfnx/pVHCWqHUARAPDkKxWiyyWBJnNJiUkmALHvF6fPB6f3G6vnE43z0kDgAiK5lqH5hBCgoIJZ8owjMAqKJOp5a1XWAUFAED7F80FU1vp27ev0tPT9e677wbGpk+frnPOOUdPPvlkBDPD6aLWAQAAQGtFc60TM9vKAYgNfr9fdrtTDoeTVVAAACCmHL+QZefOnera9dTbAQEAAABAKMRcc6iiokKrV6/Wtm3bdPDgQTmdTu3duzdwvLa2Vv/1X/8lwzA0a9Ysmc0x968AiAl+v9TQ4FZDgzvSqQAAAJy1Dh066OjRo5FOAwAAAAAkSaaWQ9qP9957T1dddZXWrFmj/fv3q76+/oTVeZ06ddLWrVu1dOlS/eMf/4hQpgAAAADiSc+ePeV2u7V69WrV19dHOp0WffbZZ5ozZ46GDx+uvLw8TZgwQWvWrAmKKS4u1nXXXae8vDyNGjVKDz30kBwOxwlzuVwuPfbYY8rPz1dubq6uvfZavffee+G6FAAAAADNiJnbZr799lvddtttcjgcGjt2rCZNmqT77ruv2dV511xzjUpKSvSPf/xD48aNi0C2AAAAAOJJYWGhSktLtWjRIi1atCgwXlNTo379+rV6HsMwgnZGCIVt27Zpzpw56t+/v+bOnavk5GSVl5eroqIiEFNaWqoZM2aoV69eWrBggSoqKrRq1SqVlZVpxYoVQfMtWLBAW7Zs0fTp09WzZ0+tW7dON998s1544QUNHTo0pNcCAAAAoHkx0xxatWqVHA6Hxo8fr8cff1yS9OCDDzYbm5+fL0n69NNPw5YfAAAAgPh144036uuvv9batWvl8XgC48fvdBBpdrtdv/rVr/TDH/5QTz75pEym5jebWLJkiTp27KgXX3xRKSmND9g9//zzde+992rbtm2BmuuTTz7Rm2++qbvuukuzZs2SJE2aNEkTJkzQ4sWL9dJLL4XnqPu8JgAAIABJREFUwgAAAAAEiZnm0LZt22QYhm6//fYWY3v06KHExER9/fXXYcgMAAAAQLwzmUy67777NH/+fP3v//6v6uvrNX36dHXq1Em///3vI51ewMaNG3Xo0CHNmzdPJpNJdXV1SkpKCmoS2e12bd++XTfeeGOgMSRJEydO1MKFC/XWW28FmkObN29WQkKCpk6dGoizWq2aPHmylixZom+//Vbnnntu+C4QAAAAgKQYag59++23SkpKUs+ePVsVn5ycLLvdHtqkAAAAAOB7bDabBgwYEPjdYrFo+PDhEcwo2Pvvv6+UlBRVVlZq7ty5KisrU3JysgoLC3XPPffIarVq37598ng8ysnJCXptYmKi+vXrp9LS0sBYaWmpevbsGdREkqTc3NzAcZpDAAAAQPjFTHPIMAz5fL5WxXo8HtntdtlsthBnBQAAAADNe+SRR2S1WiOdRpCysjJ5vV7NnTtXkydP1p133qmdO3fqxRdf1LFjx7RkyRJVV1dLkjIyMk54fXp6uj766KPA79XV1UpPT282TpKqqqpCdCUAAAAATiVmmkPnnXee9u/fr4MHDyozM/OUsR9++KE8Hk+r7zICAAAAgLZ29dVXRzqFE9TV1am+vl7Tpk3TvffeK0m67LLL5HK5tHbtWt12221qaGiQ1Hin0PGsVmvguCQ1NDScNK7pOAAAAIDwa/7pou3QyJEjJanFB5q63W4tXbpUhmFo9OjR4UgNAAAAANqFpKQkSdKECROCxq+66ipJ0u7duwMxLpfrhNc7nc7A8ab5Thb3/fMBAAAACK+YaQ7NmDFDFotFq1at0iuvvNJszGeffaaZM2dqz549stlsuv7668OcJQAAAABEr6at4tLS0oLGu3TpIkmqra095ZZw1dXVQdvNpaenB7ahOz7u++cDAAAAEF4x0xw677zz9NBDD8nn8+n+++/XpZdeqtraWknStGnTNHr0aE2ePFm7du2S2WzWo48+GihwAAAAAADSxRdfLEmqrKwMGm9qBHXp0kW9e/eW2WxWSUlJUIzL5VJpaan69u0bGOvbt6/Kyspkt9uDYvfs2SNJ6tevX5tfAwAAAICWxUxzSJIKCwv13HPPKSsrS4cPH5bb7Zbf79fu3btVXV0tv9+v7OxsPffccxo3blyk0wUAAACAqDJ+/HhJ0quvvho0/uqrr8psNmv48OFKTU3VyJEjtWHDhqCmz/r161VXV6eCgoLAWEFBgbxer9auXRsYc7lcev3115WXl6dzzz03xFcEAAAAoDnmSCfQ1kaNGqXNmzfrww8/VHFxsaqqquT1epWenq7BgwdrxIgRSkhIiHSaAAAAABB1+vfvr2uuuUavvfaavF6vhg0bpp07d2rz5s2aPXu2unXrJkmaN2+epk2bpqKiIk2ZMkUVFRVavXq18vPzNWbMmMB8eXl5Kigo0JIlS1RTU6Ps7GytW7dO33zzjR5++OFIXSYAAAAQ9wy/3++PdBKIPTU1dvl8vLUAAADQMpPJUFpaSqTTwL+43W4tX75cr7/+uqqqqpSZmanrr79eM2bMCIrbtWuXFi9erL1798pms2n8+PGaP3++UlKC/1s6nU4tXbpUGzduVG1trfr06aPbb79do0ePDuNVtR1qHQAAALRWNNc6MdMcGjt2rEwmk1auXKns7OxIpxP3KJgAAADQWtFcMAHHo9YBAABAa0VzrRMz28pVV1fLYrHQGAIAAAAAAAAA
-      "<base64-encoded PNG data for the first matplotlib figure output elided>",
-      "text/plain": [
" - ] - }, - "metadata": {}, - "output_type": "display_data" - } - ], - "source": [ - "plot_pruning_case(ret_data, x_label=[\"model_size\"]*4,\n", - " y_label=[\"accuarcy\", \"params\", \"Inference Time\", \"flops\"], \n", - " to_png_file=\"./default_purning.png\", dpi=100) # json key error" - ] - }, - { - "cell_type": "code", - "execution_count": 17, - "metadata": {}, - "outputs": [ - { - "name": "stdout", - "output_type": "stream", - "text": [ - "Index(['index', 'model_size', 'accuracy', 'latency_per_input', 'flops',\n", - " 'params', 'latency_sum', 'input_num', 'model_file'],\n", - " dtype='object')\n", - "{'flops': 'flops(M)', 'model_size': 'model_size(K)', 'params': 'params(K)', 'input_num': 'input_num(K)'}\n" - ] - }, - { - "data": { - "image/png": "iVBORw0KGgoAAAANSUhEUgAABngAAAPtCAYAAACtkmH8AAAABHNCSVQICAgIfAhkiAAAAAlwSFlzAAAuIwAALiMBeKU/dgAAADl0RVh0U29mdHdhcmUAbWF0cGxvdGxpYiB2ZXJzaW9uIDMuMC4zLCBodHRwOi8vbWF0cGxvdGxpYi5vcmcvnQurowAAIABJREFUeJzs3Xt4lOWd//HPM6cnyUxCCBCQQ4AGCdgIBk+goKvWCnJarKAoWBep+PvRg3i41l5Vf652ta1Ua7vUgiC763qgbotQq3SXqhUErcqpKWkUKgbEkAQIZHKY8++PYcaEgOTwTGYmeb+uK5dx5nnu3HMgcM/n+X5vIxKJRAQAAAAAAAAAAIC0YUv2BAAAAAAAAAAAANA+BDwAAAAAAAAAAABphoAHAAAAAAAAAAAgzRDwAAAAAAAAAAAApBkCHgAAAAAAAAAAgDRDwAMAAAAAAAAAAJBmCHgAAAAAAAAAAADSDAEPAAAAAAAAAABAmiHgAQAAAAAAAAAASDMEPAAAAAAAAAAAAGmGgAcAAAAAAAAAACDNEPAAAAAAAAAAAACkGQIeAAAAAAAAAACANEPAAwAAAAAAAAAAkGYIeAAAAAAAAAAAANIMAQ8AAAAAAAAAAECaIeABAAAAAAAAAABIMwQ8AAAAAAAAAAAAaYaABwAAAAAAAAAAIM0Q8AAAAAAAAAAAAKQZAh4AAAAAAAAAAIA0Q8ADAAAAAAAAAACQZgh4AAAAAAAAAAAA0gwBDwAAAAAAAAAAQJoh4AEAAAAAAAAAAEgzBDwAAAAAAAAAAABphoAHAAAAAAAAAAAgzRDwAAAAAAAAAAAApBkCHgAAAAAAAAAAgDRDwAMAAAAAAAAAAJBmCHgAAAAAAAAAAADSDAEPAAAAAAAAAABAmiHgAQAAAAAAAAAASDMEPAAAAAAAAAAAAGmGgAcAAAAAAAAAACDNEPAAAAAAAAAAAACkGQIeAAAAAAAAAACANEPAAwAAAAAAAAAAkGYcyZ4AUtfRo/UKhyPJngYAAADShM1mqHdvd7KnAZwRax0AAAC0VSqvcwh4cFrhcIRFDwAAQBoxDMk0nXI67XI4bLLbvyjYD4XCCgbDCgRC8vkCivDPPPRgrHUAAADSB+uc0yPgAQAAANKcYRhyu10yTadsNuOUxzgcdjkcdmVkOOV2m/L5Aqqv9yvS01ZAAAAAANIC65wzI+ABAADoAbjiqfsyTYc8HlM2W/Q1Ld1bo007D2rP/lpVVB6XLxCS6bSrYECORgzJ1aSxA1Vc2FeZmS6ZpkNer08+XzDJjwIAAAAAvsA6p22MSE+JstBuhw97aVsAAD0MIUBqseL1aMsVT82Fw5Eed8VTOnO7TWVluSRJ28urtHJdqSoO1Z3xvIIB2Vo4o1glRfmSpIYGv+rrfZ2ej81mqE8fT6fHARKNtQ4AAEDqYp3TdgQ8OC0WPQDQcxACpBarXo+OXvEUHTPcY654SlexRU8wGNbytbu04d1P2z3G5PFDtWjWGDkcNksWP6m88AGaY60DAACQmljntA8BD06LRQ8A9AyEAKnFqtcj1a54grVM06GcnEwFg2E98ux72lZe1eGxxhXl64EFF8vhsOn48cZO/XlO5YUP0BxrHQAAgNTDOqf9CHhwWix6AKBrJLMtGiFAarHq9QgEQnI67Sl1xROsYxiG8vKyZLPZtOzlHR16fU82efxQLZ59nsLhsI4caehwZV4qL3yA5ljrAAAApBbWOR1jf+ihhx5K9iSQmhob/eyvAPQwhiFlZDiVmemS2+2SxxP9sDkrK7pBndNpl81mKBQKJ3uq3YJhGPJ4TGVnZyojwymHwy6bzSbDMOJfNptNDoddpulQRoZLdruhQMCa57952fPTv9mpZ9aV6li9v03nHvP69eaHB3T0eJPGFeXLNB0yDEOBQMiSufVEVr0e54/Kl8Nhj1/xtGnnwQ7NZ8+BY/p4f60mjR0k03QoFArzZz9FeDymXC6Htp0IAa2w58AxjR6Wp4H9PDIMye/v2J9lwzDiISWQyljrAAAApBbWOR1DBQ9Oi6vagJ4jlfZfSWY1S1eyqg1XR5+vVC177qmsej1ys00tv+8qZWU4U+6KJ1jDMKS8PI9sNkOLf/JGmyq82qpgQLaW3XulwuGIjhzxduh3bCpf2QY0x1oHAAAgdbDO6ThHsicAAEiujgYNmZnRqh6r9l9pS8jkcNjlcNiVkeGU222eNmRK9ZCorW24mvwhfVRxVB9VHNVr73zSog1XTk6mAoGQ7HZbu58vKXpljCQtX7urU+GOJG0rr9Lytbu0ePZ58nhM+f0hQoB2iFVySZ1/PW68ukhZGU5tK6+yJNyRpA3vfqpLxgxUSVG+3G6XvF5atSVT7Hdk6d4aSxc9klRRWafSvTUqLuwr03SqqSlg6fgAAAAAcCqsczqOgAcAejCrgobO7s9hVchkZUh0Op0Nj5q34Wrv3igVlXV6cMVWfWf2WF198VA5nfYOPV/BYFg2m40QIEW43S5LXo9M06Erzh8iSVplUTl7zMr1pVp275UyTafq631pXT2X7mJ/7jvaeu9MNu88qOLCvnI67d1u4QMAAAAgNbHO6TgCHgDooawIGmKbsMdColOFPGcKRKRo6CJ1LmTy+YJyOm0Jq0SyIjwyTUf8Oe9IGy7DkG6bXqyvjx/WqefL6YzOnxAg+WJ/PqTOvx6TzhukrAwHVzx1cw5H9Hfcnv21CRk/Nm7s5wAAAABAorHO6TgCHgDogTobNMRsePdTVR1t1AMLLj4xX
vM2jQIM4++2zuu+8+6urqOjSfiIiIiIhET69evQgEAnz++efha0OGDCEUCvHUU0+1GPuHP/wBgN69e3dqxkM5//zz+ec//8m///3v8LXVq1fzxRdfUFhYGL42duxY7HY7zz//fPhaKBTihRdeoEePHhQUFHRqbhGRg6mr8xIMBinITaNweHZE5iwcnk1BbhrBYJC6utZHVYqIiERTzB3R9te//pVQKMQPfvADZs6cGb5ut9sZMWIEABdffDFFRUVcf/31PProo7zyyiudku3qq69m4MCBLa5lZWWF/3/z5s1cc8015OTkcMcdd1BeXs5TTz3FF198wZNPPtkpGUVEREREpHONGDGCFStW8N5779GnTx8ArrjiChYuXMgbb7zBtm3byM3NZdu2bZSUlGAYBt/97nc7PNezzz7LgQMHqKysBGDlypWUl5cDTWubxMREbrzxRpYuXUpRURFFRUXU19czf/58+vbt2yJjeno6RUVFzJ8/H7/fz8CBA1m2bBmffPIJDz74IFartcM/j4hIW0KhEB6PSVJSPNMm5VNZ3cC6rZVHPd+Q3DSmTcoHwOMxdYKMiIjEHCMUY/86jRkzhn//+98sW7aMnj17AtCvXz9SU1N5//33W4xdsmQJt912Gz/60Y/46U9/2mGZ1qxZQ1FREY8++miLp9j+2w9/+EM2b97M0qVLcbvdAPztb3/jzjvvZP78+YwcObLDMnaEvXs9BIMx9eshIiIiIjHMYjFISXFHO0anq6qq4qGHHiI7O5sbb7wxfP3ZZ59l1qxZBAItG3KPHz+ehx56qMNzjRkzhl27dh30teXLl3PKKacAsH37dn73u9+xdu1a7HY75557LnfccQepqakt7gkGg/z5z3/mxRdfpLKykl69enHDDTcwYcKEDv8skaa1jsiJzeVykpDgwO8PMnfRRpZ++OURz1E4PJtpk/Kx2SzU13upqzM7IKmIiBwPYnmdE3MFnvz8fAzDCJ/lDDBgwABcLhcff/xxi7E+n4+CggJ69erF66+/3mGZvlngGTlyJHFxcS0apQJ4PB6GDRvGD37wA37+85+Hr3u9XoYNG8aFF17I/fff32EZO4IWPSIiIiJyJGJ54RMtpaWlvPnmm5SXl+N2uxk1alT4ZAKJHq11RE58zUUegPVbK3nytWLKymvbvC8rPZGpE/IoyE0DUHFHRERiep0Tc0e0xcfHt7rmcrnweDx4vV4cDkf4ut1uJz4+/pBPpUXazJkzqa+vx2q1cvrpp/Pzn/88fGTb1q1b8fv95OW1bOTncDjo378/mzdv7pSMIiIiIiISO3Jycpg+fXq0Y4iInHTq6kz8/gBut5OC3DTm3D6G4tIq3t+wm5KdNZRV1GJ6/TgdNrJ6JNInM5mRgzLIy2navRgMBvF4TEzTH+VPIiIicmgxV+BJS0tjx44d+P3+8C6ZzMxMNm/ezMaNGxk6dGh4bEVFBbW1tQctCkWS3W7nggsu4JxzzqFr166UlpYyf/58rrzySl544QUGDBjAnj17wvn/W/fu3Vm7dm2HZhQRERERkdiwfft2iouL2bt3LwApKSnk5eXx7W9/O8rJREROLqbpx+sN4HI5cDrt5OWkhgs4hxIMhjBNH3V1XvXcERGRmBdzBZ6cnBxKSkrYtm0bAwYMAGDYsGFs2rSJxx9/nCeeeAKn04nX6w0feda3b98OzTRkyBCGDBkS/vPYsWO54IILmDBhAg899BDz58+nsbERoMUOo2ZOpzP8uoiIiIiInJhWrlzJww8/TElJyUFf79OnD7fccgtjx47t5GQiIpFlGOB02rHbrdhsFqxWS/i1QCCI3x/E5wtgmj6iXSMJhUJ4PCZ1deZxk1lERKS9Yq7Ac/bZZ7N06VJWrFgRLvBMmTKF5557jtWrV3POOefQu3dvvvjiC/bv349hGFx55ZWdnjM7O5uxY8fy1ltvEQgEiIuLA5p67vw30zTDr4uIiIiIyInnscceY86cOeGnvW02G8nJyQDU1NTg9/vZvn07N998M9OnT+fHP/5xNOOKiBwVwzDCu2EsFuOgY2w2Kzablbg4Oy6XM2Z2w4RC0Njoo7HRF9UcIiIikRRzBZ4LLriAiooKevToEb6WmZnJQw89xMyZM9m/fz+ffvopABaLheuvv54JEyZEJWt6ejo+n4+Ghga6d+8OQGVlZatxe/bsOejRbSIiIiIicvx79913eeyxxwA444wz+NGPfsTQoUPDu/u9Xi+ffPIJf/rTn/joo494/PHHGTx4MKNGjYpmbBGRI+J02nC7nVgsTTtfikureK+5n035AUxfAKfdSlZ6En0ykxn1dT+b+HgHTqdN/WxEREQ6QMwVeJKSkrj55ptbXf/Od77DGWecwTvvvEN5eTlut5uRI0eSnZ0dhZRNvvrqK5xOJwkJCfTt2xebzUZxcTEXXnhheIzX62Xz5s2MGzcuajlFRERERKTjPP300wAUFhbyyCOPYBgtn2p3OBycddZZjBgxgltvvZWlS5fy9NNPq8AjIscNl8tJQkJT0Xr91kqeXFxMWUVtq3GN3gDbyqrZVlbNklU7yEpPZOqEPApy00hKiqe+3ktdndnZ8UVERE5YMVfgOZzk5GQmTpzY6e+7b98+unXr1uLali1bWLFiBaNGjcJisZCYmMiIESN47bXXmD59Om63G4DFixdTX19PYWFhp+cWEREREZGOV1xcjGEYzJw5s1Vx55sMw+COO+5g6dKlfPbZZ52YUETk6DUXd/z+IHMXbWTph1+2+96y8lrunreawuHZTJuUHy4SqcgjIiISGcdVgSdabrnlFuLi4igoKCAlJYWSkhIWLlxIXFwcP/vZz8Ljbr31Vq644gquvvpqLr/8csrLy1mwYAEjR47knHPOieInEBERERGRjuLz+UhKSmpxzPShpKen06VLl4P27hQRiTVOpy1c3PnNU2tYt7X1sfTtsfTDL6msbuCu64Z9PV9Ax7WJiIhEgCXaAY4H5513HtXV1Tz99NPcc889LFmyhO985zu8/PLL5OTkhMeddtppLFiwAKfTyaxZs1i4cCGTJ0/m0UcfjWJ6ERERERHpSKeccgp1dXXtKtp4vV7q6urIzMzshGQiIkfPMAzcbicAcxdtPOriTrN1WyuZu2gjAG6387A7HkVERKR9tIOnHYqKiigqKmrX2KFDh/LCCy90cCIREREREYkVF198MQ8//DCLFy/msssuO+zYxYsX4/f7ueiiizopnYjI0XG5HFgsFtZtrTyiY9kOZ+mHX3JWfgYFuWm4XA48Hh3VJiIiciy0g0dEREREROQYXHvttZx++uncd999LFq06JDjXn31Ve677z6GDh3Kdddd14kJRUSOjGGA02kHYP7i4ojO/eRrTfM5nXa0iUdEROTYGKFQKBTtEBKb9u71EAzq10NERERE2sdiMUhJcUc7Rqd77LHH8Pl8PP/883g8Hr71rW9x5plnhnvyVFRU8NFHH/Hvf/+bxMREvv/972O32w86180339yZ0U9aWuuIHF5cnJ3ExDiKS6uY+fiqiM8/a/rZ5OWkUlvbSG
OjL+Lzi4iIRFIsr3NU4JFD0qJHRERERI5ELC98OlK/fv3CvSSal1f/3VviUNf/2+bNmzsgofw3rXVEDi8xMY64ODtPvLKRJat2RHz+8Wf35sZL82ls9FFb2xjx+UVERCIpltc56sEjIiIiIiJyDM4444xoRxARiSibrelE/5KdNR0yf/O8ze8jIiIiR0cFHhERERERkWPwzDPPRDuCiEhEWa1NhZey8gMdMn9ZRW2L9xEREZGjo39JRURERERERESkFdMX6Jh5vf4OmVdERORkowKPiIiIiIiIiIi04rRbO2Zehw6UERERiQQVeEREREREREREJCwQCAKQlZ7UIfNn9Uhs8T4iIiJydFTgERERERERERGRML+/qfDSJzO5Q+Zvnrf5fUREROToqMAjIiIiIiIiIiJhvq9774walNEh84/8el5fB/X4EREROVmowCMiIiIiIiIiImGm6SMYDJGXkxo+Ti1SstITyctJJRgMYZq+iM4tIiJyslGBR0REREREREREwkIhwsWXqRPzIjr31AlN85mmj1AoolOLiIicdFTgERERERERERGRFurqvASDQQpy0ygcnh2ROQuHZ1OQm0YwGKSuzhuROUVERE5mKvCIiIiIiIiIiEgLoVAIj8cEYNqkfIbkph3TfENy05g2KR8Aj8ckpO07Iv+fvfuPjqq+8z/+und+3CQzAQwhBJSgBo22aRC0BSzUrdiaSouli7V+i2gpW6zdbdXd7lJ/cDyFbn/ZLrUHXTyAdd1u/bGnLNiWYHvcrWDFtQuCqRgKCiGmIfxIJDNh7vy48/0jzEhMQn4wk5nJPB/n5Jx25s5nPmOiyfvzup/3BwDOGQEPAAAAAAAAerDtqDo7w3K7TT2wZMaQd/LUzpysB5bMkNttqrMzLNuOpnimAADkJyPOLRPow/HjATkOPx4AAAAYGNM0NHasP9PTAPpFrQMMjs9nqajIK0na1dCqdZvr1djS0e/rKsqLtXR+taad3v3T2RlWMGinda4AAKRaNtc5BDzoE0UPAAAABiObCx/gTNQ6wOBZllt+vyXT7GoGU3/gmLbvbtb+w+1qPNIhOxyV5XWrYnyxpkwao9lTJ6q6slSS5DiOAgGbnTsAgJyUzXWOO9MTAAAAAAAAQHaz7ajC4Zh8Pq8sy6PqytJkgNMXx4nLtiMKBsOcuQMAQBoQ8AAAAAAAAKBf8XhcgYCtYNCWZXnk8bjkdptyud474jkWcxSNOopEYrLtiMh1AABIHwIeAAAAAAAADFg8LoVCEYVCkUxPBQCAvGb2fwkAAAAAAAAAAACyCQEPAAAAAAAAAABAjiHgAQAAAAAAAAAAyDEEPAAAAAAAAAAAADmGgAcAAAAAAAAAACDHuDM9AQAAAAAAgJHEMCTL8sjjccntNuVyvXd/bSzmKBp1FInEZNsRxeMZnCgAAMhpBDwAAAAAAAApYJqGiosL5PG4ZBhGr9e43S653S4VFHjk81my7YiCwbDiJD0AAGCQCHgAAAAAAADOgWEYGjWqe7BTf+CYtu1u1v7D7WpsOSk7EpPlcamifJSmTBqjOVMnqrqyVIWFXlmWW4GALduOZviTAACAXGLEuUUEfTh+PCDH4ccDAAAAA2OahsaO9Wd6GkC/qHWQSpblVnFxQTLY2dXQqnWb6tV4pKPf11aUF2vp/GpNqyqTJHV2hhUM2mmdLwAAGJxsrnMIeNAnih4AAAAMRjYXPsCZqHWQKn6/pcJCryQpGnW0duMe1e04NOhxamdO1rIFNXK7TUIeAACyTDbXObRoAwAAAAAAGATDMDRmTKHcbpekrnBn5YZXtLOhdUjj1e04pNa2U3pgyQwVFXkVjcZo1wYAAPplZnoCAAAAAAAAucAwunbtjB3rk9vtUqIpytqNe4Yc7iTsbGjV2o17JHW9R6LlGwAAQF8IeAAAAAAAAM7CMIzTwY4/2ZItHo/LMAztbGgdUlu23tTtOKRdDa0yTVM+nzclYwIAgJGLgAcAAAAAAKAPluVWSUmRCgu9MgxDjhPvtrtm/ab6lL7fus3N38SSAAAgAElEQVT1p9/XIzbxAACAsyHgAQAAAAAAeB/DkEaPLtSoUYUyza7lk0jUUSTqJHfv1B84psYjHSl938aWDtUfOCbTNGRZnpSODQAARhYCHgAAAAAAgNPObMfm9boVjTo6ZUclSdtfa5LldSV38Gzb3ZyWOWw/Pa7H40rL+AAAYGQg4AEAAAAAAFDPdmzRqKOdDa0qtNza2dAqJ979+v2H29Myj8S4bjfLNgAAoG/8pQAAAAAAAPLa+9uxRaOOJGnDc/WqriyV1HXWzpQLxnR7XWPLybTMJ9H2zeVi2QYAAPSNvxQAAAAAAEBe6q0d2+v7j8rtNrWzoVV2xFFRgTt51k55qa/b6+1ILC3zssPRtIwLAABGFgIeAAAAAACQd3prx/aDJ/+oygvOk9S1Y+cDF5VIeu+sHc/7dtRYaTojx/K60zIuAAAYWQh4AAAAAABAXvH5rB7t2NZu3KNin7fbjp1ES7bEmTiRmNNtnIryUWmZX8X4YklS7H3vBwAAcCYCHgAAAAAAkDd8PktFRd4e7djqdhzqsWMn0ZItcdZOy7Fgt7GmTOp+Jk+qJMZNhE8AAAC9IeAZokcffVRVVVX69Kc/3eO5nTt36pZbbtHUqVP10Y9+VKtWrVIwGOxlFAAAAAAAMFwsy50Md97fjk1Sjx07iZZsibN29je1dxtvztSJaZnn7NPjRtJ0xg8AABgZCHiGoKWlRWvXrlVRUVGP5/bu3avbb79doVBIy5cv18KFC/X000/rG9/4RgZmCgAAAAB9e+WVV1RVVdXr12uvvdbtWm5kQ64zDEN+vyWp93ZsUs8dO4mWbImzdt54+0RyvJjjqLqyNNlOLVUqyotVXVmqeDwu246kdGwAADCycGrfEHz/+9/X1KlT5TiO2trauj334x//WKNGjdKTTz4pv98vSbrgggt0//33a/v27Zo9e3YmpgwAAAAAfbr11lv1oQ99qNtjFRUVyf+duJGtsrJSy5cvV0tLizZs2KCDBw9q3bp1wz1dYEh8Pq9M8712bHd9YZqk99qxST137LQcC2ryhFGqKB+lfY1t2vbaO/ry/GoVWi65zK5rl95YrRWPvZyyeS6dXy1JCoUiisdTNiwAABiB2MEzSK+++qq2bt2qe++9t8dzgUBAf/jDHzR//vxkuCNJN954o4qKirRly5bhnCoAAAAADMhVV12lG2+8sdtXSUlJ8vkzb2S75ZZbdPfdd2vFihXatm2btm/fnsGZAwNjGJJleST13Y5N6rljJ9GSLXEmzik7qv/+v8MyDENS1xk506rKVDtzckrmWTtzsqZVlSkejysYDKdkTAAAMHIR8AxCLBbTypUrtXDhQlVVVfV4vqGhQdFoVNXV1d0e93q9uvzyy7V3797hmioAAAAADEogEFA0Gu31cW5kQ66zLI9M0zhrOzapa8eOJFWUj5L0Xku2M8/aeeq3DeroDCsej8vt7lpW+cqCGk2vKjunOU6vKtOyBTWSpI4OW3G27wAAgH4Q8
AzCU089pebmZt111129Pn/06FFJUllZzz/qxo0bp9bW1rTODwAAAACG4lvf+pauvPJK1dTU6NZbb9Xrr7+efI4b2TASeE7vyDlbOzap546dba+9o85QtNtZO+0dtv71l3uSu3hiTlwet6n7l8wY8k6e2pmT9cCSGXK7Tdl2lLN3AADAgBDwDFBbW5sefvhh3Xnnnd1aFZwpFApJ6ip03s+yrOTzAAAAAJANPB6Prr/+et1333165JFHdNddd2nfvn364he/qDfeeEMSN7JhZEjstDlbOzap546dREs2qeusnYQXd72jTb8/IElymUYy5PnaTVfo21+ZpYry4gHNq6K8WN/+yix97aYr5HabikRiOnny1FA/JgAAyDPuTE8gV6xevVqjR4/WokWL+rymoKBAkhQO9+yTa9t28nkAAAAAyAbTp0/X9OnTk/9/7ty5uv766zV//nz96Ec/0vr167mRDSOC6/Runfe3Y5s8YZQqykdpX2ObpK4dO1+eX53csdN4pENP/bZBc644P3nWTt2OQ5Kk9c91neVz4zWVcpndz+RZ881rVX/gmLbvbtb+w+1qPNIhOxyV5XWrYnyxpkwao9lTJ6q6slSSFI/HFQ4T7gAAgMEh4BmAgwcP6plnntG9997b7e4027YViUTU1NQkv9+vcePGSVKvd7AdPXq01zveAAAAACCbTJ48WXPnztXzzz+vWCzGjWwYUd7fjm3yhFGaMmlMMuBJ7NiZ99GLtPTGaq147GW1d9hau3GPvrnoKn1lQY1a205pZ0Or4nFp3eZ6NTS26W9vukKFliu5UyjmOKquLE0GOH1JnLMTCIQUCvU8AwsAAOBsaNE2AEeOHJHjOFq1apXmzp2b/Nq9e7cOHjyouXPnas2aNbr00kvldrtVX1/f7fXhcFh79+7VZZddlqFPAAAAAAADV15erkgkolOnTnEjG0aUs7VjS3jqtw06GQwnd+xI77Vk6+2snW2vvaNl3/2dfvdqoyJRR/F4XC6z/+WWeDyuUCii48eDhDsAAGBICHgG4JJLLtGaNWt6fF1yySWaOHGi1qxZo4ULF6q4uFizZs3S5s2bFQgEkq/ftGmTOjs7VVtbm8FPAQAAAAAD09TUJMuyVFRUxI1sGBFip8/bqSgflXxs22vvqDMUTbZjS0js2JGkryyo0fSqrhBz/XP1yZDn/WfttAdsPfz0a/riii1au/F17X37uAKdYTnxuOJnfCXYdlTHjwcUCNjdHgcAABgM14MPPvhgpieR7QoLC3XxxRf3+NqyZYtisZjuv/9+lZZ2bbuurKzUz3/+c/3+979XPB7X7373O/3kJz/RrFmz9PWvfz3Dn2RwTp0Ki78zAQAAMFCGYaioqOc5LcheJ06cUGFhYbfH3nzzTX3/+9/X7Nmz9ZnPfEaWZWnXrl166aWX9PnPfz55Fs8vf/lLbd26Vffcc48mT57c2/BZi1on/3g8LrndLr3V/K7+fLhdkhSNOSodU6hLK87TxFKf/vv/mpLXH2rpkK/Aow9cPFazp56vdwO29je9q50NrWpqDehDU0p14YRRmvfRi1QzpVQFXrdMw9C7wbDePHhC23Y3a1fDUb3dfFKFllvjS3wyDEOO46ijI6TOzp4tDwEAQHbK5jrHiHOryJDdeuutamtr069+9atuj//xj3/UQw89pDfeeEM+n0+f+tSndM8998jv92dopkNz/HhAjsOPBwAAAAbGNA2NHZtbf/Pmu8WLF6ugoEDTpk3T2LFjtX//fj3zzDNyu916+umnVVlZKUn605/+pC984QuaMmWKPv/5z6ulpUWPP/64PvzhD2v9+vUZ/hSDR62TfwoKPCouLlD9gWP61iMvJR8fU2xpzTev1SifV2uefU11Ow4lnzMM6cufqdaN13T9e7CroVXrNtersaVDY/yWvvDJKn38ykkqKuj/eONEO7ZgMMyOHQAAckw21zkEPOgTRQ8AAAAGI5sLH/Tu3/7t3/Tcc8+psbFRgUBA5513nmbNmqW//du/7bErZ6TcyCZR6+Qjw5BKSvwyTUNf+8ELajzSkXzuY9PO1zcXXaVI1NGqDa9oZ0P386bmXHG+7vhcjUb5uu7crT9wTNt3N2v/4Xa1tnXqIx8sV/XFY1U1uUTjziuUyzRkGIakM4Mdm11jAADkqGyucwh40CeKHgAAAAxGNhc+wJmodfKT32+psNCrXQ2tWvHYy92eWzq/a6dOJOrosY17uu3kkcSOHQAA8lg21zmcwYM+0ZcaAAAAg5HNvamBM1Hr5KdIxFFBgVsTx/nVdjKk/U3vJp/bta9VRVbXmTsf+WC5Lr+wRAfeeVfvBrrOygmFY/rj3iP69Utvq+V4pwKdYZmmocICt0zTUDyu5K4d246qvb1T4XAsI58TAACkVjbXOQQ86BNFDwAAAAYjmwsf4EzUOvnLceKyLI+mV5Xpz4fb9ZfjweRzOxta1dQa0IemlOrCCaM076MXqWZKqQq8bpmGoc5QVHY4qneOBnW07ZROnAzJX+jR+BKfDMOQ4zjq6AipszOcwU8IAABSLZvrHFq0oU+0LQAAAMBgZHPrAuBM1Dr5zeezVFTkVTTqaC3t2AAAQD+yuc4h4EGfKHoAAAAwGNlc+ABnotZBIuSRpF0NrVq3uV6NLR3drim03Jpzxfn6wEUlqrxgjCaU+uRxm1K86793Ulc7to6OU+wIAwBgBMvmOoeAB32i6AEAAMBgZHPhA5yJWgeSZFlu+f2WTNOUJNUfOKbtu5u1/3C7Go90yA5HZXndqhhfrCmTxmj21ImqriyVJDmOo0DAlm1HM/kRAADAMMjmOqf/vcYAAAAAAAAjjG1HFQ7H5PN5ZVkeVVeWJgOcvjhOXLZNOzYAAJAdCHgAAAAAAEBeisfjCgRsBYO2LMsjj8clt9uUy2Umr4nFHEWjjiKRmGw7Qjs2AACQNQh4AAAAAABAXovHpVAoolAokumpAAAADJjZ/yUAAAAAAAAAAADIJgQ8AAAAAAAAAAAAOYaABwAAAAAAAAAAIMcQ8AAAAAAAAAAAAOQYAh4AAAAAAAAAAIAcQ8ADAAAAAAAAAACQYwh4AAAAAAAAAAAAcgwBDwAAAAAAAAAAQI4h4AEAAAAAAAAAAMgxBDwAAAAAAAAAAAA5hoAHAAAAAAAAAAAgxxDwAAAAAAAAAAAA5BgCHgAAAAAAAAAAgBxDwAMAAAAAAAAAAJBjCHgAAAAAAAAAAAByDAEPAAAAAAAAAABAjiHgAQAAAAAAAAAAyDEEPAAAAAAAAAAAADmGgAcAAAAAAAAAACDHEPAAAAAAAAAAAADkGAIeAAAAAAAAAACAHEPAAwAAAAAAAAAAkGMIeAAAAAAAAAAAAHIMAQ8AAAAAAAAAAECOIeABAAAAAAAAAADIMQQ8AAAAAAAAAAAAOYaABwAAAAAAAAAAIMcQ8AAAAAAAAAAAAOQYAh4AAAAAAAAAAIAcQ8ADAAAAAAAAAACQYwh4AAAAAAAAAAAAcgwBDwAAAAAAAAAAQI5xZ3oCueDPf/6zfvrTn+pPf/qTjh07poKCAk2ZMkVf
/vKXde2113a79sCBA/rnf/5n7dy5Ux6PR9dcc42+9a1vqaSkJEOzBwAAAABkA8OQLMsjj8clt9uUy/XePZexmKNo1FEkEpNtRxSPZ3CiAAAAyAkEPAPQ3NysYDCoBQsWqKysTKdOndLzzz+vr371q/r2t7+tm2++WZLU0tKiL37xiyouLtbdd9+tzs5ObdiwQfv27dOzzz4rr9eb4U8CAAAAABhuhmHI5/PKsjwyTaPXa9xul9xulwoKPPL5LNl2RMFgWHGSHgAAAPTBiPPX4pDEYjF97nOfk23bqqurkyQ9+OCD2rhxo7Zs2aKJEydKkv7whz/oS1/6UrcgKFccPx6Q4/DjAQAAgIExTUNjx/ozPQ2gX8NZ61iWW36/JdPs2q1Tf+CYtu1u1v7D7WpsOSk7EpPlcamifJSmTBqjOVMnqrqyVJLkOI4CAVu2HR2WuQIAAKCnbK5z2MEzRC6XSxMmTNDrr7+efOz555/XX/3VXyXDHUm6+uqrdeGFF2rLli05F/AAAAAAAIbO57NUVNTVyWFXQ6vWbapX45GOHteFwjHta2zTvsY2/ealt1VRXqyl86s1rapMo0YVqrMzrGDQHu7pAwAAIMsR8AxCZ2enQqGQAoGAXnjhBb344ov61Kc+JUk6cuSIjh8/rurq6h6vq6mp0Ysvvjjc081a9J0GAAAAMBKdWet4vS6Zpqlo1NHajXtUt+PQgMdpbOnQisdeVu3MyVq2oCYZEhHyAAAA4EwEPIPwve99T08//bQkyTRNfeITn9CKFSskSa2trZKkcePG9XjduHHj1N7ernA4nNfn8NB3GgAAAMBI1FetE406WrnhFe1saB3SuHU7Dqm17ZQeWDJDRUVeRaMx2rUBAAAgiYBnEG677TbV1taqtbVVW7ZskeM4ikQikiTb7rqTqrcAx7IsSVIoFMrbgGeofacLC72yLDd9pwEAAABkpffXOm8ePK4LJ4xWgeXW2o17hhzuJOxsaNXajXv0tZuukN9vKRyOcQMcAAAAJBHwDEplZaUqKyslSZ/97Ge1ZMkS3XHHHXr22WeTIU44HO7xukT4U1BQMHyTzSL0nQYAAAAwEvVW69zw0Yt02YVjtbOhdVBt2c6mbschXV0zUdOqyuTzeRUIUBcBAABAMvu/BH25/vrr9frrr+vtt99WWVmZJOno0aM9rjt69KjGjBmTl7t3EgVPNOpozbOvacVjL/ca7vQm0Xd6zbOvKRp1VFTklc9npXnGAAAAANC/3mqdo+2n9PErJ0mS1m+qT+n7rdvcNZ5leWT03vEaAAAAeYaA5xyEQiFJUiAQ0Pjx41VSUqL6+p5/xO/Zs0eXXXbZcE8v4yzLnSx4Vm54Zch3r9XtOKSVG15JhjyWxcYzAAAAAJnTV60z54rzVVTgVv2BYwO+sW2gGls6VH/gmEzTkGV5Ujo2AAAAchMBzwAcP368x2ORSESbNm1SQUFBsm3bJz/5Sf3P//yP/vKXvySve/nll3Xw4EHV1tYO23yzgWEY8vu7dtuksu+0JPn9lgxuWQMAAACQAWerdT5wUYkkadvu5rS89/bT43o8rrSMDwAAgNzCVogBWLFihQKBgD784Q9r/PjxOnr0qJ577jm99dZbWr58uXw+nyTpjjvuUF1dnRYvXqzFixers7NT69ev16WXXqq//uu/zvCnGF4+n1emadJ3GgAAAMCIcrZaZ8oFYyRJ+w+3p+W9E+O63dyrCQAAAHbwDMgNN9wg0zT1i1/8Qg8++KB+9rOfqby8XI888oi+9KUvJa+bMGGC/v3f/10VFRX60Y9+pHXr1uljH/uYHn/88bw6f8cwlGwZQN9pAAAAYGQIh8P64Q9/qNmzZ6umpkY33XSTXnrppUxPa1j1V+uUl3bd/NfYcjIt759o++ZyUcoDAACAHTwDMm/ePM2bN29A115yySVav359mmeU3SzLI9M00tp3urqyVJblUSgUSen4AAAAAHq3fPlybd26VYsXL9aFF16ojRs36itf+YqeeOIJXXXVVZme3rDor9bxnA5e7EgsLe9vh6NpGRcAAAC5idt+kHKJftD0nQYAAABGhj179ujXv/617rnnHv3TP/2Tbr75Zj3xxBOaOHGiHnrooUxPb9j0V+tEYo4kyUpTrWJ5uUcTAAAA7yHgQcol+kHTdxoAAAAYGerq6uRyuXTzzTcnH7MsSwsXLtSuXbv0l7/8JYOzGz791Totx4KSpIryUWl5/4rxxZKk2OkgCQAAAPmNFXKkXKIfNH2nAQAAgJFh7969uvDCC+X3+7s9XlNTk3w+H/RX6+xv6gp+pkwak5b3T4wbjRLwAAAAgIAHaUTfaQAAAGBkOHr0qMaNG9fj8cRjra2twz2ljOqr1nnj7ROSpDlTJ6blfWefHjeSploLAAAAuYWAB2lD32kAAABgZAiFQvJ6vT0etywr+Xw+6avW2fbaO+oMRVVdWZpsp5YqFeXFqq4slePEZduRlI4NAACA3ETAg5RL9IOm7zQAAAAwMhQUFCgcDvd43Lbt5PP5oL9a55Qd1X//32FJ0tIbq1P63kvnd41n2xHF4ykdGgAAADmKgAcpl+gHTd9pAAAAYGQYN26cjh492uPxxGNlZWXDPaWMGEit89RvG3QyGNa0qjLVzpyckvetnTlZ06rK5DiOgsGeQRsAAADyEwEPUi7RD5q+0wAAAMDIcNlll+ngwYMKBALdHt+9e7ck6fLLL8/EtIbdQGqd9g5bazfukSR9ZUGNpledW/g1vapMyxbUSJICAVtxtu8AAADgNAIepJxtR+Q4cfpOAwAAACNEbW2tYrGYnn766eRj4XBYv/zlLzV16lRNmDAhg7MbPgOtdV7c9Y42/f6APG5T9y+ZMeSdPLUzJ+uBJTPkdpvq7AzLtqNDnToAAABGINeDDz74YKYngex06lR4yL2dXS5DHo9LE0t9+u//a0rZnP7+/12pCaU+hUIRhcPs4AEAAMgmhmGoqMib6WkgDcrLy7V//379/Oc/VzAYVFNTk7773e9q//79+uEPf6jzzz8/01MclOGodXbta1WR5dEHLh6rj3ywXJdfWKID77yrdwP9t1irKC/W3/+/KzX/Y5UyTUOdnWEFg/bQJgwAAIBzks11jhFnfzf6cPx4QI4ztB8PwzBUUlIk0zS15tnXVLfj0DnPp3bmZH3tpivkOI5OnOikNQEAAECWMU1DY8f6Mz0NpIlt21q9erWee+45vfvuu6qqqtI3vvENzZkzJ9NTG7ThrHXmXHG+7vhcjUb5uhYF6g8c0/bdzdp/uF2NRzpkh6OyvG5VjC/WlEljNHvqRFVXlkqSHMdRIGCzcwcAACCDsrnOIeBBn86l6JEky3Jr1KhCRaOOVm54RTsbWoc81vSqsmRrgpMnT1HgAAAAZKFsLnyAMw13rTPGb+kLn6zSx6+cpKICd7/jJ1pSB4NhbmwDAADIsGyucwh40KdzLXokyeezVFTkVTTqaO3GPUPayVM7c7KWLahJ9p2mNQEAAEB2yubCBzhTpmqdQsutOVecrw9cVKLKC8bogjK/XKYhwzDkOI7
C4ZgikZhsOzLkFnIAAABIrWyucwh40KdUFD3Se4WPJO1qaNW6zfVqbOno93UV5cVaOr9a06rKJIlwBwAAIMtlc+EDnIlaBwAAAAOVzXUOAQ/6lKqiR+pqYeD3WzJNUxJ9pwEAAEaibC58gDNR6wAAAGCgsrnOIeBBn1JZ9Ehdh5H6fF5ZlkemafR7PX2nAQAAcks2Fz7Amah1AAAAMFDZXOf0f7ojkCLxeFyBgK1g0JZleeTxuOR2m3K5zOQ1sZijaNSh7zQAAACAnEGtAwAAgEwg4MGwi8elUCiiUCiS6akAAAAAQMpQ6wAAAGA4EfCgTwNpLQAAAAAk8PcjcgU/qwAAABiobP7bkTN4AAAAAAAAAAAAcozZ/yUAAAAAAAAAAADIJgQ8AAAAAAAAAAAAOYaABwAAAAAAAAAAIMcQ8AAAAAAAAAAAAOQYAh4AAAAAAAAAAIAcQ8ADAAAAAAAAAACQYwh4AAAAAAAAAAAAcgwBDwAAAAAAAAAAQI4h4AEAAAAAAAAAAMgxBDwAAAAAAAAAAAA5hoAHAAAAAAAAAAAgxxDwAAAAAAAAAAAA5BgCHgAAAAAAAAAAgBxDwAMAAAAAAAAAAJBjCHgAAAAAAAAAAAByDAEPAAAAAAAAAABAjiHgAQAAAAAAAAAAyDEEPAAAAAAAAAAAADmGgAcAAAAAAAAAACDHEPAAAAAAAAAAAADkGAIeAAAAAAAAAACAHEPAAwAAAAAAAAAAkGMIeAAAAAAAAAAAAHIMAQ8AAAAAAAAAAECOIeABAAAAAAAAAADIMQQ8AAAAAAAAAAAAOYaABwAAAAAAAAAAIMcQ8AAAAAAAAAAAAOQYAh4AAAAAAAAAAIAcQ8ADAAAAAAAAAACQYwh4AAAAAAAAAAAAcgwBDwAAAAAAAAAAQI4h4AEAAAAAAAAAAMgxBDwAAAAAAAAAAAA5hoAHAAAAAAAAAAAgxxDwAAAAAAAAAAAA5BgCHgAAAAAAAAAAgBxDwAMAAAAAAAAAAJBjCHgAAAAAAAAAAAByDAHPED366KOqqqrSpz/96T6vOXnypGbNmqWqqirV1dUN4+wAAAAAAAAAAMBIRsAzBC0tLVq7dq2KiorOet3DDz+sUCg0TLMCAAAAAAAAAAD5goBnCL7//e9r6tSpqq6u7vOaffv26Re/+IWWLl06jDMDAAAAAAAAAAD5gIBnkF599VVt3bpV995771mv+853vqPrrrtOV1111TDNDAAAAAAAAAAA5At3pieQS2KxmFauXKmFCxeqqqqqz+u2bNmiXbt26Te/+Y3eeeedYZxharW1BeU48UxPAwAAADnCNA2dd54v09MA+kWtAwAAgIHK5jqHgGcQnnrqKTU3N+tnP/tZn9eEQiH94Ac/0O23364LLrggpwMex4lT9AAAAAAYcah1AAAAMBIQ8AxQW1ubHn74Yd15550qKSnp87rHHntMkUhEy5YtG8bZAchWhiFZlkcej0tutymX673OmLGYo2jUUSQSk21HFGeNAQAAAAAAAOiG9bW+cQbPAK1evVqjR4/WokWL+rymqalJ69ev19133y2fLzu3bAEYHoZhyO+3VFLiV3FxgQoKPHK7XTIMI/nldrtUUOBRcXGBSkr88vstGYaR6akDAIA89uijj6qqqkqf/vSnezy3c+dO3XLLLZo6dao++tGPatWqVQoGgxmYJQAAAPIB62v9YwfPABw8eFDPPPOM7r33XrW2tiYft21bkUhETU1N8vv9evjhhzV+/Hh95CMfUVNTkyTp2LFjkqQTJ06oqalJEydOlGmSqwEjmWW55fdbyX/X6w8c07bdzdp/uF2NLSdlR2KyPC5VlI/SlEljNGfqRFVXlqqw0CvLcisQsGXb0Qx/CgAAkG9aWlq0du1aFRUV9Xhu7969uv3221VZWanly5erpaVFGzZs0MGDB7Vu3boMzBYAAAAjGetrA2PE4/m2aWnwXnnlFS1evPis1yxevFhvvvmm/vd///es17366qsaNWpUKqeXNsePB+hLDQySz2epqMgrSdrV0Kp1m+rVeKSj39dVlBdr6fxqTasqkyR1doYVDNppnSsAAKlmmobGjvVnehoYorvvvlsnTpyQ4zhqa2vTr371q+Rzf/M3f6O9e/eqrq5Ofn/X9/jZZ5/V/fffr/Xr12v27NmZmvaQUOsAAABkr2xbX8vmOoeAZwBOnDihnTt39nh89erVCgaDuu+++zRp0iR1dHSovb292zX79u3TT37yEy1dulTTpk3TNddcI4/HM1xTPycUPcDgJH75RKOO1m7co7odhwY9Ru3MyVq2oEZutyTPcikAACAASURBVEnIAwDIOdlc+ODsXn31Vd12223auHGjVq1a1S3gCQQCmjFjhm677Tb94z/+Y/I14XBYM2bM0A033KDvfOc7mZr6kFDrAAAAZKdsXF/L5jqHFm0DUFJSouuuu67H40888YQk9fpcQnFxsSTpQx/60FmvA5DbLMud/OWzcsMr2tnQ2v+LelG345Ba207pgSUzTo8Xy4vtpAAAIHNisZhWrlyphQsXqqqqqsfzDQ0Nikajqq6u7va41+vV5Zdfrr179w7XVAEAADCCsb42eBwGAwDnKHHgmySt3bhnyL98EnY2tGrtxj2SlHcHwwEAgOH31FNPqbm5WXfddVevzx89elSSVFZW1uO5cePGdTunFAAAABgK1teGhh085+DJJ5/s95oZM2aooaFhGGYDIFN8Pq9M09TOhtYhbRvtTd2OQ7q6ZqKmVZXJ5/MqEKBVGwAASL22tjY9/PDDuvPOO1VSUtLrNaFQSFLXjp33sywr+TwAAAAwVKyvDQ07eADgHBiGZFld52qt31Sf0rHXbe4az7I8GqE3GQAAgAxbvXq1Ro8erUWLFvV5TUFBgaSuM3fez7bt5PMAAADAULC+NnQEPABwDizLI9M0VH/gmBqPdKR07MaWDtUfOCbTNJK/5AAAAFLl4MGDeuaZZ3TrrbeqtbVVTU1Nampqkm3bikQiampqUnt7u8aNGydJvbZiO3r0aK+t2wAAAICBYn1t6Ah4AOAceDwuSdK23c1pGX/76XET7wMAAJAqR44ckeM4WrVqlebOnZv82r17tw4ePKi5c+dqzZo1uvTSS+V2u1Vf3/1uynA4rL179+qyyy7L0CcAAADASMD62tBxBg8AnAO3uysn33+4PS3jJ8ZNvA8AAECqXHLJJVqzZk2Px1evXq1gMKj77rtPkyZNUnFxsWbNmqXNmzfrzjvvlN/vlyRt2rRJnZ2dqq2tHe6pAwAAYARhfW3oCHgA4By4XF2/GBpbTqZl/MS21MT7AAAApEpJSYmuu+66Ho8/8cQTktTtubvvvltf+MIXdOutt+rzn/+8Wlpa9Pjjj2v27Nn62Mc+NmxzBgAAwMjD+trQEfAAQArYkVh6xg1H0zLucEoclOfxuOR2m91+mcZijqJRR5FITLYdUTyewYkCAIA+ffCDH9Tjjz+uhx56SN/97nfl8/m0cOFC3XPPPZmeGgAAAEYI1tcGj4AHQMaMpIV/y+NSKJz6X0KWN3f/M2
0Yhnw+b/KgvN643S653S4VFHjk81my7YiCwbDi2f4NBwBgBHvyySd7ffyqq67SU089NcyzAQAAQL5gfW3wRu4nA5C1RtLCfyzmyO12qaJ8lPY1tqV8/Irxxcn3ySWW5Zbfb8k0u0K7+gPHtG13s/Yfbldjy0nZkZgsT9c/tymTxmjO1ImqrixVYaFXluVWIGDLtkfu3RUAAAAAAADowvra0BHwABhWI23hPxrt+gU0ZdKYtPwCmjJpTPJ9coXPZ6moyCtJ2tXQqnWb6pO9Ts8UCse0r7FN+xrb9JuX3lZFebGWzq/WtKoyjRpVqM7OsIJBe7inDwAAAAAAgGHE+trQEfAAGDYjceE/EompoMCjOVMn6jcvvZ3y8WdPnZh8n1yQ+B5Ho47Wbtyjuh2HBvzaxpYOrXjsZdXOnKxlC2qSPyvZ8r0GAAAAAABA6rG+NnRm/5cAwLk7c+F/zbOvacVjL/ca7vQmsfC/5tnXFI06Kiryyuez0jzjgbHtiBwnrurK0uR2z1SpKC9WdWWpHCcu246kdOx0sCx38nu8csMrgwp3zlS345BWbngl+b22LO5FAAAAAAAAGKlYXxs6Ah4AaTeSF/7jcSV/OSy9sTqlYy+d3zWebUeUZUcP9WAYhvz+rtBt7cY92tnQek7j7Wxo1dqNeyRJfr8lw+j9rCYAAAAAAADkNtbXho6AB0Ba5cPCfzAYluM4mlZVptqZk1MyZu3MyZpWVSbHcRQMhlMyZjr5fF6ZpqmdDa1DDvDer27HIe1qaJVpmvL5vCkZEwAAAAAAANmH9bWhIeABkFb5sPAfj8cVCHSdE7NsQY2mV5Wd03jTq8q0bEGNJCkQsBXP8tsLDEOyLI8kaf2m+pSOvW5z13iW5VEWZHkAAAAAAABIg3xfXxsqAh4AaZNPC/+2HVVnZ1hut6kHlswY8p0GtTMn64ElM+R2m+rsDMu2oymeaepZlkemaaj+wLEBn6s0UI0tHao/cEymaSR/lgAAAAAAADDy5PP62lBl/gALACPWcCz8V1eWyrI8CoUyf0haMNh1l0FRkVdfu+kKXV0zUes216uxpf/PXlFerKXzqzXt9N0JnZ3h5HjZzuNxSZK27W5Oy/jbdzerurJUHo8rK77PAAAAAAAASI98XV8bKgIeAGmTjwv/waCtaDQmv9/StKoyrfnmtao/cEzbdzdr/+F2NR7pkB2OyvK6VTG+WFMmjdHsqRNVXVkqSXIcR4GAnVN3FrjdXZtB9x9uT8v4iXET7wMAAAAAAICRKx/X14aKgAdA2uTrwr9tRxUOx+TzeWVZHlVXliZ/wfTFceKy7YiCwXDO9QR1ubr++Te2nEzL+IndX4n3AQAAAAAAwMiWb+trQ0XAAyBt8nnhP3EwXDBoy7I88nhccrvNbnONxRxFo44ikZhsO6Jc/71jR2LpGTc88u+2AAAAAAAAQHf5uL42WAQ8ANIunxf+43EpFIpkTQu5dLI8LoXCqf9eW15+VQEAAAAAAOSrfFpfG6zsu+0dwIhjnT6LJ+XjsvCfFWIxR5JUUT4qLeNXjC/u9j4AAAAAAAAACHgApBEL//khGu365z9l0pi0jJ8YN/E+AAAAAAAAAAh4AKQRC//5IXK6Bd+cqRPTMv7s0+NG0tTqDwAAAAAAAMhFBDwA0oaF//xg2xE5TlzVlaXJXVWpUlFerOrKUjlOXLZNn1UAAAAAAAAggYAHQNqw8J8f4nElvwdLb6xO6dhL53eNZ9sRxeMpHRoAAAAAAADIaQQ8ANKGhf/8EQyG5TiOplWVqXbm5JSMWTtzsqZVlclxHAWD4ZSMCQAAAAAAAIwUBDwA0oqF//wQj8cVCNiSpGULajS9quycxpteVaZlC2okSYGArTgpHgAAAAAAANANAQ+AtGLhP3/YdlSdnWG53aYeWDJjyIFe7czJemDJDLndpjo7w7LtaIpnCgAAAAAAMskwpIICj4qLC3TeeUUqLfUnv847r0jFxQUqKPDIMDI9UyC7GXFWR9GH48cDchx+PJAaPp+loiKvolFHazfuUd2OQ4Meo3bmZC1bUJNc+A8G7TTMFOcq8b2WpF0NrVq3uV6NLR39vq6ivFhL51dr2ukQkO8xAOQe0zQ0dqw/09MA+kWtAwBAZhiGIZ/PK8vyyDT7T28SZy8Hg2Fu8kXGZHOdQ8CDPlH0INVY+M8fluWW32/JNLs2itYfOKbtu5u1/3C7Go90yA5HZXndqhhfrCmTxmj21ImqriyVJDmOo0DAZucOAOSgbC58gDNR6wAAMPx6WyvYllgraDkpOxKT5XGponyUpkwaozmsFSBLZHOdQ8CDPlH0IB1Y+M8f3JUDAPknmwsf4EzUOgAADK8eN/1uqlfjEW76RW7I5jqHgAd9ouhBurDwn18MQ7Isjzwel9xuUy7Xe8e/xWKOolFHkUhMth0R314AyG3ZXPgAZ6LWAQBg+NC2H7kum+scd6YnACD/xONxBQK2gkGbhf88EI9LoVBEoVAk01MBAAAAAADDyLLcyXBn5YZXtLOhdUjj1O04pNa2U3pgyYzT48Xo8AKIgAdABrHwDwAAAAAAMDIZhiG/35Ikrd24Z8jhTsLOhlat3bhHX7vpCvn9lsLhGJ1ekPfM/i8BAAAAAAAAAGDgfD6vTNPUzobWIbVl603djkPa1dAq0zTl83lTMiaQywh4AAAAAAAAAAApkziPV5LWb6pP6djrNneNZ1keGf0f7QyMaAQ8AAAAAAAAAICUsSyPTNNQ/YFjajzSkdKxG1s6VH/gmEzTSIZIQL4i4AEAAAAAAAAApIzH45IkbdvdnJbxt58eN/E+QL5yZ3oCAAAAAIDM+POf/6yf/vSn+tOf/qRjx46poKBAU6ZM0Ze//GVde+21yeuWL1+ujRs39nj9RRddpLq6uuGcMgAAyAFud9e+gv2H29MyfmLcxPsA+YqABwAAAADyVHNzs4LBoBYsWKCysjKdOnVKzz//vL761a/q29/+tm6++ebktV6vV6tWrer2+uLi4uGeMgAAyAEuV1fw0thyMi3jJ9q+Jd4HyFcEPAAAAACQp6655hpdc8013R5btGiRPve5z+nxxx/vFvC43W7deOONwz1FAACQw+xILD3jhqNpGRfINUScAAAAAIAkl8ulCRMmqKOj54HIsVhMgUAgA7MCAAC5yErTGTmWl30LgMQOHgAAAADIe52dnQqFQgoEAnrhhRf04osv6lOf+lS3a06dOqUrr7xSp06d0ujRozVv3jz9wz/8g3w+X4ZmDQAAslUs5sjtdqmifJT2NbalfPyK8cXJ9wHyGQEPAAAAAOS5733ve3r66aclSaZp6hOf+IRWrFiRfH7cuHFaunSpPvCBDygej2vbtm36j//4D7355pt68skn5XZTWgIAgPdEo10Bz5RJY9IS8EyZNCb5PkA+469wAAAAAMhzt912m2pra9Xa2qotW7bIcRxFIpHk83//93/f7fp58+bpwgsv1L/8y79o69atmjdv3nBPGQAAZLFIJKaCAo/mTJ2o37z0dsrHnz11YvJ9gHzGGTwAAAAAkOcqKyt19
dVX67Of/azWrl2rzs5O3XHHHYrH432+5vbbb5dpmvrDH/4wjDMFAAC5wLYjcpy4qitLk+3UUqWivFjVlaVynLhsO9L/C4ARjIAHAAAAANDN9ddfr9dff11vv933HbcFBQUaM2aM3n333WGcGQAAyAXxuJLhy9Ibq1M69tL5XePZdkRnuRcFyAsEPAAAAACAbkKhkCQpEAj0eU0gEFBbW5tKSkqGa1oAACCHBINhOY6jaVVlqp05OSVj1s6crGlVZXIcR8FgOCVjArmMgAcAAAAA8tTx48d7PBaJRLRp0yYVFBSosrJStm33GvQ88sgjisfjmjNnznBMFQAA5Jh4PK5AwJYkLVtQo+lVZec03vSqMi1bUCNJCgTss7aSBfKFO9MTAAAAAABkxooVKxQIBPThD39Y48eP19GjR/Xcc8/prbfe0vLly+Xz+dTU1KQFCxZo3rx5uvjiiyVJ27dv1+9//3vNmTNHc+fOzfCnAAAA2cq2o+rsDKuoyKsHlszQ2o17VLfj0KDHqZ05WcsW1MjtNtXZGZZtR9MwWyD3GHGiTvTh+PGAHIcfDwAAAAyMaRoaO9af6WlgEH7961/rP//zP7Vv3z61t7fL5/Ppgx/8oBYtWpQMbk6ePKmVK1dq9+7dam1tVSwW0+TJk/WZz3xGS5YskcfjyfCnGDxqHQAAhpfPZ6moyCtJ2tXQqnWb69XY0tHv6yrKi7V0frWmnd7909kZVjBop3WuwPtlc51DwIM+UfQAAABgMLK58AHORK0DAMDwsyy3/H5Lptl1akj9gWPavrtZ+w+3q/FIh+xwVJbXrYrxxZoyaYxmT52o6spSSZLjOAoEbHbuICOyuc6hRRsAAAAAAAAAIK1sO6pwOCafzyvL8qi6sjQZ4PTFceKy7YiCwXCfZ+4YhmRZHnk8Lrndplyu946dj8UcRaOOIpGYbDsitjpgpCHgAQAAAAAAAACkXTweVyBgKxi0zzmUMQwjGRaZptHrNW63S263SwUFHvl8Vr9hEZBrCHiG4NFHH9Xq1at1ySWX6Fe/+lXy8X/913/VCy+8oMbGRgWDQU2YMEHXXHONvvrVr6qkpCSDMwYAAAAAAACA7BCPS6FQRKFQZEiv763d27ZEu7eWk7IjMVkelyrKR2nKpDGac7rdW2GhV5blpt0bRgzO4BmklpYW1dbWyjAMnX/++d0Cnr/7u7/Teeedp4svvlg+n09vvfWWnnnmGY0dO1b/9V//paKiogzOfPDoSw0AAIDByObe1MCZqHUAAMhdPp+loiKvJGlXQ6vWbapX45GOfl9XUV6spfOrNa2qTJLU2RlWMGinda4YGbK5ziHgGaS7775bJ06ckOM4amtr6xbw9Gbr1q36+te/rh//+MeaN2/eMM0yNSh6AAAAMBjZXPgAZ6LWAQAgNyXCnWjU0dqNe1S349Cgx6idOVnLFtTI7TYJeTAg2VznmP1fgoRXX31VW7du1b333jvg15x//vmSpJMnT6ZrWgAAAAAAAAAwolmWOxnurNzwypDCHUmq23FIKze8omjUUVFRV8s2IFcR8AxQLBbTypUrtXDhQlVVVfV5XTwe14kTJ3T06FH98Y9/1KpVq+RyufSRj3xkGGcLAAAAAAAAACODYRjy+y1J0tqNe7SzofWcxtvZ0Kq1G/dIkvx+S4ZhnPMcgUwgnhygp556Ss3NzfrZz3521uuOHTum2bNnJ/9/eXm5HnroIVVWVqZ5hgAAAAAAAAAw8vh8XpmmqZ0NrUPeufN+dTsO6eqaiZpWVSafz6tAgFZtyD0EPAPQ1tamhx9+WHfeeadKSkrOeu3o0aP1+OOPy7ZtvfHGG/rtb3+rzs7OYZopAAAAAAAAAIwchiFZlkeStH5TfUrHXre5Xmu+ea0sy6Ng0Ban1SPXEPAMwOrVqzV69GgtWrSo32u9Xq+uvvpqSdLHP/5xzZo1S7fccovGjh2rj3/84+meKgAAAAAAAACMGJblkWkaqj9wTI1HOlI6dmNLh+oPHFN1Zaksy6NQKJLS8YF04wyefhw8eFDPPPOMbr31VrW2tqqpqUlNTU2ybVuRSERNTU1qb2/v8/XTp0/XuHHj9Nxzzw3jrAEAAAAAAAAg93k8LknStt3NaRl/++lxE+8D5BJ28PTjyJEjchxHq1at0qpVq3o8P3fuXC1evFj33Xdfn2OEw2F1dKQ2XQYAAAAAAACAkc7t7tqjsP9w3zfZn4vEuIn3AXIJAU8/LrnkEq1Zs6bH46tXr1YwGNR9992nSZMmqbOzU4ZhqLCwsNt1W7du1bvvvqvq6urhmjIAAAAAAAAAjAguV1fw0thyMi3jJ9q+Jd4HyCUEPP0oKSnRdddd1+PxJ554QpKSz+3du1e33367brjhBl188f9n796jo6rP/Y9/9lyyk8wkhBATRAlqwKBNA9FakEpR1ErFclOs/mqpUipW7DnayzqiR08Fa63VVtoDHjig9ai1qFylCh7FKsilKkhOSgWhYsAAIUAkM0n2XH9/xExBLrnNZSd5v9ZiLZnZ8+wHZy/Idz/7+T7nyOFwqKKiQsuXL9cZZ5yhSZMmJTVvAAAAAAAAAOgqrGA4MXEDoYTEBZKBAk+cFBQU6KqrrtKGDRu0dOlSBYNBnXHGGfrOd76j2267TT179kx1igAAAAAAAADQKZlupxoD8S/ymGncIkfnxdXbTs8888wxv8/NzdWMGTNSlA0AAAAAAAAAdD3hcEQul1OFvbO1vfJw3OMXFmTFzgN0NmwsCAAAAAAAAACwpVCoqfDSv29OQuI3x20+D9CZUOABAAAAAAAAANhS8PPZO8MH9UlI/Es+jxtM0IwfIJEo8AAAAAAAAAAAbMmygopEoiopyottpxYvhb2zVFKUp0gkKssKxjU2kAwUeAAAAAAAAAAAthSNKlZ8mTK2JK6xp4xpimdZQUWjcQ0NJAUFHgAAAAAAAACAbfn9AUUiEZUV52vU0H5xiTlqaD+VFecrEonI7w/EJSaQbBR4AAAAAAAAAAC2FY1G5fNZkqSp40t1QXF+h+JdUJyvqeNLJUk+n6Uo7TvopCjwAAAAAAAAAABszbJCqq8PyOVy6L7JQ9rdyTNqaD/dN3mIXC6H6usDsqxQnDMFkseIUp7ESRw86FMkwuUBAACA1nE4DPXq5U11GkCLWOsAANB5eTymMjPTJEmbt1Vr/vIKVe6ra/Fzhb2zNGVMico+7/6prw/I77cSmiu6Bjuvcyjw4KRY9AAAAKAt7LzwAY7GWgcAgM7NNF3yek05HE0bVFXsrNHaLVXasbtWlfvrZAVCMtNcKizIUv++ObpkUB+VFOVJkiKRiHw+i84dtJqd1zmuVCcAAAAAAAAAAEBrWVZIgUBYHk+aTNOtkqK8WAHnZCKRqCwrKL8/wMwddBkUeAAAAAAAAAAAnUo0GpXPZ8nvt2SabrndTrlcDjmd/xw7Hw5HFApFFAyGZVlBUddBV0OBBwAAAAAAAADQKUWjUmNjUI2NwVSnAiSdo+VDAAAAAAAAAAAAYCcUeAAAAAAAAAAAADoZCjwAAAAAAAAAAACdDDN4
AAAAAKCb+uijj/T73/9ef/vb31RTU6P09HT1799f3//+9zVy5Mhjjt25c6ceeughbdq0SW63WyNGjND06dOVm5ubouwBAACA7o0CDwAAAAB0U1VVVfL7/Ro/frzy8/PV0NCg1157TT/84Q81Y8YMffvb35Yk7du3T9/5zneUlZWlu+66S/X19XryySe1fft2vfjii0pLS0vxnwQAAADofoxoNBpNdRKwp4MHfYpEuDwAAADQOg6HoV69vKlOAx0UDoc1YcIEWZallStXSpJ+/vOfa8mSJXr11VfVp08fSdK6det0yy23HFMI6ixY6wAAAKC17LzOYQYPAAAAACDG6XTq9NNPV11dXey11157TZdeemmsuCNJw4YN01lnnaVXX301FWkCAAAA3R5btAEAAABAN1dfX6/Gxkb5fD6tXr1ab7/9tr75zW9Kkvbv36+DBw+qpKTkuM+Vlpbq7bffTna6AAAAAESBBwAAAAC6vYcfflgLFy6UJDkcDl155ZW6//77JUnV1dWSpNNOO+24z5122mmqra1VIBBgDg8AAACQZBR4AAAAAKCb+973vqdRo0apurpar776qiKRiILBoCTJsixJOmEBxzRNSVJjYyMFHgAAACDJmMEDAAAAAN1cUVGRhg0bpnHjxmnu3Lmqr6/Xbbfdpmg0GiviBAKB4z7XXPxJT09Par4AAAAAKPAAAAAAAL7gqquu0v/93//p448/Vn5+viTpwIEDxx134MAB5eTk0L0DAAAApABbtAEAAAAAjtHY2ChJ8vl8Ouecc5Sbm6uKiorjjisvL9fAgQOTnR4AAG1iGJJpuuV2O+VyOeR0/vOZ93A4olAoomAwLMsKKhpNYaIA0EZ08AAAAABAN3Xw4MHjXgsGg1q2bJnS09NVVFQkSfrGN76hv/zlL9q7d2/suPXr12vXrl0aNWpU0vIFAKAtDMOQ12sqN9errKx0pae75XI5ZRhG7JfL5VR6ultZWenKzfXK6zVlGEaqUweAVjGiUerSOLGDB32KRLg8AAAA0DoOh6FevbypTgNtMG3aNPl8Pl100UUqKCjQgQMH9PLLL+sf//iH7r77bt1yyy2SpL1792rcuHHKzs7WpEmTVF9frwULFqigoECLFi3qdFu0sdYBgK7PNF3yek05HE3Pt1fsrNGaLVXasbtWlfuOyAqGZbqdKuydrf59czR8UB+VFOVJkiKRiHw+S5YVSuUfAYBN2HmdQ4EHJ8WiBwAAAG1h54UPTuzPf/6zXnrpJW3fvl21tbXyeDz60pe+pJtuukmXX375Mcd+9NFHevjhh/X+++/L7XZrxIgRuvvuu5WXl5ei7NuPtQ4AdG0ej6nMzKaHDzZvq9b8ZRWq3F/X4ucKe2dpypgSlRU3zZ+rrw/I77cSmisA+7PzOocCD06KRQ8AAADaws4LH+BorHUAoOtqLu6EQhHNXVKulRs+aXOMUUP7aer4UrlcDoo8AGy9zmEGDwAAAAAAAIBOzzRdseLOzCc3tqu4I0krN3yimU9uVCgUUWZmmkzTFedMASA+KPAAAAAAAAAA6NQMw5DXa0qS5i4p16Zt1R2Kt2lbteYuKZckeb2mDMPocI4AEG8UeAAAAAAAAAB0ah5PmhwOhzZtq253584XrdzwiTZvq5bD4ZDHkxaXmAAQTxR4AAAAAAAAAHRahiGZpluStGBZRVxjz1/eFM803aKJB4DdUOABAAAAAAAA0GmZplsOh6GKnTWq3F8X19iV++pUsbNGDocRKyIBgF1Q4AEAAAAAAADQabndTknSmi1VCYm/9vO4zecBALugwAMAAAAAAACg03K5mm5x7thdm5D4zXGbzwMAdsHfSgAAAAAAAAA6Laez6RZn5b4jCYnfvO1b83kAwC74WwkAAAAAAABAp2cFw4mJGwglJC4AdBQFHgAAAAAAAACdnpmgGTlmmishcQGgoyjwAAAAAAAAAOi0wuGIJKmwd3ZC4hcWZB1zHgCwC1uXnzdt2qRt27bpyJEjCgaDpzz2jjvuSFJWAAAAAAAAAOwiFIrI5XKqf98cba88HPf4/fvmxM4DAHZiywLPW2+9pQceeEB79+5t9Wco8AAAAAAAAADdTzAYVnq6W8MH9dEr73wc9/iXDOoTOw8A2IntCjzr16/X7bffrnC46S/Mfv36qVevXnI6E7OHJgAAAAAAAIDOy7KC8nhMlRTlqbAgS5X76+IWu7B3lkqK8hSJRGVZp95hCACSzXYFntmzZyscDuvLX/6yfvOb36hv376pTgkAAAAAAACATUWjTUWejIw0TRlbovvnrY9b7CljSiQ1xY9G4xYWAOLCkeoEvuhvf/ubDMPQY489RnEHAAAAAAAAQIv8/oAikYjKivM1ami/uMQcNbSfyorzFYlE5PcH4hITAOLJdgUet9stj8ejwsLCVKcCAAAAAAAAoBOIRqPy+SxJ0tTxpbqgOL9D8S4oztfU8aWSJJ/PUpT2HQA2ZLsCzznnnKPGxkZZlpXqVAAAAAAAAAB0EpYVUn19QC6XQ/dNHtLuTp5RQ/vpvslD5HI5VF8fkGWF4pwpAMSH7Qo8N9xwg0KhkJYvX57qVAAAOChSUAAAIABJREFUAAAAAAB0In6/FSvyTJs4WDNuvViFvbNa9dnC3lmacevFmjZxcKy44/fzEDoA+zKiNuwvvPvuu7Vq1So9+OCDGj16dKrT6bYOHvQpErHd5QEAAACbcjgM9erlTXUaQItY6wBA12eaLnm9phyOpufbK3bWaO2WKu3YXavK/XWyAiGZaS4VFmSpf98cXTKoj0qK8iRJkUhEPp9F5w4ASfZe59iywCNJv//97zVnzhydfvrpKikpkcfjOemxhmHooYceSmJ23QOLHgAAALSFnRc+wNFY6wBA92AYhjyeNJmmWw6H0eLxkUhUlhWU3x9g5g6AGDuvc2xZ4HnhhRf0yCOPyO/3n/IvU8MwFI1GZRiG/v73vycxw+6BRQ8AAADaws4LH+BorHUAoHsxDMk03XK7nXK5HHI6/zm1IhyOKBSKKBgMy7KCst+dUgCpZud1jivVCXzR66+/rvvvv1+SlJGRobKyMvXq1Usul+1SBQAAAAAAAGBz0ajU2BhUY2Mw1akAQFzZrmoyf/58SdLw4cP129/+Vl6vPStjAAAAAAAAAAAAqeJo+ZDk+uijj2QYhn7xi19Q3AEAAAAAAAAAADgB23XwuFwuZWVlKT8/P9WpnNQTTzyhxx9/XAMGDNCKFSskSQ0NDVq8eLHeeOMNbd++XX6/X/369dP111+vb3/723I6nSnOGgAAAAAAAEgO5t4AQOLZroNn4MCB8vv98vl8qU7lhPbt26e5c+cqMzPzmNd3796tmTNnKhqN6uabb9a//du/6cwzz9QDDzyge+65J0XZAgAAAAAAAMljGIa8XlO5uV5lZaUrPd0tl8spwzBiv1wup9LT3crKSldurlderynDMFKdOgB0OkY0aq8a+RtvvKFp06bpxz/+sW699dZUp3Ocu+66S4cOHVIkEtHhw4djHTyHDh3SwYMHNWDAgGOOnz59uhYvXqz
XXntN/fr1S0XK7XbwoE+RiK0uDwAAANiYw2GoVy+2WYb9sdYBgMQwTZe8XlMOR9Mz5RU7a7RmS5V27K5V5b4jsoJhmW6nCntnq3/fHA0f1EclRXmSpEgkIp/PkmWFUvlHAIDj2HmdY7st2i6//HJNmzZNv/vd7yRJkyZNUnp6eoqzavLuu+9q1apVWrJkiR588MFj3svNzVVubu5xn7nyyiu1ePFi7dy5s9MVeBKFFt3Oi+8OAAAAAACciMdjKjMzTZK0eVu15i+rUOX+uuOOawyEtb3ysLZXHtYr73yswt5ZmjKmRGXF+crOzlB9fUB+v5Xs9AGgU7JdgWfSpEmSpIyMDP32t7/VnDlzVFRUJI/Hc9LPGIahp59+OqF5hcNhzZw5U9ddd52Ki4tb/bmamhpJUs+ePROVWqdhGIY8njSZplsOx4nbbl0uZ6xN1+MxZVlB+f0B2azRrNvhuwMAAAAAACfTXNwJhSKau6RcKzd80urPVu6r0/3z1mvU0H6aOr40ViSiyAMALbNdgeevf/3rMb9vbGzU3/72t1N+Jhl7dP7pT39SVVWV/vCHP7T6M4FAQE8//bTOPPNMffnLX05ccp1Ae1t0MzLSZJouWnRTiO8OAAAAAACcjGm6YsWdmU9u1KZt1e2Ks3LDJ6o+3KD7Jg/5PF6Y+wkA0ALbFXjuuOOOVKdwnMOHD+t3v/udbr/99hNuw3YyM2fO1I4dOzRv3jy5XLb7X500tOh2Xnx3AAAAXVt5ebmWLl2qjRs36tNPP1VOTo4GDRqkO++8U2effXbsuLvvvltLliw57vNnn322Vq5cmcyUAQA2YhiGvF5TkjR3SXm7izvNNm2r1twl5Zo2cbC8XlOBQJidQQDgFGxXdbBjgefxxx9Xjx49dNNNN7X6M/Pnz9cLL7ygf/3Xf9WIESMSmJ290aLbefHdAQAAdH3z58/Xpk2bNGrUKBUXF+vAgQN67rnnNGHCBC1cuFDnnntu7Ni0tLTjZpFmZWUlO2UAgI14PGlyOBzatK26TfcNTmXlhk80rLSPyorz5fGkyefjXgIAnIztCjx2s2vXLr3wwgu65557VF39z6cQLMtSMBjUnj175PV6lZOTE3tv8eLFevTRR3XDDTfo9ttvT0XatkCLbufFdwcAANA93HzzzXr00UeVlpYWe+3qq6/Wt771Lc2bN0+PPvpo7HWXy6WxY8emIk0AgA0ZhmSabknSgmUVcY09f3mFZv9spEzTLb/fEk08AHBijlQnYHf79+9XJBLRgw8+qMsvvzz2a8uWLdq1a5cuv/xyzZ49O3b866+/rn//93/XN77xDf3Hf/xHCjNPrUS16EqS12smZe5Sd8V3BwAA0H1ccMEFxxR3JOmss87SgAED9I9//OO448PhsHw+X7LSAwDYmGm65XAYqthZc8Lt3Duicl+dKnbWyOEwYkUkAMDx6OBpwYABA44p4DR7/PHH5ff7de+996pv376SpHfffVc//vGP9ZWvfEWPPvpobCh9d0SLbufFdwcAANC9RaNR1dTUaMCAAce83tDQoAsvvFANDQ3q0aOHRo8erZ/+9KfyeDwpyhQAkEput1OStGZLVULir91SpZKiPLndTjU2BhNyDgDo7GxX4DnvvPPa/BnDMLR169YEZCPl5ubqiiuuOO71p59+WpJi73366af64Q9/KMMwdNVVV+nVV1895vji4mINHDgwITnaDS26nRffHQAAAJYvX679+/frX/7lX2KvnXbaaZoyZYrOP/98RaNRrVmzRn/84x/14Ycf6plnnpHLZbulJQAgwVyupgebd+yuTUj85rjN5wEAHM92P4VHO+ld3z179qiurqkddcaMGce9f8cdd3SbAk8yWnRLivJkmm6e4IgzvjsAAIDk8fl8sixLOTk5cjqdqU5HkrRz507NmDFDZWVlGj9+fOz1n/zkJ8ccN3r0aJ111ln67W9/q1WrVmn06NHJThUAkGJOZ1PhpXLfkYTEb74v0XweAMDxbFfg+Z//+Z9Tvl9XV6fy8nK98MILikajuv/++5WXl5ek7P7pmWeeOeb3Q4YM0bZt25Kehx3Rott58d0BAAAkxp49e7R27Vq9++672rx5sw4cOKBQKBR7PysrS+ecc46++tWv6qKLLtKwYcOSXvQ5cOCApk6dqqysLM2aNavF8998882aNWuW1q1bR4EHALoxKxhOTNxAqOWDAKCbs12B56tf/WqLx1x++eWaNGmSJk2apN///vdatGhREjJDa9Gi23nx3QEAAMRPJBLR66+/roULF2r9+vWKRqMn3bHgyJEj+uCDD7Rlyxb993//t3r16qVrr71WEydO1JlnnpnwXOvq6vSDH/xAdXV1eu6551RQUNDiZ9LT05WTk6PPPvss4fkBAOzLdDvVGIh/kcdMs91tSwCwnU77N2WvXr10//3363vf+57mzp2ru+66K9Up4XO06HZefHcAAADx8frrr+uxxx7Trl27YkWdwsJClZaW6vzzz1fPnj3Vo0cPpaenq7a2Vp999pn27Nmj8vJyVVRUqKamRvPmzdOCBQs0ceJE/ehHP1Jubm5CcrUsS7fddpt27dqlp556Sv3792/V53w+nw4fPpywvAAA9hYOR+RyOVXYO1vbKw/HPX5hQVbsPACAE+u0BR6pqdvHNE2tWrWKAo8N0aLbefHdAQAAtN93v/tdvffee4pGoxo4cKDGjBmja665Rvn5+a36fCQS0fr167V8+XK9/vrrev755/Xyyy/rkUce0ciRI+Oaazgc1p133qkPPvhAc+bMUVlZ2XHHWJalYDAor9d7zOtz5sxRNBrV8OHD45oTAKBzCIWaCjz9++YkpMDTv29O7DwAgBPr1AUewzDkcDi0d+/eVKeCE6BFt/PiuwMAAGi/d999V5dccol+9KMfadCgQW3+vMPh0Ne+9jV97WtfU0NDg5555hk99dRT2rp1a9wLPA8//LBWr16tyy67TLW1tVq2bNkx748dO1YHDhzQ+PHjNXr0aJ1zzjmSpLVr1+qtt97S8OHDdfnll8c1JwBA5xAMhpWe7tbwQX30yjsfxz3+JYP6xM4DADixTn23taKiQg0NDerRo0eqU8FRaNHtvPjuAAAAOm7hwoXtKuycSEZGhm699VbddNNN+vTTT+MS82gffvihJOnNN9/Um2++edz7Y8eOVXZ2ti699FKtW7dOS5cuVTgcVr9+/fTjH/9YkydPlsPB9rsA0B1ZVlAej6mSojwVFmTFtmWPh8LeWSopylMkEpVlBeMWFwC6mk5b4CkvL9fdd98twzB0wQUXpDodHIUW3c6L7w4AAKDj4lXcOVpmZqYGDBgQ97jPPPNMi8dkZ2fr17/+ddzPDQDo3KLRpiJPRkaapowt0f3z1sct9pQxJZKa4n8+yg4AcAK2K/BMmjTplO8HAgHt3btX1dXVikajcrvd+uEPf5ik7NAatOh2Xnx3AAAAAACgtfz+gEzTpbLifI0a2k8rN3zS4ZijhvZTWXG+IpGI/P5AHLIEgK7LdgWev/71r60+tk+fPpoxY4ZKS0sTmBHaih
bdzovvDgAAAAAAtFY0GpXPZyk7O0NTx5eq+nCDNm2rbne8C4rzNXV8030+n89SlPYdADgl2xV47rjjjlO+73Q6lZ2drYEDB+qCCy6QYRhJygytRYtu58V3BwAAAAAA2sKyQqqvDygzM033TR6iuUvK29XJM2poP00dXyqXy6H6+oAsK5SAbAGgazGilMJxEgcP+hSJtO/yMAxDubmZcjgcmv3iB3Fr0Z02cbAikYgOHarnKY4E4bsDAADt5XAY6tXLm+o0bOG8887rcAzDMLR169Y4ZIMv6shaBwBwYh6PqczMNEnS5m3Vmr+8QpX7Wt4ZpLB3lqaMKVFZcb4kqb4+IL/fkmFIpumW2+2Uy+WQ0+mIfSYcjigUiigYDPMgKYCEs/M6x3YdPOgaaNHtvPjuAAAAOo6feQAA3Y3fbykUCsvrNVVWnK/ZPxupip01WrulSjt216pyf52sQEhmmkuFBVnq3zdHlwzqo5KiPElSJBKRz2cpEGiKYZpuORwn3rnH5XLK5XIqPd0tj8eUZQXl9wf49xdAt9MpO3g+++wzORwOZWVlpTqVLi0eT7U1P70RCkXi1qLr91sdygmtw3cHAADays5PtiXbwIEDZRiGzjjjDI0bN05nnHFGu+KMHz8+zplBooMHABLJMAx5PGmnLNAcrXlWr98fUFqaU16vKYejqVunYmeN1jQXiPYdkRUMy3Q7Vdg7W/375mj4CQpEbO0GIN7svM6xXYFn//79Wr9+vXJzc/X1r3/9mPc++ugj/du//Zv+/ve/S5LKysr0i1/8QmeffXYqUu3y4rXoiXeLLpKH7w4AALSFnRc+yXbNNddox44dkiSHw6GvfvWrmjBhgq666iqZppni7ECBBwASr61brB13D2JZhSr3cw8CQOrZeZ1juwLPnDlz9Pvf/17f//739dOf/jT2emNjo775zW9q3759x7Rb9u7dWytWrJDXa8//wZ1ZPBc9puk67gmMtrbo8gRGavDdAQCA1rLzwicVysvL9dJLL+nVV19VXV2dDMOQ1+vV1VdfrWuvvValpaWpTrHbosADAPbCLiIA7MzO6xzbFXi++93v6r333tOiRYt0/vnnx15//vnn9cADDygnJ0c/+clPlJ6erscee0z79+/XnXfeqalTp6Yw664p3ouejrTo2uwy7Xb47gAAQGvYeeGTSpZlaeXKlVq8eLH++te/KhqNyjAMFRUV6dprr9WYMWPUq1evVKfZrVDgAQD7ME2XsrMzFApFNPPJjR2eA3zf5CFyuRw6cqSBB04BxIWd1zm2K/CMHDlSe/fu1ebNm5Wenh57ffLkyVq/fr0eeOABXX/99ZKkNWvW6Ac/+IEGDRqkhQsXpirlLitRi562tujCPvjuAADAqdh54WMXn376qRYtWqSlS5eqqqpKhmHI6XRq+PDhuu6663TppZfK6XSmOs0ujwIPANiDYRjKzc2Uw+HQ7Bc/aFfnzheNGtpP0yYOViQS0aFD9Tx4CqDD7LzOsV2Bp6ysTC6XS++++27stUgkogsvvFCWZWn9+vXq0aNH7PWSkhJ5vV799a9/TVXKXRaLHgAAALSFnRc+drRu3TotWrRIb7zxhiyraRuZKVOm6Cc/+UmKM+v6WOsA3Q8PLNqT12sqIyNNm7ZV6z/mrY9b3Bm3Xqyy4nw1NATk87FVG4COsfM6x9HyIckVDocVCASOeW379u1qaGhQ//79Y8UdqWlYaXZ2turr65OdJgAAAAB0yLBhwzRjxgzdcccdcrlckprWQwCA+Gmaf2YqN9errKx0pae75XI5ZRhG7JfL5VR6ultZWenKzfXK6zVlGC1vT46OaS66SdKCZRVxjT1/eVM803SLrxJAV+ZKdQJfdNppp6mqqkq7d+9W3759JTVtxSY1dfd8UX19vXJycpKaIwAAAAB0xMaNG7Vo0SL97//+rxobGxWNRpWTk3PMHFIAQMeYpkterymHo+n55oqdNVqzpUo7dteqct8RWcGwTLdThb2z1b9vjoYP6qOSojxlZKTJNF3y+SxmuCRQ85zfip01qtxfF9fYlfvqVLGzRiVFeTJNtxobg3GNDwB2YbsCz+DBg1VVVaXZs2froYceUm1trZ5//nkZhqHhw4cfc+zu3bsVCAR02mmnpShbAAAAAGidqqoqLV68WEuXLtWnn36qaDQqp9Opr3/965owYYJGjhwpt9ud6jQBoEvweExlZqZJkjZvq9b8ZRUnLCI0BsLaXnlY2ysP65V3PlZh7yxNGVOisuJ8ZWdnqL4+IL+fLb4Swe1umjm3ZktVQuKv3VKlkqI8ud1OCjwAuizbFXi+973v6ZVXXtGyZcv02muvKRgMKhgMqm/fvrr00kuPOXbdunWSxFNuAAAAAGwpEAho1apVWrx4sTZu3KhoNKpoNKqzzjpLEyZM0Lhx45Sfn5/qNAGgS2ku7oRCEc1dUq6VGz5p9Wcr99Xp/nnrNWpoP00dXxorElHkiT+Xq6mzasfu2oTEb47bfB4A6IpsV+ApLS3VQw89pAcffFB+v1+SdM455+g3v/lNbF/qZkuXLpUkDRkyJOl5AgAQLwx8BYCup7y8XIsWLdKrr76quro6RaNReTweffOb39SECRN0wQUXpDpFAOiSTNMVK+7MfHKjNm2rbleclRs+UfXhBt03ecjn8cJs1xZnzeueyn1HEhK/uWPr6PUVAHQ1RjRqz1tFjY2N2r59u7Kzs1VYWBjbL7VZIBDQK6+8omg0qssvv1zZ2dkpyrTrOnjQp0jElpcHAHQJhmHI40mL7T3dkkgkKssKyu8PyKb/fAPo5hwOQ716eVOdhi0MHDgwNqD7oosu0oQJEzRq1Cilp6enODNIrHWArsowDOXmZsrhcGj2ix+0qXPnZEYN7adpEwcrEono0KF6fg6Po7w8rwzD0JifLkvIg2wOQ1r26FhFo1HV1PjifwIA3Yad1zm26+Bplp6ertLS0pO+n5aWpnHjxp30/aeeekp+v1933HFHItIDAKBDGPgKAN3D6aefLklavHixFi9e3KbPGoahp59+OhFpAUCX5PGkyeFwaNO26rgUd6SmTp5hpX1UVpwvjydNPl9qt2rrit3/ptupxkA4/nHTbHvbEwDipsv+TbdgwQIdPHiQAg8AwHYY+AoA3UM0GlVVVZX27t3brie+mzuAAAAtay58SNKCZRVxjT1/eYVm/2ykTNMtv99KSeGkNd3/LpdTLpdT6elueTym7bv/w+GIXK6mh9q2Vx6Oe/zCgqzYeQCgq+qyBR4AAOyIga8A0D2MHz8+1SkAQLfSXPio2FlzwoenOqJyX50qdtaopChPpulWY2MwrvFb0lW7/0OhpgJP/745CSnw9O+bEzsPAHRVFHgAAEgSBr4CQPfxy1/+MtUpAEC34nY7JUlrtlQlJP7aLVUqKcqT2+1MaoGnK3f/B4Nhpae7NXxQH73yzsdxj3/JoD6x8wBAV+Vo+RAAANBRhmHI6zUlSXOXlLe7uNNs07ZqzV1SLknyek228QEAAEC35nI13eLasbs2IfGb4zafJxmO7v6f/eIHun/e+
lZ3JzV3/89+8QOFQhFlZqbJ4zETnHHbWFZQkUhUJUV5se3U4qWwd5ZKivIUiURlWcntuAKAZKLAAwBAEiRq4OvmbdVyOBzyeNLiEhMAAADojJzOpltclfuOJCR+c2Gl+TyJ9sXu//auIVZu+EQzn9wYK/KYpn0284lGFSu+TBlbEtfYU8Y0xbOsYEpmJgFAslDgAQAgwRI98FVqik8TDwAAALo7K0HbcVmB5G2J3J26//3+gCKRiMqK8zVqaL+4xBw1tJ/KivMViUTk9wfiEhMA7IoCDwAACZaMga8OhxErIgEAUuuBBx5QdXXHbsZ90cqVK7VixYq4xgSArsj8fBZP3OOmJa/zpTt1/0ejUfl8TbOBpo4v1QXF+R2Kd0FxvqaOL5Uk+XyWorTvAOjiKPAAAJBgyRj4evR5AACp9fzzz+vKK6/Ugw8+qJ07d7Y7TmNjo5YtW6Zvfetbuuuuu7Rr1674JQkAXUw4HJEkFfbOTkj85hkxzedJlO7Y/W9ZIdXXB+RyOXTf5CHt7uQZNbSf7ps8RC6XQ/X1AVlW8rquACBV7LPxJgAAXVRXHPgKADi5mTNnatasWXr22Wf13HPP6bzzztM111yjr3zlKzrvvPPkdp+847KqqkpbtmzR6tWr9cYbb6ihoUHRaFRXXXWVJkyYkMQ/BQB0LqFQRC6XU/375mh75eG4x+/fNyd2nkRKRvd/SVGeTNOtxsZgXON3hN/f1MWTmZmmaRMHa1hpH81fXqHKfS3/PyjsnaUpY0pU9nn3T319IBYPALo6CjwAACRYVxv4CgA4tYkTJ2r06NGaP3++nn32WW3dulV///vfJUkul0tnn322cnNz1aNHD6WlpenIkSP67LPPtGfPHh08eFCSYlvKDBkyRHfddZcGDx6csj8PAHQGwWBY6eluDR/UR6+883Hc418yqE/sPImUjO7/kqI8ud1OWxV4pKYiTygUltdrqqw4X7N/NlIVO2u0dkuVduyuVeX+OlmBkMw0lwoLstS/b44uGdRHJUV5kqRIJCKfz6JzB0C3QoEHAIAk6QoDXwEArZOZmal/+Zd/0dSpU/XnP/9ZL7zwgsrLyxUMBrV9+/bYcYZhHDcfoFevXho9erS+/e1vq6ioKNmpA0CnZFlBeTymSoryVFiQFdful8LeWSopylMkEpVlJbYo0t27/y0rpEAgLI8nTabpVklRXqyAczLN34vfH2DmDoBup8sWePgLHQBgN6bbqcZA/Is8yRz4CgBoG9M0NWHCBE2YMEE+n0/vv/++tmzZourqah06dEiWZSknJ0e5ubnq37+/vvKVr1DUAYB2iEabijwZGWmaMrZE989bH7fYU8aUSGqKn+jbTXT/N93T8/ks+f2WTNMtt9spl8txTM7hcEShUETBYDgp3wsA2FWXvSP0/e9/X/X19alOAwAAhcNN+4EX9s5OyH7gyRr4CgDoGK/XqxEjRmjEiBGpTgUAuiS/PyDTdKmsOF+jhvbTyg2fdDjmqKH9VFacr0gkIr8/EIcsW4fu/6aiXWNj0HZbyQGAndiuwHPllVdq4sSJuvbaa9WrV692x5k8eXIcswIAoP26ysBXAEDXU15erqVLl2rjxo369NNPlZOTo0GDBunOO+/U2WeffcyxO3fu1EMPPaRNmzbJ7XZrxIgRmj59unJzc1OUPQAcq7nzIzs7Q1PHl6r6cIM2batud7wLivM1dXypJMnnszq8W4xhqMWOlGZ0/wMAWsN2/Zi7d+/Wb3/7W40YMUL/+q//qnXr1qU6JQBAJ2MYUnq6W1lZ6erZM1N5ed7Yr549M5WVla70dLcMIzn5NA9iHf75YNZ4S9bAVwBA1zN//ny99tpruvjii3Xvvffq+uuv13vvvacJEyYcMyto3759+s53vqPKykrdddddmjx5st566y3dcsstCgSS90Q7ALTEskKqrw/I5XLovslDNGpov3bFGTW0n+6bPEQul0P19QFZVvs7XwzDkNdrKjfXG1uLuFxOGYYR++VyOT9fozQtUgp7Z7f7fKdC9z8AdC22K9vfdtttWrJkifbv369Vq1bptdde05lnnqnrr79eEyZM6FBXDwCgazMMIzaM0+E4cfXG5XLGFk8ej5mUYZxdZeArAKD9AoGAampq5Ha7ddpppx3znt/v13/+53/qnXfekcPh0KWXXqrbbrtN6enpCc/r5ptv1qOPPqq0tLTYa1dffbW+9a1vad68eXr00UclSf/1X/+lhoYGLV68WH36ND1YUFpaqltuuUVLlizRt7/97YTnCgCt5fdbkqTMzDRNmzhYw0r7aP7yClXua/nn8MLeWZoypkRlxfmSpPr6QCxee5imS16vKYej6Rnrip01WrOlSjt216py3xFZwbBMd9N2zv375uiGK89Vz6x0uv8BAK1iRBN5R6udIpGI3nrrLS1cuFBr1qxROByWYRhyOp264oordP3112vYsGGpTrPLO3jQp0jEdpcHAJxQWxdOwwf1UUlRnqSmf3d8PqtDT+W1xOs1lZGRps3bquM68HXGrRerrDhfDQ0B+XztX3gCQDw4HIZ69fKmOg1beu655/Tggw9q3Lhx+uUvf3nMezfddJPef//92MMGhmHoK1/5iv7nf/4n9iR3sk2YMEGStHjxYknSsGHDdNFFF2nWrFnHHHfVVVfp9NNP1x/+8Idkp9ghrHWA7uFEa4S1zWuE/XWyAiGZaS4VFmSpf98cXRLnNYLHYyozs6mAvnlbteYvq2jxYa9vDOmnH10/WBU7azR9zjvtPvfJ/PL2r6mkKE91dY3MtgGAVrLzOsd2HTyS5HA4dNlll+myyy7T/v379dJLL2nRokWqqqrSypUrtWrVKrp6AAAxrV2b44JuAAAgAElEQVQ4NQbC2l55WNsrD+uVdz4+5um87OyMDj+ddypdaeArAKDt1q5dK0n61re+dczrb7zxht577z05HA5dc801Sk9P19KlS/Xee+9p2bJlGjduXNJzjUajqqmp0YABAyRJ+/fv18GDB1VSUnLcsaWlpXr77beTnSIAtIplhRQIhGNd/iVFebECzsk0d8a31OXf0jwdqWn3gFAoorlLylv98/+aDz7V98eU0P0PAGgV283g+aKCggJNmzZNb7zxhv77v/9bV155pZxOp3bv3q3f/OY3GjFihO68805m9QBAN9Vc3AmFIpr94ge6f976Vi+CKvfV6f556zX7xQ8UCkWUmZkmj8dMSJ7NA18laer4Ul3w+ZYP7RXvga8AgMT6xz/+IUn60pe+dMzrK1askGEY+sEPfqBf//rXmjlzpu655x5Fo1G9/PLLqUhVy5cv1/79+/XNb35TklRd3TSg/ItbyzW/VltbyxweALbV/HP4oUO+WNdKKBRWNBqN/QqFwmpsDKqurlGHDvlO+fN1a+fpNBd3Zj65sU0PdzVYIb35/m5J0pSxxxfWO2LKmKZ4lhUUywcA6Bps2cFzIoZhaPjw4Ro+fLj279+vn/70p3r33XcVCoW0atUqrVq1SoWFhZo8ebKuu+46OZ3OVKcMAEgw03TFijszn9yoTduq2xVn
5YZPVH24QfdNHvJ5vHBCtmtrHviamZmm+yYPadOTfEcbNbSfpo4vjcvAVwBAchw6dEjp6enq0aPHMa9v3LhRknTdddfFXhs7dqweeOABbdu2Lak5StLOnTs1Y8YMlZWVafz48ZIky2p6QOHoOT3NTLPpwYjGxsYTvg8AdhGNSo2NwQ5tS9aabaELembq8R9fKk+GW3OXlLdrjfKn/92m4YPPoPsfANAi23fwHK2qqkq/+93vdP311+u9996T1FT4Oe+88+R0OvXJJ5/o5z//ua6//nodOnQoxdkCABKp+ck5Se1eOB1t07ZqzV1SLqlpXk6iZh74/Zbq6wNyuRyaNnGwZtx6sQp7Z7Xqs4W9szTj1os1beLgWHEnUVvKAQDiq6GhIXZDsNmePXt06NAhnX766erbt2/s9czMTGVnZ6u2tjapOR44cEBTp05VVlaWZs2aFXtorrmIc6IunebiT3p6evISBYAU8HhMZWdnyOFwaPO2ak17ZLWmz3lHr7zzsbZXHlZjIKxoVBp3aX95MtzatK263YWZ2jortja5le5/AMAp2L6DJxwO680339TChQu1bt06RSIRRaNR5eTkaPz48brxxhtVWFiompoaPf/883rqqae0detWPfbYY/rFL36R6vQBAAni8aTJ4XB0aOH0RSs3fKJhpX1UVpwvjycttqVavPn9lkKhsLxeU2XF+Zr9s5FJH/gKAEiuHj166NChQzpy5Iiys7MlSRs2bJAklZWVHXd8KBSSx+NJWn51dXX6wQ9+oLq6Oj333HMqKCiIvZef33Rj8cCBA8d97sCBA8rJyaF7B0CXdvS20Kfqws8wXbrswqaC/YJlFR0659ubP9W5fXtq7Igi/fvkIZpH9z8A4ARsW+DZs2ePXnzxRS1evFg1NTWxpwvKysp04403atSoUccsIvLy8vSjH/1Il156qSZOnMigT8BmWhpAGQpFFAyG2QsYrb5WTNMtqeMLpy+av7xCs382Uqbplt9vJex6TOTAVwCA/Zx//vlau3atXnrpJU2ePFmRSEQvvfSSDMPQkCFDjjn20KFDqq+vV1FRUVJysyxLt912m3bt2qWnnnpK/fv3P+b9goIC5ebmqqLi+H9zy8vLNXDgwKTkCQCp0JZtoYcPPkOZ6S5V7Kxp9VzQU1nwctPfu2NHFGnaxMEaVtpH85dXqHJfy7ELe2dpypgSlX3e/UP3PwB0TbYr8KxcuVIvvPCCNmzYEBt25/F4NGbMGN14440699xzT/n5L3/5y8rLy1NNTU2SMgZwKoZhxG5gOxwn3vKqeQBlerpbHo/JDexuqq3XiqS4LZyOVrmvThU7a1RSlCfTdHdoj+6WNA989fstCqAA0MWNHz9ea9as0WOPPaZ169bp0KFD2rp1qzwej0aNGnXMsc3bUSejwBMOh3XnnXfqgw8+0Jw5c07YTSRJ3/jGN7R06VLt3btXp59+uiRp/fr12rVrl26++eaE5wkAqdDWbaHPPztXkrRmS1Vczh+NNj2Atq3ysG6bUEr3PwDgOLYr8Nx5552x/z7vvPN044036pprrlFmZmarY7A9AGAPrRlAabqdKuydrf59czT88x9CMzLSZJoufgjtRtp6rdxw5bnqmZUet4XTF63dUqWSojy53c6EFniaxWPgKwDA3q6++mqtWbNGS5Ys0dq1ayU1zbZ54IEHYlu2NXvllVdO2NmTCA8//LBWr16tyy67TLW1tVq2bNkx748dO1aSdNttt2nlypWaNGmSJk2apPr6ei1YsEDnnnuurr322oTnCQCp0NZtofufmSNJ2rE7vjPU1nzwqf5vR41um1CqYaWn0/0PAIixXYHHNE1dffXVuvHGG1VaWtquGKtXr45zVgDaqnmPYknavK1a85dVnLDTojEQ1vbKw9peeVivvPPxMW3k2dkZtJF3A+25Vq6++Cz1zEqP+8KpWXNcl8vRwpEAALTeL3/5S1133XXavHmzsrOzdfHFF6tv377HHBMIBJSVlaVx48bp61//esJz+vDDDyVJb775pt58883j3m8u8Jx++ul69tln9fDDD+uxxx6T2+3WiBEjdPfdd/OAHYAuqXnraKn120L3zmuanVa570jc86n1WZq1cLO+NqiPotGoLCtE9z8AwH4FnjVr1hz3BBuAzqW1AyhPpHJfne6ftz42CLL5xj9Fnq6pvddKIhdOkmIFpqMXSwAAxMOFF16oCy+88KTvp6WlaebMmUnL55lnnmn1sQMGDNCCBQsSmA0A2Efz1tFt2Rba/fn6wQqGE5KTFfjnDhd1dY0JOQcAoHOxXYGH4g7QubVlAOWprNzwiaoPN+i+yUM+jxdmu7YupiPXSjIXTgAAAAC6H7fbKalt83SC4YhMh1Om26nGQPzXKmaa7W7jAQBSzHb/Mhw8eFB//vOflZubq2uuueaUxy5fvly1tbW65pprlJubm6QMAZxMWwdQtmTTtmrNXVKuaRMHy+s1FQiE2Tu4i+jotcLCCQDQmQUCAa1bt04VFRU6ePCgJKlXr14qKSnRsGHD2PIMAGygebvmtmwLva/Gr36nZ6uwd7a2Vx6Oe06FBVmSmrZiAwBAsmGBZ/ny5XrkkUd0xx13tHjshx9+qKeeekqSNGnSpESnBqAFbR1A2RorN3yiYaV9VFacL48nTT4fW7V1BR29Vlg4AQA6q2effVb/+Z//qc8+++yE7/fo0UPTpk3Td7/73SRnBgA4WvN2zW3ZFnrHnlr1Oz1b/fvmJGSd0r9vjiQpFGKdAgBoYrvhAqtXr5YkjRo1qsVjx40bp2g0qjfeeCPRaR3jiSeeUHFx8XEdRmvXrtU999yja665Ruedd55GjhyZ1LyAVGrPAMrWmr+8KZ5pumUYcQ2NFIjHtbJjT9NTdM0LnHhj4QQASIR7771Xv/jFL1RbW6toNKqCggKVlpaqtLRUBQUFikajqq2t1UMPPaTp06enOl0AgNq2LfTWjw9JkoYP6pOQXC75PG4wQVtVAwA6H9sVeCorK5WWlqaioqIWjz333HNlmqZ2796dhMya7Nu3T3PnzlVmZuZx761YsUIrVqyQ1+tVfn5+0nIC7KA9Ayhbq3JfnSp21sjhMGKFAXRe8bhWWDgBADqbFStWaNGiRYpGoxozZoxWrVqlv/zlL1q4cKEWLlyov/zlL3rttddiD7EtXbpUL7/8cqrTBoBuz/x8Fk9rrPngU9U3hlRSlBfbFSBeCntnqaQoT5FIVJYVjGvsZoYhpae7lZWVrp49M5WX54396tkzU1lZ6UpP58FLALAT2xV4Dh48qIyMjFYfn5GRoZqamgRmdKxf/epXGjRokEpKSo5776677tL777+vP/3pTxo4cGDScgLsoD0DKNti7edx3W344Rr2FI9rpbMvnAAA3c8f//hHGYahm266SY888oj69et33DGFhYV6+OGHddNNNykajeqPf/xjCjIFAEj/3K65sHd2qz/TYIX05vtNDyFPGXv8faOOmDKmKZ5lBRXv0bTNM1Jzc72xIo7L5ZRhGLFfLpczVvzJzfXK6zVlUOkBgJSzXYHH6/W
qrq5OltXynA3LslRXV9emglBHvPvuu1q1apXuueeeE75fUFAgt5vuAnRP7RlA2RbNcZvPg84rHtdKZ104AQC6r23btskwDE2bNq3FY6dNmybDMLR9+/YkZAYAOJHm7Zrbui30n/53m474AyorzteooccX89tj1NB+KivOVyQSkd8fiEvMZqbpUm5upjIy0mI7LTyxuFw/mfW2Jk5foTE/XaaJ01foJ7Pe1hOLy2O7a2RkpCk3N1Omabvx3gDQrdjuTumAAQMUiUT05ptvtnjs6tWrFQ6HdfbZZyc8r3A4rJkzZ+q6665TcXFxws8HdDbtGUDZFs1beTWfB51XvK6VzrZwAgAgKytLPXv2bPG4nj17Kjs7myejASCFmrdrbuu20LV1luYuKZck3Tq+VBcUd2wL/wuK8zV1fKkkyeezFI3jU2gej6ns7Aw5HA5t3lataY+s1vQ57+iVdz7W9srDagyEFY1KjYGwtlce1ivvfKzpc97RtF+v1uZt1XI4HMrOzpDHY8YtJwBA29juTunIkSMVjUb1yCOPaP/+/Sc9bv/+/XrkkUdkGIauuOKKhOf1pz/9SVVVVbrzzjsTfi6gM2vLAMo2xQ2EEhIXqdPRa6UzLZwAADj77LPl8/nk9/tbPNbv98vn8yXlQTYAwIlZVlCRSLRd20K/vflTLXtrp9wuh/598pB2P5A2amg/3Td5iFwuh+rrA7Ks+K2LPR5TmZlpCoUimv3iB7p/3vpWz0it3Fen++et1+wXP1AoFFFmZhpFHgBIEdsVeG644Qb17t1be/fu1bhx4/SHP/xBu3btUiAQUCAQ0K5du/TUU09p3Lhx2rt3rwoKCvT//t//S2hOhw8f1u9+9zvdfvvtys3NTei5gM6uLQMo2xQ3jbbvriYe10pnWDgBACBJ1157rcLhsJ599tkWj33uuecUDod17bXXJiEzAMCJRKOKzeRsz7bQC16uiK1Vpk0crBm3XqzC3q0rFBX2ztKMWy/WtImDY2sUv7/lUQatZZquWHFn5pMbtXLDJ+2Ks3LDJ5r55MZYkYft2gAg+Wz3N29GRoZmz56tKVOm6PDhw/rVr36lX/3qV8cdF41G1bNnTz3xxBPKzMxMaE6PP/64evTooZtuuimh5wE6s3A4IpfLqcLe2dpeeTju8ZufmGoedInOK97XyoKXKyRJY0cUadrEwRpW2kfzl1eocl/LT58V9s7SlDElKvu8+yfeCycAAJrdeOONevfddzVr1iwFg0Hdcsst8ng8xxzT0NCgBQsW6IknntDo0aN1ww03pChbAIAk+f0BmaYrti10Wwoh0ag0f3mFtlUe1m0TSlVWnK/ZPxupip01WrulSjt216pyf52sQEhmmkuFBVnq3zdHlwzqo5KiPElSJBKRz2fF9QE0wzDk9TZ128xdUq5N26o7FG/TtmrNXVKuaRMHy+s1FQiE2Q0BAJLIdgUeSfrSl76kJUuW6LHHHtOrr76qUOjYf8jcbrdGjx6tu+66SwUFBQnNZdeuXXrhhRd0zz33qLr6n//oWZalYDCoPXv2yOv1KienbUP3gK4mFGq6ad+/b05CCjzNgy2bB12i84r3tWLXhRMAAEebPn26TNOUx+PR7NmztWDBApWUlCg/v+khg+rqalVUVKixsVFZWVlKS0vT9OnTj4tjGIYeeuihZKcPAN1SNBqVz2cpOztDU8eXqvpwQ5sLIms++FT/t6NGN3yjWFdcVKiSorzYOuRkIpGoLCsovz8Q92KJx5Mmh8OhTduq292580UrN3yiYaV9VFacL48nTT4fD80BQLIYUZuX1RsaGlRRUaEDBw7IMAyddtppKikpUXp6elLOv3HjRk2aNOmUx0yaNEn33nvvMa9NnTpVH330kVavXp3I9BLq4EGfIhFbXx6wkfR0t7Ky0lWxs0bT57wT9/i/vP1rKinKU11doxobg3GPj+RJ5LWS4zX1+I9HKDc7vVWDqRO5cAKA7sjhMNSrlzfVadjSwIEDZRhGq/+9+eKxzb83DEN///vfE5Vmt8FaB0BbHD2vZu6S8nYVRkYN7aep40vlcjkUCjXNI3U6/zk5IRyOKBSKKBgMy7KCSsTyxDCk3FyvHA5D0x5Z3eqZO61R2DtLs382UpFIVIcO+RKSPwCkip3XObbs4DlaRkaGLrroopSdf8CAAZo9e/Zxrz/++OPy+/2699571bdv3xRkBtiLZQXl8ZixAZTx/kGxpCgvdjMenVsir5Vsb5p69chQJBKVz9cot9spl8uR9IUTAABfNG7cuFY9fAAAsJ/mbZwzM9M69bbQpumWw2GoYmdNXNdhklS5r04VO2tUUpQn03TzYCYAJIntCzyplpubqyuuuOK4159++mlJOua9Dz/8MNax88knn6iurk5z5syR1PTE3siRI5OQMZAazQMoMzLSNGVsie6ftz5usaeMaRpoyc34riFZ10pjY5BFBQDANh5++OFUpwAA6AC/31IoFJbXa3babaHdbqckac2WqoTEX7ulSiVFeXK7nazFACBJbF/gsSxLn3322XFzeL6oT58+Scro5LZu3apZs2Yd81rz78ePH0+BB11eRwZQnsyoof1UVpyvSCQivz8QhyxhB1wrAAAAADobywopEAjL40mTabpTPk+nrVyupp0NduyuTUj85rjN5wEAJJ4tCzwNDQ2aP3++VqxYocrKyhaPNwxDW7duTUJm//TMM88c99qECRM0YcKEpOYB2Ek8BlAe7YLifE0dXypJ8vmslP8wjPjhWgEAAADQGTWvZfx+S6bp7lTbQjfnWLnvSELiN2/7dvT/CwBAYtmuwHPkyBF95zvf0Y4dO1p9g44beYB9WFZI9fUBZWam6b7JQ+IygLK+PpDSNnYkBtcKAKC7qqmp0apVq1RRUaGDBw9Kknr16qWSkhJdddVVyss79dPgAIDUi0bVabeFtoLhxMQNsBYDgGSzXYFnzpw5+uijj+RyufTd735Xl19+ufLz8+V0OlOdGoBW6ioDKJF4XCsAgO4kHA5r1qxZeuqpp2JbUDc/rGYYhpYuXaqH/z979x4dVX3uf/yz57aTzCRECCGCSbChRm0MxBtYQU+h1liOUKyeurygBSq2eqza2morrN9RW23taT09pS1UUI+eU1u7TLFeqG21FqxaWy4xVUOhSBBMQiBAZpLsuez5/THMyCUhF2YyM8n7tRZr6Z6d736YZMj+7uf7fZ4HHtCCBQt0yy23MAcCAKSE6XaqO5j8JI/pybjHjAAw7BnRDNv+8slPflI7d+7UkiVLdNVVV6U7nBFtzx6/bDujfjyQZUzTJZ/PlMMR256dbQ0oMXT4WQGA4cHhMDRmjC/dYWSsr3zlK3r++ecVjUbl8XhUVVWlkpISSVJzc7MaGhoUDAZlGIb+9V//VQ8++GCaIx6+mOsAGIlOOCFPLpdTX/mvP2lzU3vSx68sO0Hf+/IFCocjam/vTPr4AJAumTzPybjUektLixwOhz772c+mOxQAxynbG1Bi6PCzAgAY7n7/+9/rueeekyR9/vOf1xe/+EUVFBQcdk5HR4d+8pOfaN
WqVXr22WdVW1urWbNmpSNcAMAwFA7bcrmcmlRamJIEz6TSwsR1AABDI+O6no0aNUper1emaaY7FABJEG9AuXevXx0d3eruDikcjigajSb+hMMRdXeH1NHRrb17/fL7LR7Yj0D8rAAAhrNf/epXMgxDN954o77+9a8fldyRpPz8fH3ta1/TjTfeqGg0qqeeeioNkQIAhqvQwd47MyaPT8n40w+OG0pRjx8AwNEyLsFz1llnqaOjQy0tLekOBUASxRtQdnR0q729U21t/sSf9vbOxAN9ntWDnxUAwHD01ltvyeFwaOHChX2eu3DhQjkcDr311ltDEBkAYKSwrJBsO6qqiiKVjctP6thlJfmqqihKVFoAAAyNjEvwfOELX5DT6dSyZcvSHQoAAAAAJMX+/fvl8/mUn9/3A7X8/Hzl5+dr//79QxAZAGCkiEaVSL4smluV1LEXzYmNZ1ksxgOAoZRxCZ6qqio98MAD+vWvf61vfOMb2rFjR7pDAgAAAIDjMmrUKPn9fvn9/j7P7ejoUEdHh0aNGjUEkQEARpJAICjbtlVTWazaaeVJGbN2WrlqKotl27YCgWBSxgQA9I8r3QEcKd5E1Ol0qq6uTnV1dYm+PL0xDEO///3vhypEAAAAABiQM844Q6+88ooeffRR3Xzzzcc899FHH5Vt26qqSu7qagAA4r1PCwpytXhetVrbu7S+sXXQ451ZWazF86oliR6pAJAGGZfg2blz51HH9u3bp3379vX6NYZhpDIkAAAAADgul112mf74xz/qxz/+scLhsL7whS8ctYjN7/frZz/7mVasWCHDMHT55ZenKVoAwHBmWWF1dgaVl+fRkgVTtbyuXmte3z7gcWqnlWvxvGq5XA51dgZlWeEURAsAOBYjmmGp9bq6ukF93bx585IcCfbs8cu2M+rHAwAAABnM4TA0Zowv3WFkrNtuu00vvPCCDMOQaZo644wzVFxcLElqaWlRQ0ODLCu2+vnTn/60vv/976c54uGLuQ4ASF6vqbw8jyRpQ2OrHn6mQU3NHX1+XVlJvhbNqVJNZex3WGdnUIGAldJYASCdMnmek3EJHmQOJj0AAAAYiEye+GSCUCik73//+3r88ccVDsdWOcerEcSnZS6XS9dee61uv/12ud3utMU63DHXAYAY03TJ5zPlcMTadDdsbdO6Tbu0Zcc+NbV0yAqGZXpcKhuXr0mlhZo+ebyqKookSbZty++32LkDYNjL5HkOCR70ikkPAAAABiKTJz6ZpKWlRS+++KIaGhq0Z88eSdKYMWNUVVWlT33qUxo3blyaIxz+mOsAwIcMw5DX65FpuuVw9N0GwbajsqyQAoEgPXcAjAiZPM8hwYNeMekBAADAQGTyxAc4FHMd4PgYhmSabrndTrlcDjmdjsRrkYitcNhWKBSRZYXEU6fswfcVAHqWyfMcV7oD6E1zc7MeeeQRrVu3Trt27ZJlWXr77bcTr+/fv18///nPZRiGFi5cKJcrY/8qAAAAAJCRAoGAVq5cqU2bNumtt97S/v37df/99+uyyy477Lw777yzx36pJ598stasWTNU4QJIs/7s9HC5nHK5nMrJccvrNdnpkUWiUam7O6Tu7lC6QwEA9FNGZkVeffVV3XrrrfL7/YkbgHht6rhRo0bp97//vf7+979r0qRJmjVrVjpCBQAAAICs1d7ermXLlmn8+PGqrKzUX/7yl17P9Xg8uu+++w47lp+fn+oQAWSInnq1rI33amk+ICsUkel2qqykQJNKCzXjYK+W3FyPTNNFrxYAAFIg4xI8H3zwgW655RYFAgHNnDlTn/nMZ7RkyRIdOHDgqHM/+9nPqqGhQa+88goJHgAAAAAZ4Uc/+lHSxrr55puTNlZPiouLtW7dOo0dO1ZvvfWWLr/88l7Pdblcmjt3bkrjAZCZvF5TeXkeSdKGxlY9vLpBTS0dR53XHYxoc1O7Nje16/lXt6msJF+L5lSpprJYBQW56uwMKhCwhjp8AACGrYxL8KxatUqBQECXXHKJfvCDH0iS7rnnnh7PnT59uiTprbfeGrL4AAAAAOBYfvSjHx1VgWCwUp3g8Xg8Gjt2bL/Pj0Qi6urqks+XmTXIASRfPLkTDttaXlevNa9v7/fXNjV3aOmK11Q7rVyL51UnkkQkeQAASI6MS/CsW7dOhmHoy1/+cp/nlpaWyuPx6P333x+CyAAAAACgb+ecc066Q0iJrq4unXXWWerq6tKoUaM0e/ZsffWrX5XX6013aABSxDRdieTOvave0PrG1kGNs+b17Wpt79KSBVMPjhehXBsAAEmQcQmeDz74QDk5OZo4cWK/zs/Ly5Pf709tUAAAAADQT9ddd51ycnISFQeGg7Fjx2rRokU6/fTTFY1GtXbtWv3f//2f3n33XT3++ONyuTJuagngOBmGIZ/PlCQtr6sfdHInbn1jq5bX1eumK6bI5zMVDEYSfZcBAMDgZNxduGEYsm27X+eGw2H5/X5WjAEAAADIGDfffLPGjh2rtWvXJo7Nnz9fhYWF+uEPf5jGyAbvK1/5ymH/P3v2bE2cOFE/+MEP9Nvf/lazZ89OU2QAUsXr9cjhcGh9Y+uAyrIdy5rXt+vj1eNVU1ksr9cjv59SbQAAHA9HugM40oQJExQMBrVr164+z33zzTcVDof7vdsHAAAAAIbCkavS//KXv2j9+vVpiiY1rr/+ejkcDv35z39OdygAkswwJNN0S5JWrm5I6tgPPxMbzzTdSlK7MgAARqyMS/Ccd955kqQnn3zymOeFQiE99NBDMgxDM2bMGIrQAAAAAKBPubm5OnDgQLrDSLmcnBwVFhZq//796Q4FQJKZplsOh6GGrW1qaulI6thNzR1q2Nomh8NIJJEAAMDgZFyC5/rrr5fb7daqVav01FNP9XjO3//+d33+85/Xpk2b5PV6ddVVVw1xlAAAAADQs4kTJyoUCumRRx5RV1dXusNJGb/fr/b2do0ePTrdoQBIMrfbKUlau6nv6iqDse7guPHrAACAwcm4HjwTJkzQfffdpzvvvFNLly7VD37wA3V0xFaLXHnlldq5c6fa2toUjUblcrn0ne98hwkFAAAAgIwxZ84cvfPOO/rud7+r7373u4nje/bs0WmnndbvcQzD0KjwufQAACAASURBVNtvv52KEAfEsiyFQiH5fL7Djv/4xz9WNBqlogIwDLlcsfXAW3bsS8n48XHj1wEAAIOTcQkeKTYhGjNmjO655x5t3/5hI7+NGzcm/ru8vFz/7//9v0RJNwAAAADIBNddd53ef/99/eIXv1A4HE4cP7IvT6Z44okndODAAbW2tkqSXn75ZTU3N0uSrr32Wu3fv1/z5s3T7Nmz9ZGPfESStG7dOr3yyiuaMWOGZs2albbYAaSG0xlLvDQ1p6bcZLzsW/w6AABgcIxops4yFJsAvfnmm1q/fr1aW1sViUQ0duxYnXnmmZo6daqcTrbyptKePX7Zdsb+eAAAgAGIN0t2u51yuRyHPVCJRGyFw7ZCoYgsK6TMvTtEpnM4DI0Z4+v7xBEiEAjon//8p7q6ujR//
[... remainder of base64-encoded PNG output omitted; the rendered figure is a four-panel pruning summary generated from resnet20_prune_ea_gpu_latency_batchsize_1.csv, plotting accuracy, params(K), latency_sum, and flops(M) against model_size(K) ...]\n",
-      "text/plain": [
-       "
" - ] - }, - "metadata": {}, - "output_type": "display_data" - } - ], - "source": [ - "def plot_from_csv_summarize(csv_file, to_png_file=\"./default_summarize.png\", dpi=300):\n", - " \"\"\"Plot pruning png with csv_summarize file.\"\"\"\n", - " pdf = pd.read_csv(csv_file)\n", - " print(pdf.keys())\n", - " \n", - " pdf_readable, up_map = make_human_readable(pdf, [\"flops\", \"model_size\", \"params\", \"input_num\"])\n", - " print(up_map)\n", - " # print(pdf_readable)\n", - "\n", - " plot_pruning_case(pdf_readable, x_label=[\"model_size(K)\"]*4, \n", - " y_label=[\"accuracy\", \"params(K)\", \"latency_sum\", \"flops(M)\"], \n", - " to_png_file=to_png_file, dpi=dpi)\n", - " \n", - "plot_from_csv_summarize(csv_file=\"./resnet20_prune_ea_gpu_latency_batchsize_1.csv\", dpi=100)" - ] - } - ], - "metadata": { - "kernelspec": { - "display_name": "Python 3", - "language": "python", - "name": "python3" - }, - "language_info": { - "codemirror_mode": { - "name": "ipython", - "version": 3 - }, - "file_extension": ".py", - "mimetype": "text/x-python", - "name": "python", - "nbconvert_exporter": "python", - "pygments_lexer": "ipython3", - "version": "3.6.9" - } - }, - "nbformat": 4, - "nbformat_minor": 2 -}