diff --git a/.code.yml b/.code.yml index fdc9bc47e1..c1862ec06b 100644 --- a/.code.yml +++ b/.code.yml @@ -14,4 +14,4 @@ source: # 此处备注的第三方代码在后续统计代码量环节会被排除,若代码库中不存在需要排除的第三方代码,该项配置标识可为空 third_party_source: #第三方代码文件的正则表达式,若无统一标识格式,可以指定具体目录,样例可参考test_source举例 - filepath_regex: ["/vendor/.*", "/src/tools/monstache/vendor/.*"] + filepath_regex: ["/vendor/.*"] diff --git a/docs/overview/installation.md b/docs/overview/installation.md index dde0b28681..280229c4ea 100644 --- a/docs/overview/installation.md +++ b/docs/overview/installation.md @@ -6,7 +6,6 @@ * Redis >= 3.2.11 * MongoDB >= 4.2 * Elasticsearch >= 7.0.0 (用于全文检索功能) -* Monstache >= 6.0.0 (用于全文检索功能) ## CMDB 微服务进程清单 @@ -30,9 +29,11 @@ * cmdb_operationserver * cmdb_synchronizeserver * cmdb_taskserver +* cmdb_syncserver ### 4. 资源管理服务进程 +* cmdb_cacheservice * cmdb_coreservice --- @@ -182,13 +183,6 @@ MongoDB官方资料: > db.createUser({user: "cc",pwd: "cc",roles: [ { role: "readWrite", db: "cmdb" } ]}) ``` -- 开启ES情况(用于全文检索, 可选, 控制开关见第9步的full_text_search) - -``` json - > use cmdb - > db.createUser({user: "cc",pwd: "cc",roles: [ { role: "readWrite", db: "cmdb" },{ role: "readWrite", db: "monstache" } ]}) -``` - **注:以上用户名、密码、数据库名仅作示例展示,用户使用中可以根据实际情况自行配置。如果安装的MongoDB的版本大于等于3.6,需要手动修改init.py自动生成的配置文件,详细步骤参看init.py相关小节。** 详细手册请参考官方资料 [MongoDB](https://docs.mongodb.com/manual/reference/method/db.createUser/) @@ -207,13 +201,7 @@ MongoDB官方资料: 如果想部署高可用可扩展的ES,可参考官方文档[ES-Guide](https://www.elastic.co/guide/index.html) -### 7. 部署Monstache (用于全文检索, 可选, 控制开关见第9步的full_text_search) - -蓝鲸CMDB针对需求场景采用定制化的Monstache组件,组件以及其插件SO请从指定的Release Package中获取。 - -插件基于Monstache v6.0.0+, 需要依赖Elasticsearch v7+和MongoDB v4.2+。 - -阅读[蓝鲸CMDB全文检索插件文档](../../src/tools/monstache/README.md), 按照指引进行安装部署。 +CMDB提供场景层服务`cmdb_syncserver`用于将MongoDB中的数据同步到Elasticsearch中。阅读[同步服务文档](../../src/scene_server/sync_server/readme.md),按照指引进行全文检索同步配置。 ### 8. 
部署CMDB @@ -230,6 +218,7 @@ drwxr-xr-x 4 root root 4096 Jun 18 15:24 cmdb_eventserver drwxr-xr-x 5 root root 4096 Jun 18 15:24 cmdb_hostserver drwxr-xr-x 4 root root 4096 Jun 18 15:24 cmdb_operationserver drwxr-xr-x 5 root root 4096 Jun 18 15:24 cmdb_procserver +drwxr-xr-x 3 root root 4096 Jun 18 10:33 cmdb_syncserver drwxr-xr-x 3 root root 4096 Jun 18 10:33 cmdb_synchronizeserver drwxr-xr-x 5 root root 4096 Jun 18 15:24 cmdb_taskserver drwxr-xr-x 4 root root 4096 Jun 18 15:24 cmdb_toposerver @@ -249,31 +238,32 @@ drwxr-xr-x 7 root root 4096 Jun 18 10:33 web 各目录代表的服务及职责: -| 目标 | 类型 | 用途描述 | -| ---------------------- | ---------- | ------------------------------------------------------------ | -| cmdb_adminserver | server | 负责系统数据的初始化以及配置管理工作 | -| cmdb_apiserver | server | 场景层服务,api 服务 | -| cmdb_coreservice | server | 资源管理层,提供原子接口服务 | -| cmdb_datacollection | server | 场景层服务,数据采集服务 | -| cmdb_eventserver | server | 场景层服务,事件推送服务 | -| cmdb_hostserver | server | 场景层服务,主机数据维护 | -| cmdb_operationserver | server | 场景层服务,提供与运营统计相关功能服务 | -| cmdb_procserver | server | 场景层服务,负责进程数据的维护 | -| cmdb_synchronizeserver | server | 场景层服务,数据同步服务 | -| cmdb_taskserver | server | 场景层服务,异步任务管理服务 | -| cmdb_toposerver | server | 场景层服务,负责模型的定义以及主机、业务、模块及进程等实例数据的维护 | -| cmdb_webserver | server | web server 服务子目录 | -| docker | Dockerfile | 各服务的Dockerfile模板 | -| image.sh | script | 用于制作Docker镜像 | +| 目标 | 类型 | 用途描述 | +|------------------------|------------|---------------------------------------------| +| cmdb_adminserver | server | 负责系统数据的初始化以及配置管理工作 | +| cmdb_apiserver | server | 场景层服务,api 服务 | +| cmdb_coreservice | server | 资源管理层,提供原子接口服务 | +| cmdb_datacollection | server | 场景层服务,数据采集服务 | +| cmdb_eventserver | server | 场景层服务,事件推送服务 | +| cmdb_hostserver | server | 场景层服务,主机数据维护 | +| cmdb_operationserver | server | 场景层服务,提供与运营统计相关功能服务 | +| cmdb_procserver | server | 场景层服务,负责进程数据的维护 | +| cmdb_syncserver | server | 场景层服务,负责cmdb与第三方组件的数据同步 | +| cmdb_synchronizeserver | server | 场景层服务,多个cmdb之间的数据同步服务 | 
+| cmdb_taskserver | server | 场景层服务,异步任务管理服务 | +| cmdb_toposerver | server | 场景层服务,负责模型的定义以及主机、业务、模块及进程等实例数据的维护 | +| cmdb_webserver | server | web server 服务子目录 | +| docker | Dockerfile | 各服务的Dockerfile模板 | +| image.sh | script | 用于制作Docker镜像 | | init.py | script | 用于初始化服务及配置项,在需要重置服务配置的时候也可以运行此脚本,按照提示输入配置参数 | -| init_db.sh | script | 初始化数据库的数据 | -| ip.py | script | 查询主机真实的IP脚本 | -| restart.sh | script | 用于重启所有服务 | -| start.sh | script | 用于启动所有服务 | -| stop.sh | script | 用于停止所有服务 | -| tool_ctl | ctl | 管理小工具 | -| upgrade.sh | script | 用于全量升级服务进程 | -| web | ui | CMDB UI 页面 | +| init_db.sh | script | 初始化数据库的数据 | +| ip.py | script | 查询主机真实的IP脚本 | +| restart.sh | script | 用于重启所有服务 | +| start.sh | script | 用于启动所有服务 | +| stop.sh | script | 用于停止所有服务 | +| tool_ctl | ctl | 管理小工具 | +| upgrade.sh | script | 用于全量升级服务进程 | +| web | ui | CMDB UI 页面 | ### 9. 初始化 diff --git a/docs/resource/img/sync-server/simple_sync.png b/docs/resource/img/sync-server/simple_sync.png new file mode 100644 index 0000000000..f1d5f760bd Binary files /dev/null and b/docs/resource/img/sync-server/simple_sync.png differ diff --git a/docs/support-file/dockerfile/monstache/dockerfile b/docs/support-file/dockerfile/monstache/dockerfile deleted file mode 100644 index 3d300a3203..0000000000 --- a/docs/support-file/dockerfile/monstache/dockerfile +++ /dev/null @@ -1,4 +0,0 @@ -FROM ccr.ccs.tencentyun.com/bk.io/centos7-cmdb:base -ENV container docker -COPY cmdb_monstache /data/cmdb/monstache -RUN chmod +x /data/cmdb/monstache/monstache diff --git a/docs/support-file/dockerfile/syncserver/dockerfile b/docs/support-file/dockerfile/syncserver/dockerfile new file mode 100644 index 0000000000..53f123a6b9 --- /dev/null +++ b/docs/support-file/dockerfile/syncserver/dockerfile @@ -0,0 +1,9 @@ +FROM ccr.ccs.tencentyun.com/bk.io/centos7-cmdb:base +ENV container docker +COPY cmdb_syncserver /data/cmdb/cmdb_syncserver +RUN mkdir /data/cmdb/cmdb_syncserver/logs +RUN chmod +x /data/cmdb/cmdb_syncserver/cmdb_syncserver +#time 
zone setting +ENV TimeZone=Asia/Shanghai +RUN ln -snf /usr/share/zoneinfo/$TimeZone /etc/localtime && echo $TimeZone > /etc/timezone + diff --git a/docs/support-file/helm/README.md b/docs/support-file/helm/README.md index 3dba2b2a1c..b462a1522d 100644 --- a/docs/support-file/helm/README.md +++ b/docs/support-file/helm/README.md @@ -1,5 +1,3 @@ - - # BK-CMDB 蓝鲸配置平台(蓝鲸CMDB)是一个面向资产及应用的企业级配置管理平台。 @@ -21,11 +19,9 @@ - Kubernetes 1.12+ - Helm 3+ - - ### 安装Chart - 使用以下命令安装名称为`bkcmdb`的release, 其中``代表helm仓库地址, password为自己设置的任意密码: +使用以下命令安装名称为`bkcmdb`的release, 其中``代表helm仓库地址, password为自己设置的任意密码: ```shell $ helm repo add bkee @@ -34,8 +30,6 @@ $ helm install bkcmdb bkee/bkcmdb --set mongodb.auth.password=${password} --set 上述命令将使用默认配置在Kubernetes集群中部署bkcmdb, 并输出访问指引。 - - ### 卸载Chart 使用以下命令卸载`bkcmdb`: @@ -46,8 +40,6 @@ $ helm uninstall bkcmdb 上述命令将移除所有和bkrepo相关的Kubernetes组件。 - - ## Chart依赖 - [bitnami/mongodb](https://github.com/bitnami/charts/tree/master/bitnami/mongodb) @@ -61,347 +53,346 @@ $ helm uninstall bkcmdb ### 镜像配置 -| 参数 | 描述 | 默认值 | -| :-------------: | :----------: | :----------: | -| image.registry | 镜像源域名 | mirrors.tencent.com | -| image.pullPolicy | 镜像拉取策略 | IfNotPresent | +| 参数 | 描述 | 默认值 | +|:----------------:|:------:|:-------------------:| +| image.registry | 镜像源域名 | mirrors.tencent.com | +| image.pullPolicy | 镜像拉取策略 | IfNotPresent | ### 启动时初始化配置说明 启动时会执行job,分别对cmdb依赖的mongodb数据库进行初始化操作,以及往GSE注册dataid -| 参数 | 描述 | 默认值 | -| :----------------------: | :---------------------------: | :----: | -| migrate.enabled | 是否在执行helm时启动该job | true | -| migrate.image.repository | 初始化job所需要的镜像仓库地址 | migrate | -| migrate.image.tag | 初始化job所需要的镜像版本 | {TAG_NAME} | -| migrateDataId | 是否在启动时往GSE注册dataid | false | +| 参数 | 描述 | 默认值 | +|:------------------------:|:------------------:|:----------:| +| migrate.enabled | 是否在执行helm时启动该job | true | +| migrate.image.repository | 初始化job所需要的镜像仓库地址 | migrate | +| migrate.image.tag | 初始化job所需要的镜像版本 | {TAG_NAME} | +| migrateDataId | 
是否在启动时往GSE注册dataid | false | ### 蓝鲸产品URL配置 -| 参数 | 描述 | 默认值 | -| :------: | :------: | :---------------------: | -| bkPaasUrl | paas地址 | http://paas.example.com | -| bkIamApiUrl | bkiam后端地址 | http://bkiam-web | -| bkComponentApiUrl | 蓝鲸ESB地址 | http://bkapi.paas.example.com | -| bkLoginApiUrl | 蓝鲸登录地址 | http://bk-login-web | -| bkNodemanUrl | 节点管理地址 | http://apps.paas.example.com/bk--nodeman | +| 参数 | 描述 | 默认值 | +|:-----------------:|:---------:|:----------------------------------------:| +| bkPaasUrl | paas地址 | http://paas.example.com | +| bkIamApiUrl | bkiam后端地址 | http://bkiam-web | +| bkComponentApiUrl | 蓝鲸ESB地址 | http://bkapi.paas.example.com | +| bkLoginApiUrl | 蓝鲸登录地址 | http://bk-login-web | +| bkNodemanUrl | 节点管理地址 | http://apps.paas.example.com/bk--nodeman | ### adminserver服务配置说明 -| 参数 | 描述 | 默认值 | -| :----------------------------------: | :-----------------------------: | :------------------------------: | -| adminserver.enabled | 是否在执行helm时启动 | true | -| adminserver.image.repository | 服务镜像名 | cmdb_adminserver | -| adminserver.image.tag | 服务镜像版本 | {TAG_NAME} | -| adminserver.replicas | pod副本数量 | 1 | -| adminserver.port | 服务端口 | 80 | -| adminserver.configDir | 需要的配置文件路径 | /data/cmdb/cmdb_adminserver/configure | -| adminserver.errors | 需要的错误文件路径 | /data/cmdb/cmdb_adminserver/conf/errors | -| adminserver.language | 需要的语言文件路径 | /data/cmdb/cmdb_adminserver/conf/language | -| adminserver.command.logDir | 日志存放路径 | /data/cmdb/cmdb_adminserver/logs | -| adminserver.command.logLevel | 日志等级 | 3 | -| adminserver.command.logToStdErr | 是否把日志输出到stderr | false | -| adminserver.workDir | 工作目录 | /data/cmdb/cmdb_adminserver | +| 参数 | 描述 | 默认值 | +|:-------------------------------:|:--------------:|:-----------------------------------------:| +| adminserver.enabled | 是否在执行helm时启动 | true | +| adminserver.image.repository | 服务镜像名 | cmdb_adminserver | +| adminserver.image.tag | 服务镜像版本 | {TAG_NAME} | +| adminserver.replicas | pod副本数量 | 1 | +| adminserver.port | 服务端口 | 80 | +| 
adminserver.configDir | 需要的配置文件路径 | /data/cmdb/cmdb_adminserver/configure | +| adminserver.errors | 需要的错误文件路径 | /data/cmdb/cmdb_adminserver/conf/errors | +| adminserver.language | 需要的语言文件路径 | /data/cmdb/cmdb_adminserver/conf/language | +| adminserver.command.logDir | 日志存放路径 | /data/cmdb/cmdb_adminserver/logs | +| adminserver.command.logLevel | 日志等级 | 3 | +| adminserver.command.logToStdErr | 是否把日志输出到stderr | false | +| adminserver.workDir | 工作目录 | /data/cmdb/cmdb_adminserver | ### apiserver服务配置说明 -| 参数 | 描述 | 默认值 | -| :----------------------------------: | :-----------------------------: | :------------------------------: | -| apiserver.enabled | 是否在执行helm时启动 | true | -| apiserver.image.repository | 服务镜像名 |cmdb_apiserver | -| apiserver.image.tag | 服务镜像版本 | {TAG_NAME} | -| apiserver.replicas | pod副本数量 | 1 | -| apiserver.port | 服务端口 | 80 | -| apiserver.command.logDir | 日志存放路径 | /data/cmdb/cmdb_apiserver/logs | -| apiserver.command.logLevel | 日志等级 | 3 | -| apiserver.command.logToStdErr | 是否把日志输出到stderr | false | -| apiserver.workDir | 工作目录 | /data/cmdb/cmdb_apiserver | +| 参数 | 描述 | 默认值 | +|:-----------------------------:|:--------------:|:------------------------------:| +| apiserver.enabled | 是否在执行helm时启动 | true | +| apiserver.image.repository | 服务镜像名 | cmdb_apiserver | +| apiserver.image.tag | 服务镜像版本 | {TAG_NAME} | +| apiserver.replicas | pod副本数量 | 1 | +| apiserver.port | 服务端口 | 80 | +| apiserver.command.logDir | 日志存放路径 | /data/cmdb/cmdb_apiserver/logs | +| apiserver.command.logLevel | 日志等级 | 3 | +| apiserver.command.logToStdErr | 是否把日志输出到stderr | false | +| apiserver.workDir | 工作目录 | /data/cmdb/cmdb_apiserver | ### authserver服务配置说明 -| 参数 | 描述 | 默认值 | -| :----------------------------------: | :-----------------------------: | :------------------------------: | -| authserver.enabled | 是否在执行helm时启动 | true | -| authserver.image.repository | 服务镜像名 | cmdb_authserver | -| authserver.image.tag | 服务镜像版本 | {TAG_NAME} | -| authserver.replicas | pod副本数量 | 1 | -| authserver.port 
| 服务端口 | 80 | -| authserver.ingress.enabled | 开启ingress访问 | true | -| authserver.ingress.hosts | ingress代理访问的域名 |cmdb-auth.example.com| -| authserver.command.logDir | 日志存放路径 | /data/cmdb/cmdb_authserver/logs | -| authserver.command.logLevel | 日志等级 | 3 | -| authserver.command.logToStdErr | 是否把日志输出到stderr | false | -| authserver.workDir | 工作目录 | /data/cmdb/cmdb_authserver | +| 参数 | 描述 | 默认值 | +|:------------------------------:|:--------------:|:-------------------------------:| +| authserver.enabled | 是否在执行helm时启动 | true | +| authserver.image.repository | 服务镜像名 | cmdb_authserver | +| authserver.image.tag | 服务镜像版本 | {TAG_NAME} | +| authserver.replicas | pod副本数量 | 1 | +| authserver.port | 服务端口 | 80 | +| authserver.ingress.enabled | 开启ingress访问 | true | +| authserver.ingress.hosts | ingress代理访问的域名 | cmdb-auth.example.com | +| authserver.command.logDir | 日志存放路径 | /data/cmdb/cmdb_authserver/logs | +| authserver.command.logLevel | 日志等级 | 3 | +| authserver.command.logToStdErr | 是否把日志输出到stderr | false | +| authserver.workDir | 工作目录 | /data/cmdb/cmdb_authserver | ### cacheservice服务配置说明 -| 参数 | 描述 | 默认值 | -| :----------------------------------: | :-----------------------------: | :------------------------------: | -| cacheservice.enabled | 是否在执行helm时启动 | true | -| cacheservice.image.repository | 服务镜像名 | cmdb_cacheservice | -| cacheservice.image.tag | 服务镜像版本 | {TAG_NAME} | -| cacheservice.replicas | pod副本数量 | 1 | -| cacheservice.port | 服务端口 | 80 | -| cacheservice.command.logDir | 日志存放路径 | /data/cmdb/cmdb_cacheservice/logs | -| cacheservice.command.logLevel | 日志等级 | 3 | -| cacheservice.command.logToStdErr | 是否把日志输出到stderr | false | -| cacheservice.workDir | 工作目录 | /data/cmdb/cmdb_cacheservice | +| 参数 | 描述 | 默认值 | +|:--------------------------------:|:--------------:|:---------------------------------:| +| cacheservice.enabled | 是否在执行helm时启动 | true | +| cacheservice.image.repository | 服务镜像名 | cmdb_cacheservice | +| cacheservice.image.tag | 服务镜像版本 | {TAG_NAME} | +| 
cacheservice.replicas | pod副本数量 | 1 | +| cacheservice.port | 服务端口 | 80 | +| cacheservice.command.logDir | 日志存放路径 | /data/cmdb/cmdb_cacheservice/logs | +| cacheservice.command.logLevel | 日志等级 | 3 | +| cacheservice.command.logToStdErr | 是否把日志输出到stderr | false | +| cacheservice.workDir | 工作目录 | /data/cmdb/cmdb_cacheservice | ### cloudserver服务配置说明 -| 参数 | 描述 | 默认值 | -| :----------------------------------: | :-----------------------------: | :------------------------------: | -| cloudserver.enabled | 是否在执行helm时启动 | true | -| cloudserver.image.repository | 服务镜像名 | cmdb_cloudserver | -| cloudserver.image.tag | 服务镜像版本 | {TAG_NAME} | -| cloudserver.replicas | pod副本数量 | 1 | -| cloudserver.port | 服务端口 | 80 | -| cloudserver.command.logDir | 日志存放路径 | /data/cmdb/cmdb_cloudserver/logs | -| cloudserver.command.logLevel | 日志等级 | 3 | -| cloudserver.command.logToStdErr | 是否把日志输出到stderr | false | -| cloudserver.command.enableCryptor | 是否开启加密服务 | false | -| cloudserver.workDir | 工作目录 | /data/cmdb/cmdb_cloudserver | +| 参数 | 描述 | 默认值 | +|:---------------------------------:|:--------------:|:--------------------------------:| +| cloudserver.enabled | 是否在执行helm时启动 | true | +| cloudserver.image.repository | 服务镜像名 | cmdb_cloudserver | +| cloudserver.image.tag | 服务镜像版本 | {TAG_NAME} | +| cloudserver.replicas | pod副本数量 | 1 | +| cloudserver.port | 服务端口 | 80 | +| cloudserver.command.logDir | 日志存放路径 | /data/cmdb/cmdb_cloudserver/logs | +| cloudserver.command.logLevel | 日志等级 | 3 | +| cloudserver.command.logToStdErr | 是否把日志输出到stderr | false | +| cloudserver.command.enableCryptor | 是否开启加密服务 | false | +| cloudserver.workDir | 工作目录 | /data/cmdb/cmdb_cloudserver | ### coreservice服务配置说明 -| 参数 | 描述 | 默认值 | -| :----------------------------------: | :-----------------------------: | :------------------------------: | -| coreservice.enabled | 是否在执行helm时启动 | true | -| coreservice.image.repository | 服务镜像名 | cmdb_coreservice | -| coreservice.image.tag | 服务镜像版本 | {TAG_NAME} | -| coreservice.replicas | pod副本数量 | 
1 | -| coreservice.port | 服务端口 | 80 | -| coreservice.command.logDir | 日志存放路径 | /data/cmdb/cmdb_coreservice/logs | -| coreservice.command.logLevel | 日志等级 | 3 | -| coreservice.command.logToStdErr | 是否把日志输出到stderr | false | -| coreservice.workDir | 工作目录 | /data/cmdb/cmdb_coreservice | +| 参数 | 描述 | 默认值 | +|:-------------------------------:|:--------------:|:--------------------------------:| +| coreservice.enabled | 是否在执行helm时启动 | true | +| coreservice.image.repository | 服务镜像名 | cmdb_coreservice | +| coreservice.image.tag | 服务镜像版本 | {TAG_NAME} | +| coreservice.replicas | pod副本数量 | 1 | +| coreservice.port | 服务端口 | 80 | +| coreservice.command.logDir | 日志存放路径 | /data/cmdb/cmdb_coreservice/logs | +| coreservice.command.logLevel | 日志等级 | 3 | +| coreservice.command.logToStdErr | 是否把日志输出到stderr | false | +| coreservice.workDir | 工作目录 | /data/cmdb/cmdb_coreservice | ### datacollection服务配置说明 -| 参数 | 描述 | 默认值 | -| :----------------------------------: | :-----------------------------: | :------------------------------: | -| datacollection.enabled | 是否在执行helm时启动 | true | -| datacollection.image.repository | 服务镜像名 | cmdb_datacollection | -| datacollection.image.tag | 服务镜像版本 | {TAG_NAME} | -| datacollection.replicas | pod副本数量 | 1 | -| datacollection.port | 服务端口 | 80 | -| datacollection.command.logDir | 日志存放路径 | /data/cmdb/cmdb_datacollection/logs | -| datacollection.command.logLevel | 日志等级 | 3 | -| datacollection.command.logToStdErr | 是否把日志输出到stderr | false | -| datacollection.workDir | 工作目录 | /data/cmdb/cmdb_datacollection | +| 参数 | 描述 | 默认值 | +|:----------------------------------:|:--------------:|:-----------------------------------:| +| datacollection.enabled | 是否在执行helm时启动 | true | +| datacollection.image.repository | 服务镜像名 | cmdb_datacollection | +| datacollection.image.tag | 服务镜像版本 | {TAG_NAME} | +| datacollection.replicas | pod副本数量 | 1 | +| datacollection.port | 服务端口 | 80 | +| datacollection.command.logDir | 日志存放路径 | /data/cmdb/cmdb_datacollection/logs | +| 
datacollection.command.logLevel | 日志等级 | 3 | +| datacollection.command.logToStdErr | 是否把日志输出到stderr | false | +| datacollection.workDir | 工作目录 | /data/cmdb/cmdb_datacollection | ### eventserver服务配置说明 -| 参数 | 描述 | 默认值 | -| :----------------------------------: | :-----------------------------: | :------------------------------: | -| eventserver.enabled | 是否在执行helm时启动 | true | -| eventserver.image.repository | 服务镜像名 | cmdb_eventserver | -| eventserver.image.tag | 服务镜像版本 | {TAG_NAME} | -| eventserver.replicas | pod副本数量 | 1 | -| eventserver.port | 服务端口 | 80 | -| eventserver.command.logDir | 日志存放路径 | /data/cmdb/cmdb_eventserver/logs | -| eventserver.command.logLevel | 日志等级 | 3 | -| eventserver.command.logToStdErr | 是否把日志输出到stderr | false | -| eventserver.workDir | 工作目录 | /data/cmdb/cmdb_eventserver | +| 参数 | 描述 | 默认值 | +|:-------------------------------:|:--------------:|:--------------------------------:| +| eventserver.enabled | 是否在执行helm时启动 | true | +| eventserver.image.repository | 服务镜像名 | cmdb_eventserver | +| eventserver.image.tag | 服务镜像版本 | {TAG_NAME} | +| eventserver.replicas | pod副本数量 | 1 | +| eventserver.port | 服务端口 | 80 | +| eventserver.command.logDir | 日志存放路径 | /data/cmdb/cmdb_eventserver/logs | +| eventserver.command.logLevel | 日志等级 | 3 | +| eventserver.command.logToStdErr | 是否把日志输出到stderr | false | +| eventserver.workDir | 工作目录 | /data/cmdb/cmdb_eventserver | ### hostserver服务配置说明 -| 参数 | 描述 | 默认值 | -| :----------------------------------: | :-----------------------------: | :------------------------------: | -| hostserver.enabled | 是否在执行helm时启动 | true | -| hostserver.image.repository | 服务镜像名 | cmdb_hostserver | -| hostserver.image.tag | 服务镜像版本 | {TAG_NAME} | -| hostserver.replicas | pod副本数量 | 1 | -| hostserver.port | 服务端口 | 80 | -| hostserver.command.logDir | 日志存放路径 | /data/cmdb/cmdb_hostserver/logs | -| hostserver.command.logLevel | 日志等级 | 3 | -| hostserver.command.logToStdErr | 是否把日志输出到stderr | false | -| hostserver.workDir | 工作目录 | 
/data/cmdb/cmdb_hostserver | +| 参数 | 描述 | 默认值 | +|:------------------------------:|:--------------:|:-------------------------------:| +| hostserver.enabled | 是否在执行helm时启动 | true | +| hostserver.image.repository | 服务镜像名 | cmdb_hostserver | +| hostserver.image.tag | 服务镜像版本 | {TAG_NAME} | +| hostserver.replicas | pod副本数量 | 1 | +| hostserver.port | 服务端口 | 80 | +| hostserver.command.logDir | 日志存放路径 | /data/cmdb/cmdb_hostserver/logs | +| hostserver.command.logLevel | 日志等级 | 3 | +| hostserver.command.logToStdErr | 是否把日志输出到stderr | false | +| hostserver.workDir | 工作目录 | /data/cmdb/cmdb_hostserver | ### operationserver服务配置说明 -| 参数 | 描述 | 默认值 | -| :----------------------------------: | :-----------------------------: | :------------------------------: | -| operationserver.enabled | 是否在执行helm时启动 | true | -| operationserver.image.repository | 服务镜像名 | cmdb_operationserver | -| operationserver.image.tag | 服务镜像版本 | {TAG_NAME} | -| operationserver.replicas | pod副本数量 | 1 | -| operationserver.port | 服务端口 | 80 | -| operationserver.command.logDir | 日志存放路径 | /data/cmdb/cmdb_operationserver/logs | -| operationserver.command.logLevel | 日志等级 | 3 | -| operationserver.command.logToStdErr | 是否把日志输出到stderr | false | -| operationserver.workDir | 工作目录 | /data/cmdb/cmdb_operationserver | +| 参数 | 描述 | 默认值 | +|:-----------------------------------:|:--------------:|:------------------------------------:| +| operationserver.enabled | 是否在执行helm时启动 | true | +| operationserver.image.repository | 服务镜像名 | cmdb_operationserver | +| operationserver.image.tag | 服务镜像版本 | {TAG_NAME} | +| operationserver.replicas | pod副本数量 | 1 | +| operationserver.port | 服务端口 | 80 | +| operationserver.command.logDir | 日志存放路径 | /data/cmdb/cmdb_operationserver/logs | +| operationserver.command.logLevel | 日志等级 | 3 | +| operationserver.command.logToStdErr | 是否把日志输出到stderr | false | +| operationserver.workDir | 工作目录 | /data/cmdb/cmdb_operationserver | ### procserver服务配置说明 -| 参数 | 描述 | 默认值 | -| :----------------------------------: 
| :-----------------------------: | :------------------------------: | -| procserver.enabled | 是否在执行helm时启动 | true | -| procserver.image.repository | 服务镜像名 | cmdb_procserver | -| procserver.image.tag | 服务镜像版本 | {TAG_NAME} | -| procserver.replicas | pod副本数量 | 1 | -| procserver.port | 服务端口 | 80 | -| procserver.command.logDir | 日志存放路径 | /data/cmdb/cmdb_procserver/logs | -| procserver.command.logLevel | 日志等级 | 3 | -| procserver.command.logToStdErr | 是否把日志输出到stderr | false | -| procserver.workDir | 工作目录 | /data/cmdb/cmdb_procserver | +| 参数 | 描述 | 默认值 | +|:------------------------------:|:--------------:|:-------------------------------:| +| procserver.enabled | 是否在执行helm时启动 | true | +| procserver.image.repository | 服务镜像名 | cmdb_procserver | +| procserver.image.tag | 服务镜像版本 | {TAG_NAME} | +| procserver.replicas | pod副本数量 | 1 | +| procserver.port | 服务端口 | 80 | +| procserver.command.logDir | 日志存放路径 | /data/cmdb/cmdb_procserver/logs | +| procserver.command.logLevel | 日志等级 | 3 | +| procserver.command.logToStdErr | 是否把日志输出到stderr | false | +| procserver.workDir | 工作目录 | /data/cmdb/cmdb_procserver | ### taskserver服务配置说明 -| 参数 | 描述 | 默认值 | -| :----------------------------------: | :-----------------------------: | :------------------------------: | -| taskserver.enabled | 是否在执行helm时启动 | true | -| taskserver.image.repository | 服务镜像名 | cmdb_taskserver | -| taskserver.image.tag | 服务镜像版本 | {TAG_NAME} | -| taskserver.replicas | pod副本数量 | 1 | -| taskserver.port | 服务端口 | 80 | -| taskserver.command.logDir | 日志存放路径 | /data/cmdb/cmdb_taskserver/logs | -| taskserver.command.logLevel | 日志等级 | 3 | -| taskserver.command.logToStdErr | 是否把日志输出到stderr | false | -| taskserver.workDir | 工作目录 | /data/cmdb/cmdb_taskserver | +| 参数 | 描述 | 默认值 | +|:------------------------------:|:--------------:|:-------------------------------:| +| taskserver.enabled | 是否在执行helm时启动 | true | +| taskserver.image.repository | 服务镜像名 | cmdb_taskserver | +| taskserver.image.tag | 服务镜像版本 | {TAG_NAME} | +| 
taskserver.replicas | pod副本数量 | 1 | +| taskserver.port | 服务端口 | 80 | +| taskserver.command.logDir | 日志存放路径 | /data/cmdb/cmdb_taskserver/logs | +| taskserver.command.logLevel | 日志等级 | 3 | +| taskserver.command.logToStdErr | 是否把日志输出到stderr | false | +| taskserver.workDir | 工作目录 | /data/cmdb/cmdb_taskserver | ### toposerver服务配置说明 -| 参数 | 描述 | 默认值 | -| :----------------------------------: | :-----------------------------: | :------------------------------: | -| toposerver.enabled | 是否在执行helm时启动 | true | -| toposerver.image.repository | 服务镜像名 | cmdb_toposerver | -| toposerver.image.tag | 服务镜像版本 | {TAG_NAME} | -| toposerver.replicas | pod副本数量 | 1 | -| toposerver.port | 服务端口 | 80 | -| toposerver.command.logDir | 日志存放路径 | /data/cmdb/cmdb_toposerver/logs | -| toposerver.command.logLevel | 日志等级 | 3 | -| toposerver.command.logToStdErr | 是否把日志输出到stderr | false | -| toposerver.workDir | 工作目录 | /data/cmdb/cmdb_toposerver | +| 参数 | 描述 | 默认值 | +|:------------------------------:|:--------------:|:-------------------------------:| +| toposerver.enabled | 是否在执行helm时启动 | true | +| toposerver.image.repository | 服务镜像名 | cmdb_toposerver | +| toposerver.image.tag | 服务镜像版本 | {TAG_NAME} | +| toposerver.replicas | pod副本数量 | 1 | +| toposerver.port | 服务端口 | 80 | +| toposerver.command.logDir | 日志存放路径 | /data/cmdb/cmdb_toposerver/logs | +| toposerver.command.logLevel | 日志等级 | 3 | +| toposerver.command.logToStdErr | 是否把日志输出到stderr | false | +| toposerver.workDir | 工作目录 | /data/cmdb/cmdb_toposerver | ### synchronizeserver服务配置说明 -| 参数 | 描述 | 默认值 | -| :----------------------------------: | :-----------------------------: | :------------------------------: | -| synchronizeserver.enabled | 是否在执行helm时启动 | true | -| synchronizeserver.image.repository | 服务镜像名 | cmdb_synchronizeserver | -| synchronizeserver.image.tag | 服务镜像版本 | {TAG_NAME} | -| synchronizeserver.replicas | pod副本数量 | 1 | -| synchronizeserver.port | 服务端口 | 80 | -| synchronizeserver.command.logDir | 日志存放路径 | 
/data/cmdb/cmdb_synchronizeserver/logs | -| synchronizeserver.command.logLevel | 日志等级 | 3 | -| synchronizeserver.command.logToStdErr | 是否把日志输出到stderr | false | -| synchronizeserver.workDir | 工作目录 | /data/cmdb/cmdb_synchronizeserver | +| 参数 | 描述 | 默认值 | +|:-------------------------------------:|:--------------:|:--------------------------------------:| +| synchronizeserver.enabled | 是否在执行helm时启动 | true | +| synchronizeserver.image.repository | 服务镜像名 | cmdb_synchronizeserver | +| synchronizeserver.image.tag | 服务镜像版本 | {TAG_NAME} | +| synchronizeserver.replicas | pod副本数量 | 1 | +| synchronizeserver.port | 服务端口 | 80 | +| synchronizeserver.command.logDir | 日志存放路径 | /data/cmdb/cmdb_synchronizeserver/logs | +| synchronizeserver.command.logLevel | 日志等级 | 3 | +| synchronizeserver.command.logToStdErr | 是否把日志输出到stderr | false | +| synchronizeserver.workDir | 工作目录 | /data/cmdb/cmdb_synchronizeserver | ### webserver服务配置说明 -| 参数 | 描述 | 默认值 | -| :----------------------------------: | :-----------------------------: | :------------------------------: | -| webserver.enabled | 是否在执行helm时启动 | true | -| webserver.image.repository | 服务镜像名 | cmdb_webserver | -| webserver.image.tag | 服务镜像版本 | {TAG_NAME} | -| webserver.replicas | pod副本数量 | 1 | -| webserver.port | 服务端口 | 80 | -| webserver.ingress.enabled | 开启ingress访问 | true | -| webserver.ingress.hosts | ingress代理访问的域名 |cmdb.example.com| -| webserver.service.type | 服务类型 | ClusterIP | -| webserver.service.targetPort | 代理的目标端口 | 80 | -| webserver.service.nodePort | 访问端口 | | -| webserver.command.logDir | 日志存放路径 | /data/cmdb/cmdb_webserver/logs | -| webserver.command.logLevel | 日志等级 | 3 | -| webserver.command.logToStdErr | 是否把日志输出到stderr | false | -| webserver.workDir | 工作目录 | /data/cmdb/cmdb_webserver | +| 参数 | 描述 | 默认值 | +|:-----------------------------:|:--------------:|:------------------------------:| +| webserver.enabled | 是否在执行helm时启动 | true | +| webserver.image.repository | 服务镜像名 | cmdb_webserver | +| webserver.image.tag | 服务镜像版本 | 
{TAG_NAME} | +| webserver.replicas | pod副本数量 | 1 | +| webserver.port | 服务端口 | 80 | +| webserver.ingress.enabled | 开启ingress访问 | true | +| webserver.ingress.hosts | ingress代理访问的域名 | cmdb.example.com | +| webserver.service.type | 服务类型 | ClusterIP | +| webserver.service.targetPort | 代理的目标端口 | 80 | +| webserver.service.nodePort | 访问端口 | | +| webserver.command.logDir | 日志存放路径 | /data/cmdb/cmdb_webserver/logs | +| webserver.command.logLevel | 日志等级 | 3 | +| webserver.command.logToStdErr | 是否把日志输出到stderr | false | +| webserver.workDir | 工作目录 | /data/cmdb/cmdb_webserver | + +### syncserver服务配置说明 + +| 参数 | 描述 | 默认值 | +|:-----------------------------------------:|:----------------:|:-----:| +| syncServer.fullTextSearch.enableSync | 是否开启全文检索同步 | false | +| syncServer.fullTextSearch.indexShardNum | ES索引拥有的主分片数量 | 1 | +| syncServer.fullTextSearch.indexReplicaNum | ES索引每个主分片拥有的副本数量 | 1 | ### 服务开启鉴权开关 -| 参数 | 描述 | 默认值 | -| :----------------------------------: | :-----------------------------: | :------------------------------: | -| iam.auth.enable | 是否开启鉴权 | false | +| 参数 | 描述 | 默认值 | +|:---------------:|:------:|:-----:| +| iam.auth.enable | 是否开启鉴权 | false | ### common开头的配置 以`common`开头的配置,对应的是cmdb中`common.yaml`的配置文件中的各项配置,可根据原`common.yaml`中的配置对`common`开头的配置进行修改 ### mongodb配置 -| 参数 | 描述 | 默认值 | -| :----------------------------------: | :-----------------------------: | :------------------------------: | -| mongodb.enabled | 是否部署mognodb,如果需要使用外部数据库,设置为`false`并配置`mongodb.externalMongodb`和`mongodb.watch`下关于外部mongodb的配置 | true | + +| 参数 | 描述 | 默认值 | +|:---------------:|:---------------------------------------------------------------------------------------------:|:----:| +| mongodb.enabled | 是否部署mognodb,如果需要使用外部数据库,设置为`false`并配置`mongodb.externalMongodb`和`mongodb.watch`下关于外部mongodb的配置 | true | `mongodb.externalMongodb` 和 `mongodb.watch` 开头的配置,可根据原`mongodb.yaml`中的配置进行修改 ### redis配置 -| 参数 | 描述 | 默认值 | -| :----------------------------------: | :-----------------------------: | 
:------------------------------: | -| redis.enabled | 是否部署redis,如果需要使用外部数据库,设置为`false`并配置`redis.redis`、`redis.snapshotRedis`、`redis.discoverRedis`、`redis.netCollectRedis`下关于外部redis的配置 | true | -`redis.redis`、`redis.snapshotRedis`、`redis.discoverRedis`、`redis.netCollectRedis` 开头的配置,可根据原`redis.yaml`中的配置进行修改 +| 参数 | 描述 | 默认值 | +|:-------------:|:---------------------------------------------------------------------------------------------------------------------------------:|:----:| +| redis.enabled | 是否部署redis,如果需要使用外部数据库,设置为`false`并配置`redis.redis`、`redis.snapshotRedis`、`redis.discoverRedis`、`redis.netCollectRedis`下关于外部redis的配置 | true | + +`redis.redis`、`redis.snapshotRedis`、`redis.discoverRedis`、`redis.netCollectRedis` 开头的配置,可根据原`redis.yaml` +中的配置进行修改 ### zookeeper配置 -| 参数 | 描述 | 默认值 | -| :----------------------------------: | :-----------------------------: | :------------------------------: | -| zookeeper.enabled | 是否部署zookeeper作为配置发现中心、服务发现中心,如果需要使用外部zookeeper组件,设置为`false`并配置`configAndServiceCenter.addr` | true | + +| 参数 | 描述 | 默认值 | +|:-----------------:|:-------------------------------------------------------------------------------------------:|:----:| +| zookeeper.enabled | 是否部署zookeeper作为配置发现中心、服务发现中心,如果需要使用外部zookeeper组件,设置为`false`并配置`configAndServiceCenter.addr` | true | ### 配置发现中心、服务发现中心配置 -| 参数 | 描述 | 默认值 | -| :-------------------------: | :----------------------------------------------------------: | :----: | -| configAndServiceCenter.addr | 外部配置发现中心、服务发现中心地址,当zookeeper.enabled配置为`false`时,使用此参数连接外部组件 | | +| 参数 | 描述 | 默认值 | +|:---------------------------:|:-----------------------------------------------------------:|:---:| +| configAndServiceCenter.addr | 外部配置发现中心、服务发现中心地址,当zookeeper.enabled配置为`false`时,使用此参数连接外部组件 | | ### elasticsearch配置 -| 参数 | 描述 | 默认值 | -| :-------------------------: | :----------------------------------------------------------: | :----: | -| common.es.fullTextSearch | 开启全文索引开关,可选值为`on` 和 `off`, 默认关闭 | off | -| common.es.url | 
连接外部es的url | | -| common.es.usr | 连接外部es的用户名 | | -| common.es.pwd | 连接外部es的密码 | | -| elasticsearch.enabled | 是否启动内部部署的es,如果需要使用外部es组件,设置为`false`并配置`common.es.url`、`common.es.usr`、`common.es.pwd`的外部组件信息| false | -| elasticsearch.master.replicas | 内置es的master节点数 | 1 | -| elasticsearch.coordinating.replicas | 内置es的协调节点数 | 1 | -| elasticsearch.data.replicas | 内置es的数据节点数 | 1 | - -### monstache配置 -monstache是一个用于将mongodb的数据同步到es去创建索引的一个组件 - -| 参数 | 描述 | 默认值 | -| :-------------------------: | :----------------------------------------------------------: | :----: | -| monstache.enabled | 是否启动内部部署的monstache,如果需要使用外部monstache组件,设置为`false` | false | -| monstache.image.repository | 服务镜像名 | cmdb_monstache | -| monstache.image.tag | 服务镜像版本 | {TAG_NAME} | -| monstache.replicas | pod副本数量 | 1 | -| monstache.port | 服务端口 | 80 | -| monstache.workDir | 工作路径 | /data/cmdb/monstache | -| monstache.configDir | 需要的配置文件路径 | /data/cmdb/monstache/etc | -| monstache.directReadDynamicIncludeRegex | monstache配置内容 |内容过长请查看原value.yaml文件| -| monstache.mapperPluginPath | monstache配置内容 |/data/cmdb/monstache/monstache-plugin.so| -| monstache.elasticsearchShardNum | monstache配置内容 | 1 | -| monstache.elasticsearchReplicaNum | monstache配置内容 | 1 | +| 参数 | 描述 | 默认值 | +|:-----------------------------------:|:--------------------------------------------------------------------------------------------:|:-----:| +| common.es.fullTextSearch | 开启全文索引开关,可选值为`on` 和 `off`, 默认关闭 | off | +| common.es.url | 连接外部es的url | | +| common.es.usr | 连接外部es的用户名 | | +| common.es.pwd | 连接外部es的密码 | | +| elasticsearch.enabled | 是否启动内部部署的es,如果需要使用外部es组件,设置为`false`并配置`common.es.url`、`common.es.usr`、`common.es.pwd`的外部组件信息 | false | +| elasticsearch.master.replicas | 内置es的master节点数 | 1 | +| elasticsearch.coordinating.replicas | 内置es的协调节点数 | 1 | +| elasticsearch.data.replicas | 内置es的数据节点数 | 1 | ### bkLogConfig配置 + - bkLogConfig配置用于配置接入蓝鲸日志平台功能 -| 参数 | 描述 | 默认值 | -| :-------------------------: | 
:----------------------------------------------------------: | :----: | -| bkLogConfig.file.enabled | 是否采集容器内落地文件日志 | false | -| bkLogConfig.file.dataId | 采集容器内落地文件日志的dataid,dataid在日志平台上申请分配 | 1 | -| bkLogConfig.std.enabled | 是否采集容器标准输出日志 | false | -| bkLogConfig.std.dataId | 采集容器标准输出日志的dataid,dataid在日志平台上申请分配 | 1 | +| 参数 | 描述 | 默认值 | +|:------------------------:|:-----------------------------------:|:-----:| +| bkLogConfig.file.enabled | 是否采集容器内落地文件日志 | false | +| bkLogConfig.file.dataId | 采集容器内落地文件日志的dataid,dataid在日志平台上申请分配 | 1 | +| bkLogConfig.std.enabled | 是否采集容器标准输出日志 | false | +| bkLogConfig.std.dataId | 采集容器标准输出日志的dataid,dataid在日志平台上申请分配 | 1 | ### serviceMonitor配置 + - serviceMonitor配置用于配置服务监控功能 -| 参数 | 描述 | 默认值 | -| :-------------------------: | :----------------------------------------------------------: | :----: | -| serviceMonitor.enabled | 是否开启服务监控,采集cmdb业务指标数据 | false | -| serviceMonitor.interval | cmdb业务指标数据采集间隔时间 | 15s | +| 参数 | 描述 | 默认值 | +|:-----------------------:|:---------------------:|:-----:| +| serviceMonitor.enabled | 是否开启服务监控,采集cmdb业务指标数据 | false | +| serviceMonitor.interval | cmdb业务指标数据采集间隔时间 | 15s | ### 证书相关配置 -| 参数 | 描述 | 默认值 | -| :-------------------------: | :----------------------------------------------------------: | :----: | -| certPath | 证书的挂载pod里的路径 | "/data/cmdb/cert" | -| gseCert.ca |gse的CA证书 | "" | -| gseCert.cmdbCert | cmdb连接gse服务所需要的证书 |"" | -| gseCert.cmdbKey | cmdb连接gse服务所需要的证书的密钥 | ""| + +| 参数 | 描述 | 默认值 | +|:----------------:|:--------------------:|:-----------------:| +| certPath | 证书的挂载pod里的路径 | "/data/cmdb/cert" | +| gseCert.ca | gse的CA证书 | "" | +| gseCert.cmdbCert | cmdb连接gse服务所需要的证书 | "" | +| gseCert.cmdbKey | cmdb连接gse服务所需要的证书的密钥 | "" | ## 配置案例 ### 1. 
使用外接mongodb + ```yaml mongodb: enabled: false @@ -494,57 +485,21 @@ common: fullTextSearch: "on" ``` -- 在cmdb中使用elasticsearch需要依赖两个组件,一个是elasticsearch本身,一个monstache(用于将mongodb数据同步到elasticsearch) +- 在cmdb中使用elasticsearch需要依赖两个组件,一个是elasticsearch本身,一个是syncserver服务(用于将mongodb数据同步到elasticsearch) - (1)使用内置组件 - - ​ helm chart中有内置的elasticsearch和monstache,可通过下面操作打开: + ​ helm chart中有内置的elasticsearch和syncserver,可通过下面操作打开elasticsearch和syncserver的全文检索同步服务: ```yaml elasticsearch: enabled: true ·· - - monstache: - enabled: true - ``` - - ​ 将elasticsearch和monstache的enabled变为true即可 - - - - (2)使用外接组件 - - 这里仅需配置连接外置的elasticsearch,这时外置的monstach已经与cmdb没有配置上的联系 - ```yaml - common: - es: - url: xxx - usr: xxx - pwd: xxx - ``` - - 配置上外部es的url,账户密码的信息即可 - -- 当然也可以使用内置的monstache,连接外部的elasticsearch - - ```yaml - monstache: - enabled: true - - ··· - - common: - es: - url: xxx - usr: xxx - pwd: xxx + syncServer: + fullTextSearch: + enableSync: true ``` - 将monstache.enabled变为true,配置好外部的elasticsearch的配置即可 - ### 5. 配置webserver不同的服务暴露方式 默认通过Ingress暴露服务,也可以使用以下方式: @@ -572,33 +527,36 @@ common: 修改上述配置后,即可通过`ip:32033`的方式访问 - ### 6. 开启权限验证 - 通过进行下面的配置: +### 6. 开启权限验证 + +通过进行下面的配置: + ```yaml 开启权限 iam: auth: enabled: true -// 配置权限中心和esb地址、app code、app secret,开启前端的auth + // 配置权限中心和esb地址、app code、app secret,开启前端的auth bkIamApiUrl: xxx bkComponentApiUrl: xxx common: - ... + ... auth: appCode: xxx appSecret: xxx esb: appCode: xxx appSecret: xxx - ... + ... webServer: site: authScheme: iam ``` ### 7. blueking方式登陆 + ```yaml 通过将登陆方式设置为蓝鲸登陆方式和配置蓝鲸登陆地址等信息: @@ -647,13 +605,13 @@ common: 127.0.0.1 cmdb.bk.com ``` -在minikube环境通过下面指令启用` Ingress `控制器 +在minikube环境通过下面指令启用` Ingress `控制器 + ```yaml minikube addons enable ingress ``` -配置完后,通过访问`cmdb.bk.com/login`地址进行登陆,默认 的账号为`cc`,密码为`cc` - +配置完后,通过访问`cmdb.bk.com/login`地址进行登陆,默认 的账号为`cc`,密码为`cc` ### 2. 
cmdb启动不起来 @@ -668,13 +626,14 @@ authserver: enabled: false ``` -变为false不启动authserver服务 或者配置权限中心所需要的配置 +变为false不启动authserver服务 或者配置权限中心所需要的配置 如果长时间有些服务无法启动,如:toposerver,请确认job是否启动并执行完成 ### 3. 想要配置多个外置zookeeper地址作为服务中心怎么办? 答:通过,(逗号)分隔,如下: + ``` configAndServiceCenter: addr: 127.0.0.1:2181,127.0.0.2:2181 @@ -684,6 +643,7 @@ configAndServiceCenter: ### 4. 想要配置多个外置redis地址怎么办? 答:通过,(逗号)分隔,如下: + ``` redis: ... @@ -696,6 +656,7 @@ redis: ### 5. 想要配置多个外置mongo地址怎么办? 答:通过,(逗号)分隔,如下: + ``` mongodb: # external mongo configuration diff --git a/docs/support-file/helm/templates/adminserver/configmap.yaml b/docs/support-file/helm/templates/adminserver/configmap.yaml index 3ab26def1d..174ab2bdc8 100644 --- a/docs/support-file/helm/templates/adminserver/configmap.yaml +++ b/docs/support-file/helm/templates/adminserver/configmap.yaml @@ -251,6 +251,17 @@ data: # 下发主机身份文件权限值 filePrivilege: {{ .Values.common.eventServer.hostIdentifier.windows.filePrivilege }} + ## syncServer相关配置 + syncServer: + # 全文检索同步相关配置 + fullTextSearch: + # 是否开启全文检索同步 + enableSync: {{ .Values.common.syncServer.fullTextSearch.enableSync }} + # elasticsearch索引的shard数量 + indexShardNum: {{ .Values.common.syncServer.fullTextSearch.indexShardNum }} + # elasticsearch索引的replica数量 + indexReplicaNum: {{ .Values.common.syncServer.fullTextSearch.indexReplicaNum }} + # 直接调用gse服务相关配置 gse: # 调用gse的apiServer服务时相关配置 diff --git a/docs/support-file/helm/templates/monstache/configmap.yaml b/docs/support-file/helm/templates/monstache/configmap.yaml deleted file mode 100644 index f6e327f6b4..0000000000 --- a/docs/support-file/helm/templates/monstache/configmap.yaml +++ /dev/null @@ -1,38 +0,0 @@ -apiVersion: v1 -kind: ConfigMap -metadata: - name: {{ .Release.Name }}-monstache-configures -data: - - config.toml: |- - # mongodb settings - mongo-url = {{ include "cmdb.mongodb.mongo-url" . | quote }} - - # elasticsearch settings - elasticsearch-urls = [{{ include "cmdb.elasticsearch.urlAndPort" . 
| quote }}] - {{- if .Values.common.es.usr }} - elasticsearch-user = {{ .Values.common.es.usr | quote }} - {{- end }} - {{- if .Values.common.es.pwd }} - elasticsearch-password = {{ .Values.common.es.pwd | quote }} - {{- end }} - gzip = true - - # metadata collections. - change-stream-namespaces = [""] - direct-read-namespaces = [""] - direct-read-dynamic-include-regex = {{ .Values.monstache.directReadDynamicIncludeRegex | quote }} - - # plugin - mapper-plugin-path = {{ .Values.monstache.mapperPluginPath | quote }} - - # resume mode - resume = false - - extra.toml: |- - elasticsearch-shard-num = {{ .Values.monstache.elasticsearchShardNum | quote }} - elasticsearch-replica-num = {{ .Values.monstache.elasticsearchReplicaNum | quote }} - - - - diff --git a/docs/support-file/helm/templates/monstache/monstache-dpl.yaml b/docs/support-file/helm/templates/monstache/monstache-dpl.yaml deleted file mode 100644 index d7f0462cfa..0000000000 --- a/docs/support-file/helm/templates/monstache/monstache-dpl.yaml +++ /dev/null @@ -1,51 +0,0 @@ -{{- if .Values.monstache.enabled }} -apiVersion: {{ template "common.capabilities.deployment.apiVersion" . }} -kind: Deployment -metadata: - name: "{{ template "bk-cmdb.fullname" . }}-monstache" - labels: - {{- include "common.labels.standard" . | nindent 4 }} - component: monstache -spec: - replicas: {{ .Values.monstache.replicas }} - selector: - matchLabels: - {{- include "common.labels.matchLabels" . | nindent 6 }} - component: monstache - template: - metadata: - labels: - {{- include "common.labels.standard" . | nindent 8 }} - component: monstache - values-hash: "{{ toYaml .Values | sha256sum | trunc 63 }}" - {{- with .Values.monstache.annotations }} - annotations: - {{ toYaml . 
| indent 8 }} - {{- end }} - {{- if .Values.monstache.podAnnotations }} - {{ toYaml .Values.monstache.podAnnotations | indent 8 }} - {{- end }} - spec: - containers: - - name: monstache - image: {{ .Values.image.registry }}/{{ .Values.monstache.image.repository }}:{{ .Values.monstache.image.tag }} - imagePullPolicy: {{ .Values.image.pullPolicy }} - - workingDir: {{ .Values.monstache.workDir }} - command: ["./monstache"] - args: ["-f", "./etc/config.toml"] - ports: - - containerPort: {{ .Values.monstache.port }} - volumeMounts: - - name: configures - mountPath: {{ .Values.monstache.configDir }} - - {{- if .Values.monstache.resources }} - resources: {{ toYaml .Values.monstache.resources | nindent 10 }} - {{- end }} - volumes: - - name: configures - configMap: - name: {{ .Release.Name }}-monstache-configures - -{{- end }} diff --git a/docs/support-file/helm/templates/syncserver/syncserver-dpl.yaml b/docs/support-file/helm/templates/syncserver/syncserver-dpl.yaml new file mode 100644 index 0000000000..68a21133a8 --- /dev/null +++ b/docs/support-file/helm/templates/syncserver/syncserver-dpl.yaml @@ -0,0 +1,119 @@ +{{- if .Values.syncserver.enabled }} +apiVersion: {{ template "common.capabilities.deployment.apiVersion" . }} +kind: Deployment +metadata: + name: "{{ template "bk-cmdb.fullname" . }}-syncserver" + labels: + {{- include "common.labels.standard" . | nindent 4 }} + component: syncserver +spec: + replicas: {{ .Values.syncserver.replicas }} + selector: + matchLabels: + {{- include "common.labels.matchLabels" . | nindent 6 }} + component: syncserver + template: + metadata: + labels: + {{- include "common.labels.standard" . | nindent 8 }} + component: syncserver + values-hash: "{{ toYaml .Values | sha256sum | trunc 63 }}" + {{- with .Values.syncserver.annotations }} + annotations: + {{ toYaml . 
| indent 8 }} + {{- end }} + {{- if .Values.syncserver.podAnnotations }} + {{ toYaml .Values.syncserver.podAnnotations | indent 8 }} + {{- end }} + spec: + containers: + - name: syncserver + image: {{ .Values.image.registry }}/{{ .Values.syncserver.image.repository }}:v{{ default .Chart.AppVersion .Values.syncserver.image.tag }} + imagePullPolicy: {{ .Values.image.pullPolicy }} + workingDir: {{ .Values.syncserver.workDir }} + command: + - ./cmdb_syncserver + - --addrport=$(POD_IP):{{ .Values.syncserver.port }} + {{- if .Values.syncserver.configDir }} + - --config={{ .Values.syncserver.configDir }} + {{- end }} + - --regdiscv={{ include "cmdb.configAndServiceCenter.addr" . }} + {{- if .Values.syncserver.command.logDir }} + - --log-dir={{ .Values.syncserver.command.logDir }} + {{- end }} + - --v={{ .Values.syncserver.command.logLevel }} + - --logtostderr={{ .Values.syncserver.command.logToStdErr }} + - "--enable-auth" + - {{ .Values.iam.auth.enabled | quote }} + livenessProbe: + httpGet: + path: /healthz + port: {{ .Values.syncserver.port }} + initialDelaySeconds: 30 + periodSeconds: 10 + timeoutSeconds: 5 + failureThreshold: 6 + successThreshold: 1 + readinessProbe: + httpGet: + path: /healthz + port: {{ .Values.syncserver.port }} + initialDelaySeconds: 30 + periodSeconds: 10 + timeoutSeconds: 5 + failureThreshold: 6 + successThreshold: 1 + + {{- if .Values.syncserver.resources }} + resources: {{ toYaml .Values.syncserver.resources | nindent 10 }} + {{- end }} + + env: + - name: POD_IP + valueFrom: + fieldRef: + fieldPath: status.podIP + {{- if .Values.syncserver.env }} + {{ toYaml .Values.syncserver.env | indent 10 }} + {{- end }} + + ports: + - containerPort: {{ .Values.syncserver.port }} + + volumeMounts: + {{- if .Values.common.monitor.enabled }} + - name: plugin-path + mountPath: {{ .Values.common.monitor.pluginPath }} + {{- end }} + {{- if .Values.syncserver.configDir }} + - name: configures + mountPath: {{ .Values.syncserver.configDir }} + {{- end }} + 
volumes: + {{- if .Values.common.monitor.enabled }} + - name: plugin-path + hostPath: + path: {{ .Values.common.monitor.pluginPath }} + {{- end }} + {{- if .Values.syncserver.configDir }} + - name: configures + configMap: + name: {{ .Release.Name }}-syncserver-configures + {{- end }} + + {{- with .Values.syncserver.nodeSelector }} + nodeSelector: + {{ toYaml . | nindent 8 }} + {{- end }} + + {{- with .Values.syncserver.affinity }} + affinity: + {{ toYaml . | nindent 8 }} + {{- end }} + + {{- with .Values.syncserver.tolerations }} + tolerations: + {{ toYaml . | nindent 8 }} + {{- end }} + +{{- end }} diff --git a/docs/support-file/helm/templates/syncserver/syncserver-svc.yaml b/docs/support-file/helm/templates/syncserver/syncserver-svc.yaml new file mode 100644 index 0000000000..1631f34700 --- /dev/null +++ b/docs/support-file/helm/templates/syncserver/syncserver-svc.yaml @@ -0,0 +1,16 @@ +{{- if .Values.syncserver.enabled }} +apiVersion: v1 +kind: Service +metadata: + name: "{{ template "bk-cmdb.syncserver" . }}" + labels: +{{ include "common.labels.standard" . | indent 4 }} +spec: + ports: + - name: http + port: 80 + targetPort: {{ .Values.syncserver.port }} + selector: +{{ include "common.labels.matchLabels" . 
| indent 4 }}
+  component: syncserver
+{{- end }}
diff --git a/docs/support-file/helm/values.yaml b/docs/support-file/helm/values.yaml
index 8c4bb0f6bc..d8f546ad08 100644
--- a/docs/support-file/helm/values.yaml
+++ b/docs/support-file/helm/values.yaml
@@ -752,6 +752,67 @@ procserver:
       memory: 512Mi
       cpu: 200m
 
+## @section bk-cmdb syncserver parameters
+##
+syncserver:
+  ## @param syncserver.enabled Enable syncserver deployment
+  ##
+  enabled: true
+  ## bk-cmdb syncserver image parameters
+  ## @param syncserver.image.repository bk-cmdb syncserver image repository
+  ## @param syncserver.image.tag bk-cmdb syncserver image tag
+  ##
+  image:
+    repository: blueking/cmdb_syncserver
+    tag:
+  ## @param syncserver.replicas Number of syncserver replicas to deploy
+  ##
+  replicas: 1
+  ## @param syncserver.port bk-cmdb syncserver service HTTP port
+  ##
+  port: 80
+  ## @param syncserver.workDir bk-cmdb syncserver work directory
+  ##
+  workDir: /data/cmdb/cmdb_syncserver
+  ## bk-cmdb syncserver start command parameters
+  ## @param syncserver.command.logDir bk-cmdb syncserver log directory
+  ## @param syncserver.command.logLevel bk-cmdb syncserver log level
+  ## @param syncserver.command.logToStdErr Enable bk-cmdb syncserver print log to stderr
+  ##
+  command:
+    logDir: /data/cmdb/cmdb_syncserver/logs
+    logLevel: 3
+    logToStdErr: false
+  ## @param podAnnotations Annotations for syncserver pods
+  ## ref: https://kubernetes.io/docs/concepts/overview/working-with-objects/annotations/
+  ##
+  podAnnotations: {}
+  ## @param affinity Affinity for pod assignment
+  ## Ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#affinity-and-anti-affinity
+  ## NOTE: podAffinityPreset, podAntiAffinityPreset, and nodeAffinityPreset will be ignored when it's set
+  ##
+  affinity: {}
+  ## @param nodeSelector Node labels for pod assignment
+  ## ref: https://kubernetes.io/docs/user-guide/node-selection/
+  ##
+  nodeSelector: {}
+  ## @param tolerations Tolerations for pod assignment
+  ## ref: 
https://kubernetes.io/docs/concepts/configuration/taint-and-toleration/ + ## + tolerations: [] + ## bk-cmdb containers' resource requests and limits + ## ref: http://kubernetes.io/docs/user-guide/compute-resources/ + ## @param resources.limits The resources limits for the bk-cmdb container + ## @param resources.requests [object] The requested resources for the bk-cmdb container + ## + resources: + limits: + cpu: 200m + memory: 1024Mi + requests: + memory: 512Mi + cpu: 200m + ## @section bk-cmdb synchronizeserver parameters ## synchronizeserver: @@ -1395,6 +1456,17 @@ common: ## filePrivilege: 644 + ## sync server common config parameters + syncServer: + # @param common.syncServer.fullTextSearch full-text search synchronization common config parameters + fullTextSearch: + # @param common.syncServer.fullTextSearch.enableSync if full-text search synchronization is enabled + enableSync: true + # @param common.syncServer.fullTextSearch.indexShardNum es index sharding number + indexShardNum: 1 + # @param common.syncServer.fullTextSearch.indexReplicaNum es index replicas number + indexReplicaNum: 1 + ## gse server config ## gse: @@ -1746,74 +1818,6 @@ elasticsearch: size: 10Gi replicas: 1 -## @section monstache parameters -## -monstache: - ## @param monstache.enabled Enable monstache - ## - enabled: false - ## bk-cmdb monstache image parameters - ## @param monstache.image.repository monstache image repository - ## @param monstache.image.rag monstache image tag - ## - image: - repository: blueking/cmdb_monstache - tag: v2.0.0 - ## @param monstache.replicas Number of monstache replicas to deploy - ## - replicas: 1 - ## @param monstache.port monstache service HTTP port - ## - port: 80 - ## @param monstache.workDir monstache work directory - ## - workDir: /data/cmdb/monstache - ## @param monstache.configDir monstache config directory - ## - configDir: /data/cmdb/monstache/etc - ## @param monstache.directReadDynamicIncludeRegex monstache direct read dynamic include regex - ## 
- directReadDynamicIncludeRegex: cmdb.cc_ApplicationBase$|cc_SetBase$|cc_ModuleBase$|cmdb.cc_HostBase$|cmdb.cc_ObjDes$|cc_ObjAttDes$|cmdb.cc_ObjectBase_(.*)_pub_" - namespace-regex = "cmdb.cc_ApplicationBase$|cc_SetBase$|cc_ModuleBase$|cmdb.cc_HostBase$|cmdb.cc_ObjDes$|cc_ObjAttDes$|cmdb.cc_ObjectBase_(.*)_pub_ - ## @param monstache.mapperPluginPath monstache plugin path - ## - mapperPluginPath: /data/cmdb/monstache/monstache-plugin.so - ## @param monstache.elasticsearchShardNum elasticsearch sharding number - ## - elasticsearchShardNum: 1 - ## @param monstache.elasticsearchReplicaNum elasticsearch replicas number - ## - elasticsearchReplicaNum: 1 - ## @param podAnnotations Annotations for bk-panel pods - ## ref: https://kubernetes.io/docs/concepts/overview/working-with-objects/annotations/ - ## - podAnnotations: {} - ## @param affinity Affinity for pod assignment - ## Ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#affinity-and-anti-affinity - ## NOTE: podAffinityPreset, podAntiAffinityPreset, and nodeAffinityPreset will be ignored when it's set - ## - affinity: {} - ## @param nodeSelector Node labels for pod assignment - ## ref: https://kubernetes.io/docs/user-guide/node-selection/ - ## - nodeSelector: {} - ## @param tolerations Tolerations for pod assignment - ## ref: https://kubernetes.io/docs/concepts/configuration/taint-and-toleration/ - ## - tolerations: [] - ## bk-cmdb containers' resource requests and limits - ## ref: http://kubernetes.io/docs/user-guide/compute-resources/ - ## @param resources.limits The resources limits for the bk-cmdb container - ## @param resources.requests [object] The requested resources for the bk-cmdb container - ## - resources: - limits: - cpu: 200m - memory: 1024Mi - requests: - memory: 512Mi - cpu: 200m - ## @section ServiceMonitor parameters ## serviceMonitor: diff --git a/pkg/types/sync/full-text-search/full_text_search.go b/pkg/types/sync/full-text-search/full_text_search.go new file mode 100644 index 
0000000000..23251a13ec --- /dev/null +++ b/pkg/types/sync/full-text-search/full_text_search.go @@ -0,0 +1,83 @@ +/* + * Tencent is pleased to support the open source community by making + * 蓝鲸智云 - 配置平台 (BlueKing - Configuration System) available. + * Copyright (C) 2017 THL A29 Limited, + * a Tencent company. All rights reserved. + * Licensed under the MIT License (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at http://opensource.org/licenses/MIT + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on + * an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, + * either express or implied. See the License for the + * specific language governing permissions and limitations under the License. + * We undertake not to change the open source license (MIT license) applicable + * to the current version of the project delivered to anyone in the future. 
+ */ + +package fulltextsearch + +import ( + "configcenter/src/common" + "configcenter/src/common/errors" +) + +// SyncDataPageSize is the size of one sync data operation page +const SyncDataPageSize = 500 + +// SyncDataOption defines the sync full-text search data options +type SyncDataOption struct { + // IsAll defines if sync all data + IsAll bool `json:"is_all"` + // Index defines which index's data to sync + Index string `json:"index"` + // Collection defines which collection's data to sync + Collection string `json:"collection"` + // Oids defines the specific oids of data to sync in collection, it must be set with the Collection field + Oids []string `json:"oids"` +} + +// Validate sync data options +func (o *SyncDataOption) Validate() errors.RawErrorInfo { + if o == nil { + return errors.RawErrorInfo{ErrCode: common.CCErrCommParamsNeedSet, Args: []interface{}{"option"}} + } + + if o.IsAll { + if len(o.Collection) > 0 || len(o.Index) > 0 || len(o.Oids) > 0 { + return errors.RawErrorInfo{ErrCode: common.CCErrCommParamsInvalid, + Args: []interface{}{"only one of the sync options can be set"}} + } + + return errors.RawErrorInfo{} + } + + if len(o.Index) > 0 { + if len(o.Collection) > 0 || len(o.Oids) > 0 { + return errors.RawErrorInfo{ErrCode: common.CCErrCommParamsInvalid, + Args: []interface{}{"only one of the sync options can be set"}} + } + return errors.RawErrorInfo{} + } + + if len(o.Collection) == 0 || len(o.Oids) == 0 { + return errors.RawErrorInfo{ErrCode: common.CCErrCommParamsInvalid, + Args: []interface{}{"one of the sync options must be set"}} + } + + if len(o.Oids) > SyncDataPageSize { + return errors.RawErrorInfo{ErrCode: common.CCErrCommXXExceedLimit, + Args: []interface{}{"ids length", SyncDataPageSize}} + } + + return errors.RawErrorInfo{} +} + +// MigrateResult defines the sync full-text search migrate result +type MigrateResult struct { + PreVersion int `json:"pre_version,omitempty"` + CurrentVersion int `json:"current_version,omitempty"` + 
FinishedVersions []int `json:"finished_migrations,omitempty"` + + Message string `json:"message,omitempty"` +} diff --git a/scripts/build.sh b/scripts/build.sh index e6294afe4b..ddee7232c8 100755 --- a/scripts/build.sh +++ b/scripts/build.sh @@ -6,7 +6,6 @@ pushd $(pwd) > /dev/null for tmp in $DIRS;do FILES=$(find $tmp -name 'Makefile') for tmp_file in $FILES;do - # 全文检索插件编译场景下,由于将依赖vendor放到了src/tools/monstache_plugin下,所以需要将vendor下的Makefile跳过 if [[ $tmp_file == *vendor* ]] || [[ $tmp_file == *gse* ]] then continue diff --git a/scripts/init.py b/scripts/init.py index 3670486461..17faeb5c79 100755 --- a/scripts/init.py +++ b/scripts/init.py @@ -168,50 +168,6 @@ def generate_config_file( with open(output + "mongodb.yaml", 'w') as tmp_file: tmp_file.write(result) - outputMonstache = os.getcwd() + "/monstache/etc/" - if not os.path.exists(outputMonstache): - mkdir_p(outputMonstache) - # monstache.so config.toml - monstachesoconfig_file_template_str = ''' -# mongodb settings -mongo-url = "mongodb://$mongo_user:$mongo_pass@$mongo_host:$mongo_port/$db" - -# elasticsearch settings -elasticsearch-urls = ["$es_url"] -elasticsearch-user = "$es_user" -elasticsearch-password = "$es_pass" -gzip = true - -# metadata collections. 
-change-stream-namespaces = [""] -direct-read-namespaces = [""] -direct-read-dynamic-include-regex = "cmdb.cc_ApplicationBase$$|cc_SetBase$$|cc_ModuleBase$$|cmdb.cc_HostBase$$|cmdb.cc_ObjDes$$|cc_ObjAttDes$$|cmdb.cc_ObjectBase_(.*)_pub_" -namespace-regex = "cmdb.cc_ApplicationBase$$|cc_SetBase$$|cc_ModuleBase$$|cmdb.cc_HostBase$$|cmdb.cc_ObjDes$$|cc_ObjAttDes$$|cmdb.cc_ObjectBase_(.*)_pub_" - -# plugin -mapper-plugin-path = "etc/monstache-plugin.so" - -# resume mode -resume = true - ''' - template = FileTemplate(monstachesoconfig_file_template_str) - result = template.substitute(**context) - with open(outputMonstache + "config.toml", 'w') as tmp_file: - tmp_file.write(result) - - # monstache.so extra.toml.toml - monstachesoextra_file_template_str = ''' -# elasticsearch settings - -# the param must be assigned -elasticsearch-shard-num = "$es_shard_num" -elasticsearch-replica-num = "$es_replica_num" - ''' - template = FileTemplate(monstachesoextra_file_template_str) - result = template.substitute(**context) - with open(outputMonstache + "extra.toml", 'w') as tmp_file: - tmp_file.write(result) - # common.yaml common_file_template_str = ''' #topoServer: @@ -257,6 +213,11 @@ def generate_config_file( # secretsToken: # secretsProject: # secretsEnv: +#syncServer: +# fullTextSearch: +# enableSync: false +# indexShardNum: 1 +# indexReplicaNum: 1 #elasticsearch配置 es: @@ -530,6 +491,17 @@ def generate_config_file( caFile: # 用于解密根据RFC1423加密的证书密钥的PEM块 password: + +# syncServer相关配置 +syncServer: + # 全文检索同步相关配置 + fullTextSearch: + # 是否开启全文检索同步, 默认为false + enableSync: false + # ES索引拥有的主分片数量 + indexShardNum: 1 + # ES索引每个主分片拥有的副本数量 + indexReplicaNum: 1 ''' template = FileTemplate(common_file_template_str) @@ -580,8 +552,6 @@ def generate_config_file( # res: /data/cmdb/cmdb_adminserver/conf/errors #language: # res: /data/cmdb/cmdb_adminserver/conf/language -#monstache: -# dir: /data/cmdb/monstache/etc #auth: # address: 127.0.0.1 # appCode: bk_cmdb @@ -606,9 +576,6 @@ def 
generate_config_file( # 指定language的路径 language: res: conf/language -# 指定monstache相关配置文件 -monstache: - res: monstache/etc ''' template = FileTemplate(migrate_file_template_str) @@ -716,6 +683,7 @@ def main(argv): "cmdb_taskserver": 60012, "cmdb_cloudserver": 60013, "cmdb_authserver": 60014, + "cmdb_syncserver": 60015, "cmdb_cacheservice": 50010 } arr = [ @@ -1031,7 +999,6 @@ def main(argv): ) update_start_script(rd_server, server_ports, auth['auth_enabled'], log_level, register_ip, enable_cryptor) print('initial configurations success, configs could be found at cmdb_adminserver/configures') - print('initial monstache config success, configs could be found at monstache/etc') if __name__ == "__main__": diff --git a/src/Makefile b/src/Makefile index f55de6f0c6..054f95580f 100644 --- a/src/Makefile +++ b/src/Makefile @@ -68,13 +68,11 @@ enterprise: mkdir -p $(SOURCE_ROOT)/bin/enterprise/cmdb/errors/ mkdir -p $(SOURCE_ROOT)/bin/enterprise/cmdb/language/ mkdir -p $(SOURCE_ROOT)/bin/enterprise/cmdb/web/ - mkdir -p $(SOURCE_ROOT)/bin/enterprise/cmdb/monstache/etc mkdir -p $(SOURCE_ROOT)/bin/enterprise/cmdb/support-files/ mkdir -p $(SOURCE_ROOT)/bin/enterprise/cmdb/server/changelog_user/ cp -R ${RESOURCE_DIR}/errors/* $(SOURCE_ROOT)/bin/enterprise/cmdb/errors cp -R ${RESOURCE_DIR}/language/* $(SOURCE_ROOT)/bin/enterprise/cmdb/language - cp -R ${BIN_PATH}/monstache/ $(SOURCE_ROOT)/bin/enterprise/cmdb/monstache cp -R ${BIN_PATH}/web/* $(SOURCE_ROOT)/bin/enterprise/cmdb/web/ cp ${BIN_PATH}/cmdb_*/cmdb_* $(SOURCE_ROOT)/bin/enterprise/cmdb/server/bin/ cp ${BIN_PATH}/tool_*/tool_* $(SOURCE_ROOT)/bin/enterprise/cmdb/server/bin/ diff --git a/src/apimachinery/cacheservice/cache/common/api.go b/src/apimachinery/cacheservice/cache/common/api.go new file mode 100644 index 0000000000..549a9af55a --- /dev/null +++ b/src/apimachinery/cacheservice/cache/common/api.go @@ -0,0 +1,41 @@ +/* + * Tencent is pleased to support the open source community by making + * 蓝鲸智云 - 配置平台 (BlueKing - 
Configuration System) available. + * Copyright (C) 2017 THL A29 Limited, + * a Tencent company. All rights reserved. + * Licensed under the MIT License (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at http://opensource.org/licenses/MIT + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on + * an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, + * either express or implied. See the License for the + * specific language governing permissions and limitations under the License. + * We undertake not to change the open source license (MIT license) applicable + * to the current version of the project delivered to anyone in the future. + */ + +// Package common defines the common resource cache client +package common + +import ( + "context" + "net/http" + + "configcenter/src/apimachinery/rest" + "configcenter/src/common/metadata" +) + +// Interface is the common resource cache client interface +type Interface interface { + ListWithKey(ctx context.Context, h http.Header, typ string, opt *metadata.ListCommonCacheWithKeyOpt) (string, error) +} + +// NewCacheClient new common resource cache client +func NewCacheClient(client rest.ClientInterface) Interface { + return &cache{client: client} +} + +type cache struct { + client rest.ClientInterface +} diff --git a/src/apimachinery/cacheservice/cache/common/cache.go b/src/apimachinery/cacheservice/cache/common/cache.go new file mode 100644 index 0000000000..c60850a3a2 --- /dev/null +++ b/src/apimachinery/cacheservice/cache/common/cache.go @@ -0,0 +1,49 @@ +/* + * Tencent is pleased to support the open source community by making + * 蓝鲸智云 - 配置平台 (BlueKing - Configuration System) available. + * Copyright (C) 2017 THL A29 Limited, + * a Tencent company. All rights reserved. 
+ * Licensed under the MIT License (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at http://opensource.org/licenses/MIT + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on + * an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, + * either express or implied. See the License for the + * specific language governing permissions and limitations under the License. + * We undertake not to change the open source license (MIT license) applicable + * to the current version of the project delivered to anyone in the future. + */ + +package common + +import ( + "context" + "net/http" + + "configcenter/src/common/errors" + "configcenter/src/common/metadata" +) + +// ListWithKey list common resource cache info with specified keys +func (c *cache) ListWithKey(ctx context.Context, h http.Header, typ string, opt *metadata.ListCommonCacheWithKeyOpt) ( + string, error) { + + resp, err := c.client.Post(). + WithContext(ctx). + Body(opt). + SubResourcef("/find/cache/type/%s/with_key", typ). + WithHeaders(h). + Do(). 
+ IntoJsonString() + + if err != nil { + return "", errors.CCHttpError + } + + if !resp.Result { + return "", resp.CCError() + } + + return resp.Data, nil +} diff --git a/src/apimachinery/cacheservice/cacheservice.go b/src/apimachinery/cacheservice/cacheservice.go index 2bd02a03d8..f445485ebf 100644 --- a/src/apimachinery/cacheservice/cacheservice.go +++ b/src/apimachinery/cacheservice/cacheservice.go @@ -16,6 +16,7 @@ package cacheservice import ( "fmt" + "configcenter/src/apimachinery/cacheservice/cache/common" "configcenter/src/apimachinery/cacheservice/cache/event" "configcenter/src/apimachinery/cacheservice/cache/host" "configcenter/src/apimachinery/cacheservice/cache/topology" @@ -28,6 +29,7 @@ type Cache interface { Host() host.Interface Topology() topology.Interface Event() event.Interface + CommonRes() common.Interface } // CacheServiceClientInterface TODO @@ -72,3 +74,8 @@ func (c *cache) Topology() topology.Interface { func (c *cache) Event() event.Interface { return event.NewCacheClient(c.restCli) } + +// CommonRes is the common resource cache client +func (c *cache) CommonRes() common.Interface { + return common.NewCacheClient(c.restCli) +} diff --git a/src/common/definitions.go b/src/common/definitions.go index f584e48073..1b35f05c59 100644 --- a/src/common/definitions.go +++ b/src/common/definitions.go @@ -1758,3 +1758,8 @@ const ( // TopoModuleName topo path name TopoModuleName = "topo_module_name" ) + +const ( + // MongoMetaID is mongodb meta id field + MongoMetaID = "_id" +) diff --git a/src/common/mapstruct/decode.go b/src/common/mapstruct/decode.go index 484346442e..b5547207d8 100644 --- a/src/common/mapstruct/decode.go +++ b/src/common/mapstruct/decode.go @@ -63,3 +63,19 @@ func Struct2Map(v interface{}) (map[string]interface{}, error) { } return data, nil } + +// Decode2StructWithTag convert map to struct using specified tag +// 适用场景:仅支持原生转 map to struct +func Decode2StructWithTag(m map[string]interface{}, st interface{}, tag string) error { + 
config := &mapstructure.DecoderConfig{ + TagName: tag, + Result: st, + } + + decoder, err := mapstructure.NewDecoder(config) + if err != nil { + return err + } + + return decoder.Decode(m) +} diff --git a/src/common/metadata/cache.go b/src/common/metadata/cache.go index 4515baa165..dbd363019b 100644 --- a/src/common/metadata/cache.go +++ b/src/common/metadata/cache.go @@ -12,7 +12,12 @@ package metadata -// SearchHostWithIP 通过IP查找host details请求参数 +import ( + "configcenter/src/common" + "configcenter/src/common/errors" +) + +// SearchHostWithInnerIPOption 通过IP查找host details请求参数 type SearchHostWithInnerIPOption struct { InnerIP string `json:"bk_host_innerip"` CloudID int64 `json:"bk_cloud_id"` @@ -61,3 +66,28 @@ type ListHostWithPage struct { // max page limit is 1000 Page BasePage `json:"page"` } + +// ListCommonCacheWithKeyOpt is the option to list common cache with key. +type ListCommonCacheWithKeyOpt struct { + Kind string `json:"kind"` + // length range is [1,500] + Keys []string `json:"keys"` + Fields []string `json:"fields"` +} + +// Validate ListCommonCacheWithKeyOpt +func (opt ListCommonCacheWithKeyOpt) Validate() errors.RawErrorInfo { + if len(opt.Kind) == 0 { + return errors.RawErrorInfo{ErrCode: common.CCErrCommParamsNeedSet, Args: []interface{}{"kind"}} + } + + if len(opt.Keys) == 0 { + return errors.RawErrorInfo{ErrCode: common.CCErrCommParamsNeedSet, Args: []interface{}{"keys"}} + } + + if len(opt.Keys) > 500 { + return errors.RawErrorInfo{ErrCode: common.CCErrCommXXExceedLimit, Args: []interface{}{"keys", 500}} + } + + return errors.RawErrorInfo{} +} diff --git a/src/common/types/serverInfo.go b/src/common/types/serverInfo.go index b811da5833..077a49655c 100644 --- a/src/common/types/serverInfo.go +++ b/src/common/types/serverInfo.go @@ -52,6 +52,7 @@ const ( CC_MODULE_TASK = "task" CC_MODULE_CLOUD = "cloud" CC_MODULE_AUTH = "auth" + CC_MODULE_SYNC = "sync" // CC_MODULE_CACHE 缓存服务 CC_MODULE_CACHESERVICE = "cacheservice" ) diff --git 
a/src/scene_server/sync_server/Makefile b/src/scene_server/sync_server/Makefile new file mode 100644 index 0000000000..8871af3130 --- /dev/null +++ b/src/scene_server/sync_server/Makefile @@ -0,0 +1,5 @@ +TARGET_NAME?=cmdb_syncserver +TARGET_PORT?=60015 +PROJECT_PATH=$(shell cd ../../../; pwd) + +include ../../../scripts/Makefile diff --git a/src/scene_server/sync_server/app/options/options.go b/src/scene_server/sync_server/app/options/options.go new file mode 100644 index 0000000000..2c09586afd --- /dev/null +++ b/src/scene_server/sync_server/app/options/options.go @@ -0,0 +1,47 @@ +/* + * Tencent is pleased to support the open source community by making + * 蓝鲸智云 - 配置平台 (BlueKing - Configuration System) available. + * Copyright (C) 2017 THL A29 Limited, + * a Tencent company. All rights reserved. + * Licensed under the MIT License (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at http://opensource.org/licenses/MIT + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on + * an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, + * either express or implied. See the License for the + * specific language governing permissions and limitations under the License. + * We undertake not to change the open source license (MIT license) applicable + * to the current version of the project delivered to anyone in the future. + */ + +// Package options defines sync server options. 
+package options + +import ( + "configcenter/src/common/core/cc/config" + + "github.com/spf13/pflag" +) + +// ServerOption defines sync server options in flags +type ServerOption struct { + ServConf *config.CCAPIConfig +} + +// NewServerOption new ServerOption +func NewServerOption() *ServerOption { + s := ServerOption{ + ServConf: config.NewCCAPIConfig(), + } + + return &s +} + +// AddFlags add flags for ServerOption +func (s *ServerOption) AddFlags(fs *pflag.FlagSet) { + fs.StringVar(&s.ServConf.AddrPort, "addrport", "127.0.0.1:60015", "The ip address and port of the server") + fs.StringVar(&s.ServConf.RegDiscover, "regdiscv", "", "Register and discover service address. e.g: 127.0.0.1:2181") + fs.StringVar(&s.ServConf.ExConfig, "config", "", "Extra config path. e.g conf/api.conf") + fs.StringVar(&s.ServConf.RegisterIP, "register-ip", "", "The ip address registered on zookeeper, it can be domain") +} diff --git a/src/scene_server/sync_server/app/server.go b/src/scene_server/sync_server/app/server.go new file mode 100644 index 0000000000..73e72ab44e --- /dev/null +++ b/src/scene_server/sync_server/app/server.go @@ -0,0 +1,165 @@ +/* + * Tencent is pleased to support the open source community by making + * 蓝鲸智云 - 配置平台 (BlueKing - Configuration System) available. + * Copyright (C) 2017 THL A29 Limited, + * a Tencent company. All rights reserved. + * Licensed under the MIT License (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at http://opensource.org/licenses/MIT + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on + * an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, + * either express or implied. See the License for the + * specific language governing permissions and limitations under the License. 
+ * We undertake not to change the open source license (MIT license) applicable + * to the current version of the project delivered to anyone in the future. + */ + +// Package app starts sync server +package app + +import ( + "context" + "errors" + "fmt" + "time" + + "configcenter/src/common" + "configcenter/src/common/backbone" + cc "configcenter/src/common/backbone/configcenter" + "configcenter/src/common/blog" + "configcenter/src/common/types" + "configcenter/src/scene_server/sync_server/app/options" + "configcenter/src/scene_server/sync_server/logics" + fulltextsearch "configcenter/src/scene_server/sync_server/logics/full-text-search" + "configcenter/src/scene_server/sync_server/service" + "configcenter/src/storage/driver/mongodb" + "configcenter/src/storage/driver/redis" + "configcenter/src/storage/stream" + "configcenter/src/thirdparty/elasticsearch" +) + +// SyncServer is the sync server +type SyncServer struct { + Core *backbone.Engine + Config *logics.Config + Service *service.Service + Logics *logics.Logics +} + +// Run sync server +func Run(ctx context.Context, cancel context.CancelFunc, op *options.ServerOption) error { + svrInfo, err := types.NewServerInfo(op.ServConf) + if err != nil { + return fmt.Errorf("wrap server info failed, err: %v", err) + } + + server := new(SyncServer) + + input := &backbone.BackboneParameter{ + Regdiscv: op.ServConf.RegDiscover, + ConfigPath: op.ServConf.ExConfig, + ConfigUpdate: server.onConfigUpdate, + SrvInfo: svrInfo, + } + engine, err := backbone.NewBackbone(ctx, input) + if err != nil { + return fmt.Errorf("new backbone failed, err: %v", err) + } + server.Core = engine + + configReady := false + for sleepCnt := 0; sleepCnt < common.APPConfigWaitTime; sleepCnt++ { + if server.Config != nil { + configReady = true + break + } + blog.Infof("waiting for config ready ...") + time.Sleep(time.Second) + } + if !configReady { + blog.Infof("waiting config timeout.") + return errors.New("configuration item not found") + } + + 
watcher, err := initClient(engine) + if err != nil { + return err + } + + // init sync server logics, then start web service + server.Logics, err = logics.New(engine, server.Config, watcher) + if err != nil { + return fmt.Errorf("new logics failed, err: %v", err) + } + + server.Service = service.New(engine, server.Logics) + + err = backbone.StartServer(ctx, cancel, engine, server.Service.WebService(), true) + if err != nil { + return err + } + + select { + case <-ctx.Done(): + } + + return nil +} + +func initClient(engine *backbone.Engine) (stream.LoopInterface, error) { + // init mongo and redis client + mongoConf, err := engine.WithMongo() + if err != nil { + return nil, err + } + + if err = mongodb.InitClient("", &mongoConf); err != nil { + blog.Errorf("init mongo client failed, err: %v, conf: %+v", err, mongoConf) + return nil, err + } + + watchMongoConf, dbErr := engine.WithMongo("watch") + if dbErr != nil { + blog.Errorf("new watch mongo client failed, err: %v", dbErr) + return nil, dbErr + } + + if err = mongodb.InitClient("watch", &watchMongoConf); err != nil { + blog.Errorf("init watch mongo client failed, err: %v, conf: %+v", err, watchMongoConf) + return nil, err + } + + redisConf, err := engine.WithRedis() + if err != nil { + return nil, err + } + + if err = redis.InitClient("redis", &redisConf); err != nil { + blog.Errorf("init redis client failed, err: %v, conf: %+v", err, redisConf) + return nil, err + } + + watcher, err := stream.NewLoopStream(mongoConf.GetMongoConf(), engine.ServiceManageInterface) + if err != nil { + blog.Errorf("new loop watch stream failed, err: %v", err) + return nil, err + } + return watcher, nil +} + +func (s *SyncServer) onConfigUpdate(previous, current cc.ProcessConfig) { + s.Config = new(logics.Config) + s.Config.FullTextSearch = new(fulltextsearch.Config) + blog.Infof("config updated, new config: %s", string(current.ConfigData)) + + err := cc.UnmarshalKey("syncServer", s.Config) + if err != nil { + return + } + + 
s.Config.FullTextSearch.Es, err = elasticsearch.ParseConfig("es") + if err != nil { + blog.Warnf("parse es config failed: %v", err) + } +} diff --git a/src/scene_server/sync_server/logics/full-text-search/cache/cache.go b/src/scene_server/sync_server/logics/full-text-search/cache/cache.go new file mode 100644 index 0000000000..3f6c164a7c --- /dev/null +++ b/src/scene_server/sync_server/logics/full-text-search/cache/cache.go @@ -0,0 +1,261 @@ +/* + * Tencent is pleased to support the open source community by making + * 蓝鲸智云 - 配置平台 (BlueKing - Configuration System) available. + * Copyright (C) 2017 THL A29 Limited, + * a Tencent company. All rights reserved. + * Licensed under the MIT License (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at http://opensource.org/licenses/MIT + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on + * an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, + * either express or implied. See the License for the + * specific language governing permissions and limitations under the License. + * We undertake not to change the open source license (MIT license) applicable + * to the current version of the project delivered to anyone in the future. 
+ */ + +// Package cache defines full-text search caching logics +package cache + +import ( + "context" + "errors" + "sync" + + "configcenter/src/apimachinery/cacheservice" + "configcenter/src/common" + "configcenter/src/common/blog" + "configcenter/src/common/json" + "configcenter/src/common/mapstr" + "configcenter/src/common/metadata" + "configcenter/src/common/util" + ferrors "configcenter/src/scene_server/sync_server/logics/full-text-search/errors" + cachekey "configcenter/src/source_controller/cacheservice/cache/common/key" + "configcenter/src/storage/driver/mongodb" +) + +const synUser = "cc_full_text_search_sync" + +func getCacheInfo(cli cacheservice.Cache, data, desInfo interface{}, typ cachekey.KeyType, + kind cachekey.KeyKind) error { + + ctx := context.Background() + header := util.BuildHeader(synUser, common.BKDefaultOwnerID) + + generator, err := cachekey.GetKeyGenerator(typ) + if err != nil { + blog.Errorf("get %s key generator failed, err: %v", typ, err) + return err + } + + redisKey, err := generator.GenerateRedisKey(kind, data) + if err != nil { + blog.Errorf("get %s kind: %s redis key from data: %+v failed, err: %v", typ, kind, data, err) + return err + } + + opt := &metadata.ListCommonCacheWithKeyOpt{ + Kind: string(kind), + Keys: []string{redisKey}, + } + + infoJs, err := cli.CommonRes().ListWithKey(ctx, header, string(typ), opt) + if err != nil { + blog.Errorf("list %s data from cache failed, err: %v, opt: %+v", typ, err, opt) + return err + } + + err = json.Unmarshal([]byte(infoJs), desInfo) + if err != nil { + blog.Errorf("unmarshal %s cache info %s failed, err: %v", typ, infoJs, err) + return err + } + + return nil +} + +// GetQuotedInfoByObjID get the quoted object id related property id and src obj id +func GetQuotedInfoByObjID(cli cacheservice.Cache, objID, supplierAccount string) (bool, string, string) { + // get quoted info from cache + quoteInfo := make([]metadata.ModelQuoteRelation, 0) + err := getCacheInfo(cli, objID, "eInfo, 
cachekey.ModelQuoteRelType, cachekey.DestModelKind) + if err == nil { + for _, relation := range quoteInfo { + if relation.SupplierAccount == supplierAccount { + return true, quoteInfo[0].PropertyID, quoteInfo[0].SrcModel + } + } + + return false, "", "" + } + + // get quoted info from db by dest model id + cond := mapstr.MapStr{ + common.BKDestModelField: objID, + common.BkSupplierAccount: supplierAccount, + } + exists := false + rel := new(metadata.ModelQuoteRelation) + + ferrors.FatalErrHandler(200, 100, func() error { + err := mongodb.Client().Table(common.BKTableNameModelQuoteRelation).Find(cond).One(context.Background(), &rel) + if err != nil { + if mongodb.Client().IsNotFoundError(err) { + return nil + } + blog.Errorf("get model quote relation failed, cond: %+v, err: %v", cond, err) + return err + } + + exists = true + return nil + }) + + if !exists { + return false, "", "" + } + + return true, rel.PropertyID, rel.SrcModel +} + +// GetModelInfoByObjID get model info by object id +func GetModelInfoByObjID(cli cacheservice.Cache, objID string) (mapstr.MapStr, bool) { + // get model info from cache + objects := make([]mapstr.MapStr, 0) + err := getCacheInfo(cli, objID, &objects, cachekey.ModelType, cachekey.ObjIDKind) + if err == nil { + if len(objects) == 0 { + return make(mapstr.MapStr), false + } + + return objects[0], true + } + + // get model info from db by object id + cond := mapstr.MapStr{common.BKObjIDField: objID} + exists := false + data := make(mapstr.MapStr) + + ferrors.FatalErrHandler(200, 100, func() error { + err = mongodb.Client().Table(common.BKTableNameObjDes).Find(cond).One(context.Background(), &data) + if err != nil { + if mongodb.Client().IsNotFoundError(err) { + return nil + } + blog.Errorf("get model data failed, cond: %+v, err: %v", cond, err) + return err + } + + exists = true + return nil + }) + + return data, exists +} + +// GetPropertyInfoByObjID get property id to info map by object id +func GetPropertyInfoByObjID(cli 
cacheservice.Cache, objID string) (map[string]mapstr.MapStr, bool) { + properties := getPropertiesByObjID(cli, objID) + + if len(properties) == 0 { + return make(map[string]mapstr.MapStr), false + } + + propertyInfo := make(map[string]mapstr.MapStr) + for _, property := range properties { + propID := util.GetStrByInterface(property[common.BKPropertyIDField]) + propertyInfo[propID] = property + } + + return propertyInfo, true +} + +func getPropertiesByObjID(cli cacheservice.Cache, objID string) []mapstr.MapStr { + // get model info from cache + properties := make([]mapstr.MapStr, 0) + err := getCacheInfo(cli, objID, &properties, cachekey.AttributeType, cachekey.ObjIDKind) + if err != nil { + // get model info from db to compensate + cond := mapstr.MapStr{common.BKObjIDField: objID} + + ferrors.FatalErrHandler(200, 100, func() error { + err = mongodb.Client().Table(common.BKTableNameObjAttDes).Find(cond).All(context.Background(), &properties) + if err != nil { + blog.Errorf("get model attribute data failed, cond: %+v, err: %v", cond, err) + return err + } + + return nil + }) + } + + return properties +} + +// EnumIDToName change instance data enum id to enum name. 
+func EnumIDToName(cli cacheservice.Cache, document mapstr.MapStr, objID string) mapstr.MapStr { + properties := getPropertiesByObjID(cli, objID) + + if len(properties) == 0 { + return document + } + + for _, property := range properties { + propType := util.GetStrByInterface(property[common.BKPropertyTypeField]) + + if propType != common.FieldTypeEnum { + continue + } + + propID := util.GetStrByInterface(property[common.BKPropertyIDField]) + if _, ok := document[propID]; !ok { + continue + } + + docVal, ok := document[propID].(string) + if !ok { + continue + } + + option, err := metadata.ParseEnumOption(property[common.BKOptionField]) + if err != nil { + blog.Errorf("parse %v enum option failed, err: %v", property, err) + continue + } + + for _, opt := range option { + if opt.ID == docVal { + document[propID] = opt.Name + break + } + } + } + + return document +} + +// ResPoolBizIDMap is used to judge if biz/set... is in resource pool +var ResPoolBizIDMap = sync.Map{} + +// InitResourcePoolBiz initialize resource pool biz info +// NOTE: right now resource pool cannot be operated, so we don't need to change it. +func InitResourcePoolBiz() error { + resPoolCond := mapstr.MapStr{common.BKDefaultField: common.DefaultAppFlag} + bizs := make([]metadata.BizInst, 0) + err := mongodb.Client().Table(common.BKTableNameBaseApp).Find(resPoolCond).Fields(common.BKAppIDField). 
+ All(context.Background(), &bizs) + if err != nil { + return err + } + + if len(bizs) == 0 { + return errors.New("there's no resource pool biz") + } + + for _, biz := range bizs { + ResPoolBizIDMap.Store(biz.BizID, struct{}{}) + } + + return nil +} diff --git a/src/scene_server/sync_server/logics/full-text-search/data_syncer.go b/src/scene_server/sync_server/logics/full-text-search/data_syncer.go new file mode 100644 index 0000000000..5c7c462941 --- /dev/null +++ b/src/scene_server/sync_server/logics/full-text-search/data_syncer.go @@ -0,0 +1,188 @@ +/* + * Tencent is pleased to support the open source community by making + * 蓝鲸智云 - 配置平台 (BlueKing - Configuration System) available. + * Copyright (C) 2017 THL A29 Limited, + * a Tencent company. All rights reserved. + * Licensed under the MIT License (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at http://opensource.org/licenses/MIT + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on + * an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, + * either express or implied. See the License for the + * specific language governing permissions and limitations under the License. + * We undertake not to change the open source license (MIT license) applicable + * to the current version of the project delivered to anyone in the future. 
+ */ + +package fulltextsearch + +import ( + "context" + "errors" + "fmt" + + "configcenter/src/common/blog" + "configcenter/src/common/mapstr" + "configcenter/src/common/metadata" + ferrors "configcenter/src/scene_server/sync_server/logics/full-text-search/errors" + "configcenter/src/scene_server/sync_server/logics/full-text-search/parser" + "configcenter/src/scene_server/sync_server/logics/full-text-search/types" + + "github.com/olivere/elastic/v7" +) + +// dataSyncer is the es data syncer +type dataSyncer struct { + index string + parser parser.Parser + bulk *elastic.BulkService + requests []elastic.BulkableRequest +} + +// newDataSyncer new dataSyncer +func newDataSyncer(esCli *elastic.Client, index string) (*dataSyncer, error) { + _, exists := types.IndexMap[index] + if !exists { + return nil, fmt.Errorf("index %s is invalid", index) + } + + return &dataSyncer{ + index: index, + parser: parser.IndexParserMap[index], + bulk: esCli.Bulk(), + }, nil +} + +// addUpsertReq add upsert request to es bulk request, returns if the data is valid and needs to be upserted +func (s *dataSyncer) addUpsertReq(coll, oid string, data []mapstr.MapStr, rid string) bool { + if len(data) == 0 { + blog.Errorf("upsert data is empty, coll: %s, oid: %s, rid: %s", coll, oid, rid) + return false + } + + skip, doc, err := s.parser.ParseData(data, coll, rid) + if err != nil { + blog.Errorf("parse %s data %+v failed, err: %v, rid: %s", coll, data, err, rid) + return false + } + + if skip { + return false + } + + id := s.parser.GenEsID(coll, oid) + + req := elastic.NewBulkUpdateRequest(). + Index(types.GetIndexName(s.index)). + RetryOnConflict(10). 
+ Id(id) + + _, exists := doc[metadata.TablePropertyName] + if exists { + // upsert document with nested table fields by script, this will upsert the nested data to the exact value + req.Script(elastic.NewScriptInline(`ctx._source=params`).Params(doc)).Upsert(doc) + } else { + req.DocAsUpsert(true).Doc(doc) + } + + if _, err = req.Source(); err != nil { + blog.Errorf("upsert data is invalid, err: %v, id: %s, data: %+v, rid: %s", err, id, data, rid) + return false + } + + s.requests = append(s.requests, req) + + return true +} + +// addWatchDeleteReq add watch data delete request to es bulk request, returns if the data needs to be deleted +func (s *dataSyncer) addWatchDeleteReq(collOidMap map[string][]string, rid string) bool { + if len(collOidMap) == 0 { + return false + } + + needDelIDs, extraRequests, needDelAll := s.parser.ParseWatchDeleteData(collOidMap, rid) + s.requests = append(s.requests, extraRequests...) + + if needDelAll { + needDelIDs = make([]string, 0) + for coll, oids := range collOidMap { + for _, oid := range oids { + needDelIDs = append(needDelIDs, s.parser.GenEsID(coll, oid)) + } + } + } + + for _, id := range needDelIDs { + req := elastic.NewBulkDeleteRequest().Index(types.GetIndexName(s.index)).Id(id) + s.requests = append(s.requests, req) + } + + return true +} + +// addDeleteReq add es data delete request to es bulk request, returns if the data needs to be deleted +func (s *dataSyncer) addEsDeleteReq(delEsIDs []string, rid string) bool { + if len(delEsIDs) == 0 { + blog.Errorf("es delete ids is empty, rid: %s", rid) + return false + } + + for _, id := range delEsIDs { + req := elastic.NewBulkDeleteRequest().Index(types.GetIndexName(s.index)).Id(id) + s.requests = append(s.requests, req) + } + + return true +} + +// doBulk do es bulk request +func (s *dataSyncer) doBulk(ctx context.Context, rid string) error { + return ferrors.EsRespErrHandler(func() (bool, error) { + if len(s.requests) == 0 { + return false, nil + } + + s.bulk.Reset() + for 
_, req := range s.requests { + s.bulk.Add(req) + } + + resp, err := s.bulk.Do(ctx) + if err != nil { + blog.Errorf("do bulk request failed, err: %v, requests: %+v, rid: %s", err, s.requests, rid) + return false, err + } + + if resp == nil || !resp.Errors { + return false, nil + } + + if len(resp.Items) != len(s.requests) { + blog.Errorf("bulk response length %d != request length %d, rid: %s", len(resp.Items), len(s.requests), rid) + return false, errors.New("bulk response length != request length") + } + + var retry, fatal bool + retryRequests := make([]elastic.BulkableRequest, 0) + for i, item := range resp.Items { + for _, result := range item { + retry, fatal = ferrors.EsStatusHandler(result.Status) + if !retry { + break + } + + blog.Errorf("do request %+v failed, resp: %+v, rid: %s", s.requests[i], result, rid) + retryRequests = append(retryRequests, s.requests[i]) + break + } + } + + if len(retryRequests) > 0 { + return fatal, errors.New("do bulk request failed") + } + + return false, nil + }) +} diff --git a/src/scene_server/sync_server/logics/full-text-search/errors/error.go b/src/scene_server/sync_server/logics/full-text-search/errors/error.go new file mode 100644 index 0000000000..f55821951f --- /dev/null +++ b/src/scene_server/sync_server/logics/full-text-search/errors/error.go @@ -0,0 +1,126 @@ +/* + * Tencent is pleased to support the open source community by making + * 蓝鲸智云 - 配置平台 (BlueKing - Configuration System) available. + * Copyright (C) 2017 THL A29 Limited, + * a Tencent company. All rights reserved. + * Licensed under the MIT License (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at http://opensource.org/licenses/MIT + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on + * an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, + * either express or implied. 
See the License for the + * specific language governing permissions and limitations under the License. + * We undertake not to change the open source license (MIT license) applicable + * to the current version of the project delivered to anyone in the future. + */ + +// Package errors defines the full-text search error handler +package errors + +import ( + "math/rand" + "net/http" + "time" + + "github.com/olivere/elastic/v7" +) + +// BasicErrHandler is the basic err handler that retries after an increasing interval +// @param baseTime: the basic retry sleep time in milliseconds +// @param randTime: the random retry sleep time maximum value in milliseconds +func BasicErrHandler(baseTime, randTime int, operator func() (bool, error)) { + retry := 0 + for { + needRetry, err := operator() + if err == nil { + return + } + + if !needRetry { + return + } + + retry++ + + rand.Seed(time.Now().UnixNano()) + time.Sleep(time.Millisecond * time.Duration(rand.Intn(randTime)+baseTime) * time.Duration(retry)) + } +} + +// FatalErrHandler is the err handler for fatal error that must be retried +// strategy: always retry, because when there is a fatal error, every operation will be failed +func FatalErrHandler(baseTime, randTime int, operator func() error) { + BasicErrHandler(baseTime, randTime, func() (bool, error) { + err := operator() + return true, err + }) +} + +// EsStatusHandler is the es status handler, returns if the request should be retied & if it's a fatal error +func EsStatusHandler(status int) (bool, bool) { + // the request is successful + if status >= 200 && status <= 299 { + return false, false + } + + // skip the invalid requests + if elastic.IsForbidden(status) || elastic.IsUnauthorized(status) || + elastic.IsStatusCode(status, http.StatusBadRequest) { + return false, false + } + + // ignores version conflict error + if elastic.IsConflict(status) { + return true, false + } + + // this status mostly means index not exists, so we sleep for a long time to wait until 
index is recovered + if elastic.IsNotFound(status) { + time.Sleep(5 * time.Minute) + return true, true + } + + // sleep for a long time to lower the request num + if elastic.IsTimeout(status) || elastic.IsStatusCode(status, http.StatusTooManyRequests) { + time.Sleep(2 * time.Minute) + return true, false + } + + return true, false +} + +// EsErrRetryCount is the retry count for es error +const EsErrRetryCount = 5 + +// EsRespErrHandler is the response err handler for es operation +func EsRespErrHandler(operator func() (bool, error)) error { + retry := 1 + var err error + + BasicErrHandler(200, 100, func() (bool, error) { + var fatal bool + fatal, err = operator() + if err == nil { + return false, nil + } + + if elastic.IsConnErr(err) { + return true, err + } + + if fatal { + return true, err + } + + if retry == EsErrRetryCount { + return false, err + } + + time.Sleep(time.Duration(retry) * 100 * time.Millisecond) + retry++ + return true, err + }) + + return err +} diff --git a/src/scene_server/sync_server/logics/full-text-search/full_text_search.go b/src/scene_server/sync_server/logics/full-text-search/full_text_search.go new file mode 100644 index 0000000000..ff77ddcb44 --- /dev/null +++ b/src/scene_server/sync_server/logics/full-text-search/full_text_search.go @@ -0,0 +1,119 @@ +/* + * Tencent is pleased to support the open source community by making + * 蓝鲸智云 - 配置平台 (BlueKing - Configuration System) available. + * Copyright (C) 2017 THL A29 Limited, + * a Tencent company. All rights reserved. + * Licensed under the MIT License (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at http://opensource.org/licenses/MIT + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on + * an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, + * either express or implied. 
See the License for the + * specific language governing permissions and limitations under the License. + * We undertake not to change the open source license (MIT license) applicable + * to the current version of the project delivered to anyone in the future. + */ + +// Package fulltextsearch defines the full-text search synchronization logics +package fulltextsearch + +import ( + "context" + "fmt" + "strconv" + + types "configcenter/pkg/types/sync/full-text-search" + "configcenter/src/apimachinery/cacheservice" + "configcenter/src/common/blog" + "configcenter/src/common/metadata" + "configcenter/src/common/util" + "configcenter/src/scene_server/sync_server/logics/full-text-search/parser" + "configcenter/src/scene_server/sync_server/logics/full-text-search/upgrader" + "configcenter/src/storage/stream" + "configcenter/src/thirdparty/elasticsearch" +) + +var _ SyncI = new(fullTextSearch) + +// SyncI defines the full-text search synchronization interface +type SyncI interface { + SyncData(ctx context.Context, opt *types.SyncDataOption, rid string) error + Migrate(ctx context.Context, rid string) (*types.MigrateResult, error) +} + +// New full-text search sync interface instance +func New(conf *Config, cacheCli cacheservice.Cache, watcher stream.LoopInterface) (SyncI, error) { + if !conf.EnableSync { + return new(fullTextSearch), nil + } + + if conf.Es.FullTextSearch != "on" { + return new(fullTextSearch), nil + } + + f := &fullTextSearch{ + enableSync: conf.EnableSync, + } + + var err error + f.esCli, err = elasticsearch.NewEsClient(conf.Es) + if err != nil { + blog.Errorf("create es client failed, err: %v, conf: %+v", err, conf) + return nil, err + } + + f.cacheCli = cacheCli + + if conf.IndexShardNum <= 0 || conf.IndexReplicaNum <= 0 { + return nil, fmt.Errorf("index shard num %d or replica num %d is invalid", conf.IndexShardNum, + conf.IndexReplicaNum) + } + + indexSetting := metadata.ESIndexMetaSettings{ + Shards: strconv.Itoa(conf.IndexShardNum), + Replicas: 
strconv.Itoa(conf.IndexReplicaNum), + } + + upgrader.InitUpgrader(f.esCli.Client, indexSetting) + if _, err = f.Migrate(context.Background(), util.GenerateRID()); err != nil { + blog.Errorf("migrate failed, err: %v, conf: %+v", err, conf) + return nil, err + } + + parserClientSet := &parser.ClientSet{ + EsCli: f.esCli.Client, + CacheCli: cacheCli, + } + if err = parser.InitParser(parserClientSet); err != nil { + blog.Errorf("init parser failed, err: %v", err) + return nil, err + } + + if err = f.incrementalSync(watcher); err != nil { + blog.Errorf("start full-text search incremental sync failed, err: %v, conf: %+v", err, conf) + return nil, err + } + + return f, nil +} + +// Config defines full-text search sync configuration +type Config struct { + // EnableSync defines if full-text search sync is enabled + EnableSync bool `mapstructure:"enableSync"` + // IndexShardNum defines the number of es index shards + IndexShardNum int `mapstructure:"indexShardNum"` + // IndexReplicaNum defines the number of es index replicas + IndexReplicaNum int `mapstructure:"indexReplicaNum"` + + // Es elasticsearch configuration + Es *elasticsearch.EsConfig +} + +// fullTextSearch implements the full-text search synchronization interface +type fullTextSearch struct { + enableSync bool + esCli *elasticsearch.EsSrv + cacheCli cacheservice.Cache +} diff --git a/src/scene_server/sync_server/logics/full-text-search/incr_sync.go b/src/scene_server/sync_server/logics/full-text-search/incr_sync.go new file mode 100644 index 0000000000..4623d70c87 --- /dev/null +++ b/src/scene_server/sync_server/logics/full-text-search/incr_sync.go @@ -0,0 +1,271 @@ +/* + * Tencent is pleased to support the open source community by making + * 蓝鲸智云 - 配置平台 (BlueKing - Configuration System) available. + * Copyright (C) 2017 THL A29 Limited, + * a Tencent company. All rights reserved. + * Licensed under the MIT License (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at http://opensource.org/licenses/MIT + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on + * an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, + * either express or implied. See the License for the + * specific language governing permissions and limitations under the License. + * We undertake not to change the open source license (MIT license) applicable + * to the current version of the project delivered to anyone in the future. + */ + +package fulltextsearch + +import ( + "context" + "errors" + "fmt" + "time" + + "configcenter/src/common" + "configcenter/src/common/blog" + "configcenter/src/common/mapstr" + "configcenter/src/common/metadata" + "configcenter/src/common/util" + ftypes "configcenter/src/scene_server/sync_server/logics/full-text-search/types" + "configcenter/src/scene_server/sync_server/logics/token_handler" + "configcenter/src/source_controller/cacheservice/event" + "configcenter/src/storage/driver/mongodb" + "configcenter/src/storage/stream" + "configcenter/src/storage/stream/types" +) + +// incrSyncer is the full-text search data incremental syncer +type incrSyncer struct { + index string + metrics *event.EventMetrics + tokenHandler *tokenhandler.TokenHandler + dataSyncer *dataSyncer +} + +const ( + // watchKeyPrefix is the prefix of full-text search watch key + watchKeyPrefix = "full_text_search:" + // watchBatchSize is the batch size of full-text search watch + watchBatchSize = 1000 +) + +// incrementalSync do full-text search incremental sync +func (f fullTextSearch) incrementalSync(watcher stream.LoopInterface) error { + if !f.enableSync { + return errors.New("full text search sync is disabled") + } + + ctx := context.Background() + rid := util.GenerateRID() + + blog.Infof("start full-text search incremental sync, rid: %s", rid) + + for _, index := range ftypes.AllIndexNames { + key := genWatchKey(index) + + 
dataSyncer, err := newDataSyncer(f.esCli.Client, index) + if err != nil { + return err + } + + syncer := incrSyncer{ + index: index, + metrics: event.InitialMetrics(ftypes.IndexCollMap[index], "watch"), + dataSyncer: dataSyncer, + } + + // create token handler + tokenHandler, err := tokenhandler.New(key, mongodb.Client("watch"), syncer.metrics) + if err != nil { + return err + } + syncer.tokenHandler = tokenHandler + + exists, startAtTime, err := syncer.tokenHandler.GetStartWatchTime(ctx) + if err != nil { + blog.Errorf("get start watch time for %s failed, err: %v, rid: %s", key, err, rid) + return err + } + + // init watch token data and all es info if it's the first time to sync this index(token not exists) + if !exists { + if err = syncer.tokenHandler.InitWatchToken(ctx); err != nil { + return err + } + + if err = f.syncDataByIndex(ctx, index, rid); err != nil { + return err + } + } + + watchOpts := &types.WatchOptions{ + Options: types.Options{ + EventStruct: new(mapstr.MapStr), + StartAtTime: &startAtTime, + WatchFatalErrorCallback: syncer.tokenHandler.ResetWatchToken, + }, + } + + addCollWatchOpt(index, watchOpts) + + opts := &types.LoopBatchOptions{ + LoopOptions: types.LoopOptions{ + Name: key, + WatchOpt: watchOpts, + TokenHandler: syncer.tokenHandler, + }, + EventHandler: &types.BatchHandler{ + DoBatch: syncer.doBatch, + }, + BatchSize: watchBatchSize, + } + + if err = watcher.WithBatch(opts); err != nil { + blog.Errorf("watch batch failed, err: %v, opt: %+v, rid: %s", err, opts, rid) + return err + } + } + + return nil +} + +// addCollWatchOpt add collection watch options +func addCollWatchOpt(index string, watchOpts *types.WatchOptions) { + switch index { + case metadata.IndexNameModel: + // watch model and attribute table + watchOpts.Options.CollectionFilter = mapstr.MapStr{ + common.BKDBIN: []string{common.BKTableNameObjDes, common.BKTableNameObjAttDes}, + } + case metadata.IndexNameObjectInstance: + // watch all tables with the prefix of instance 
table + watchOpts.Options.CollectionFilter = mapstr.MapStr{ + common.BKDBLIKE: event.ObjInstTablePrefixRegex, + } + default: + watchOpts.Options.Collection = ftypes.IndexCollMap[index] + } +} + +// genWatchKey generate full-text search watch key +func genWatchKey(index string) string { + return watchKeyPrefix + index +} + +// doBatch handle one batch of full-text search watch events +func (s incrSyncer) doBatch(es []*types.Event) (retry bool) { + eventLen := len(es) + if eventLen == 0 { + return false + } + + rid := es[0].ID() + hasError := true + + // collect event related metrics + start := time.Now() + defer func() { + if retry { + s.metrics.CollectRetryError() + } + if hasError { + return + } + s.metrics.CollectCycleDuration(time.Since(start)) + }() + + // aggregate events + es, keys := s.aggregateEvents(es) + + // parse events and do es operation + hasChange := false + collOidMap := make(map[string][]string) + + for _, e := range es { + switch e.OperationType { + case types.Insert, types.Update, types.Replace: + docPtr, ok := e.Document.(*mapstr.MapStr) + if !ok || docPtr == nil || *docPtr == nil { + blog.Errorf("[%s] document is empty, event: %+v, rid: %s", s.index, e, rid) + continue + } + + if s.dataSyncer.addUpsertReq(e.Collection, e.Oid, []mapstr.MapStr{*docPtr}, rid) { + hasChange = true + } + case types.Delete: + collOidMap[e.Collection] = append(collOidMap[e.Collection], e.Oid) + // since following event cannot be parsed, skip them and do not retry + case types.Invalidate: + blog.Errorf("[%s] received invalid event operation type, doc: %s, rid: %s", s.index, e.DocBytes, rid) + case types.Drop: + blog.Errorf("[%s] received drop table event operation type, doc: %s, rid: %s", s.index, e.DocBytes, rid) + default: + blog.Errorf("[%s] received unsupported event operation type: %s, doc: %s, rid: %s", s.index, + e.OperationType, e.DocBytes, rid) + } + } + + if s.dataSyncer.addWatchDeleteReq(collOidMap, rid) { + hasChange = true + } + + if !hasChange { + 
blog.Infof("[%s] all events are invalid or do not need to sync, keys: %+v, rid: %s", s.index, keys, rid) + return false + } + + if err := s.dataSyncer.doBulk(context.Background(), rid); err != nil { + blog.Infof("[%s] do es bulk request failed, err: %v, rid: %s", s.index, err, rid) + return false + } + + blog.Infof("[%s] full-text search incremental sync success, keys: %+v, rid: %s", s.index, keys, rid) + hasError = false + return false +} + +// aggregateEvents deduplicate events by collection and oid +func (s incrSyncer) aggregateEvents(es []*types.Event) ([]*types.Event, []string) { + eventMap := make(map[string]*types.Event) + keys := make([]string, 0) + + for i := len(es) - 1; i >= 0; i-- { + e := es[i] + + // collect event's basic metrics + s.metrics.CollectBasic(e) + + key := genEventUniqueKey(e.Collection, e.Oid) + keys = append(keys, key) + + lastEvent, exists := eventMap[key] + if !exists { + eventMap[key] = e + continue + } + + switch e.OperationType { + case types.Insert: + if lastEvent.OperationType == types.Delete { + delete(eventMap, key) + continue + } + eventMap[key].OperationType = types.Insert + } + } + + events := make([]*types.Event, 0) + for _, e := range eventMap { + events = append(events, e) + } + + return events, keys +} + +// genEventUniqueKey generate event unique key +func genEventUniqueKey(coll, oid string) string { + return fmt.Sprintf("%s.%s", coll, oid) +} diff --git a/src/scene_server/sync_server/logics/full-text-search/parser/biz_res.go b/src/scene_server/sync_server/logics/full-text-search/parser/biz_res.go new file mode 100644 index 0000000000..1ed2cac486 --- /dev/null +++ b/src/scene_server/sync_server/logics/full-text-search/parser/biz_res.go @@ -0,0 +1,59 @@ +/* + * Tencent is pleased to support the open source community by making + * 蓝鲸智云 - 配置平台 (BlueKing - Configuration System) available. + * Copyright (C) 2017 THL A29 Limited, + * a Tencent company. All rights reserved. 
+ * Licensed under the MIT License (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at http://opensource.org/licenses/MIT + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on + * an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, + * either express or implied. See the License for the + * specific language governing permissions and limitations under the License. + * We undertake not to change the open source license (MIT license) applicable + * to the current version of the project delivered to anyone in the future. + */ + +package parser + +import ( + "errors" + + "configcenter/src/common" + "configcenter/src/common/blog" + "configcenter/src/common/mapstr" + "configcenter/src/common/util" + "configcenter/src/scene_server/sync_server/logics/full-text-search/cache" +) + +// bizResParser is the data parser for biz resource +type bizResParser struct { + *objInstParser +} + +func newBizResParser(index string, cs *ClientSet) *bizResParser { + return &bizResParser{newObjInstParser(index, cs)} +} + +// ParseData parse mongo data to es data +func (p *bizResParser) ParseData(info []mapstr.MapStr, coll string, rid string) (bool, mapstr.MapStr, error) { + if len(info) == 0 { + return false, nil, errors.New("data is empty") + } + data := info[0] + + // do not sync resource pool resource to es + bizID, err := util.GetIntByInterface(data[common.BKAppIDField]) + if err != nil { + blog.Errorf("parse %s biz id failed, err: %v, data: %+v, rid: %s", p.index, err, data, rid) + return false, nil, err + } + + if _, exists := cache.ResPoolBizIDMap.Load(bizID); exists { + blog.Errorf("%s biz id %d is resource pool, skip, data: %+v, rid: %s", p.index, bizID, data, rid) + return true, nil, nil + } + + return p.objInstParser.ParseData(info, coll, rid) +} diff --git 
a/src/scene_server/sync_server/logics/full-text-search/parser/common.go b/src/scene_server/sync_server/logics/full-text-search/parser/common.go new file mode 100644 index 0000000000..dc10a2e211 --- /dev/null +++ b/src/scene_server/sync_server/logics/full-text-search/parser/common.go @@ -0,0 +1,87 @@ +/* + * Tencent is pleased to support the open source community by making + * 蓝鲸智云 - 配置平台 (BlueKing - Configuration System) available. + * Copyright (C) 2017 THL A29 Limited, + * a Tencent company. All rights reserved. + * Licensed under the MIT License (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at http://opensource.org/licenses/MIT + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on + * an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, + * either express or implied. See the License for the + * specific language governing permissions and limitations under the License. + * We undertake not to change the open source license (MIT license) applicable + * to the current version of the project delivered to anyone in the future. 
+ */ + +package parser + +import ( + "errors" + "fmt" + + "configcenter/src/common" + "configcenter/src/common/blog" + "configcenter/src/common/mapstr" + "configcenter/src/common/metadata" + "configcenter/src/scene_server/sync_server/logics/full-text-search/types" + + "github.com/olivere/elastic/v7" +) + +// commonParser is the data parser for common object instance, including table instance +type commonParser struct { + index string + cs *ClientSet +} + +func newCommonParser(index string, cs *ClientSet) *commonParser { + return &commonParser{index: index, cs: cs} +} + +// GenEsID generate es id from mongo oid +func (p *commonParser) GenEsID(coll, oid string) string { + return fmt.Sprintf("%s:%s", oid, indexIdentifierMap[p.index]) +} + +// ParseData parse mongo data to es data +func (p *commonParser) ParseData(info []mapstr.MapStr, coll string, rid string) (bool, mapstr.MapStr, error) { + if len(info) == 0 { + return false, nil, errors.New("data is empty") + } + data := info[0] + + // generate es doc + esDoc := mapstr.MapStr{ + metadata.IndexPropertyBKObjID: data[common.BKObjIDField], + metadata.IndexPropertyBKSupplierAccount: data[common.BKOwnerIDField], + metadata.IndexPropertyBKBizID: data[common.BKAppIDField], + } + + for _, field := range types.IndexExtraFieldsMap[p.index] { + esDoc[field] = data[extraEsFieldMap[field]] + } + + for _, field := range types.IndexExcludeFieldsMap[p.index] { + delete(esDoc, field) + } + + // parse es keywords + data = cleanCommonKeywordData(data, p.index) + keywords, err := parseKeywords(data) + if err != nil { + blog.Errorf("parse keywords failed, err: %v, data: %+v, index: %s, rid: %s", err, data, p.index, rid) + return false, nil, err + } + + esDoc[metadata.IndexPropertyKeywords] = keywords + + return false, esDoc, nil +} + +// ParseWatchDeleteData parse delete data from mongodb watch +func (p *commonParser) ParseWatchDeleteData(collOidMap map[string][]string, rid string) ([]string, + []elastic.BulkableRequest, bool) { + return 
nil, nil, true +} diff --git a/src/scene_server/sync_server/logics/full-text-search/parser/common_inst.go b/src/scene_server/sync_server/logics/full-text-search/parser/common_inst.go new file mode 100644 index 0000000000..551da9635f --- /dev/null +++ b/src/scene_server/sync_server/logics/full-text-search/parser/common_inst.go @@ -0,0 +1,284 @@ +/* + * Tencent is pleased to support the open source community by making + * 蓝鲸智云 - 配置平台 (BlueKing - Configuration System) available. + * Copyright (C) 2017 THL A29 Limited, + * a Tencent company. All rights reserved. + * Licensed under the MIT License (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at http://opensource.org/licenses/MIT + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on + * an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, + * either express or implied. See the License for the + * specific language governing permissions and limitations under the License. + * We undertake not to change the open source license (MIT license) applicable + * to the current version of the project delivered to anyone in the future. 
+ */ + +package parser + +import ( + "context" + "errors" + "fmt" + "strconv" + + "configcenter/src/common" + "configcenter/src/common/blog" + "configcenter/src/common/json" + "configcenter/src/common/mapstr" + "configcenter/src/common/metadata" + "configcenter/src/common/util" + "configcenter/src/scene_server/sync_server/logics/full-text-search/cache" + ferrors "configcenter/src/scene_server/sync_server/logics/full-text-search/errors" + "configcenter/src/storage/driver/mongodb" + + "github.com/olivere/elastic/v7" + "github.com/tidwall/gjson" + "go.mongodb.org/mongo-driver/bson/primitive" +) + +// commonObjInstParser is the data parser for common object instance, including table instance +type commonObjInstParser struct { + *objInstParser +} + +func newCommonObjInstParser(index string, cs *ClientSet) *commonObjInstParser { + return &commonObjInstParser{newObjInstParser(index, cs)} +} + +// GenEsID generate es id from mongo oid +func (p *commonObjInstParser) GenEsID(coll, oid string) string { + return fmt.Sprintf("%s:%s", oid, indexIdentifierMap[p.index]) +} + +// ParseData parse mongo data to es data +func (p *commonObjInstParser) ParseData(info []mapstr.MapStr, coll string, rid string) (bool, mapstr.MapStr, error) { + if len(info) == 0 { + return false, nil, errors.New("data is empty") + } + data := info[0] + + objID := GetObjIDByData(coll, data) + + // parse table instance separately + supplierAccount := util.GetStrByInterface(data[common.BkSupplierAccount]) + isQuoted, propertyID, srcObjID := cache.GetQuotedInfoByObjID(p.cs.CacheCli, objID, supplierAccount) + if isQuoted { + return p.parseQuotedInst(data, propertyID, srcObjID, rid) + } + + return p.objInstParser.ParseData(info, coll, rid) +} + +// parseQuotedInst parse quoted instance mongo data to es data +func (p *commonObjInstParser) parseQuotedInst(data mapstr.MapStr, propertyID, objID string, rid string) (bool, + mapstr.MapStr, error) { + + ctx := context.Background() + + instID, err := 
util.GetInt64ByInterface(data[common.BKInstIDField]) + if err != nil { + blog.Errorf("[%s] parse quote inst id failed, err: %v, data: %+v, rid: %s", err, data, rid) + return false, nil, errors.New("quote inst id is invalid") + } + + // Note: instID == 0 表明表格实例没有与模型实例表进行关联,无需处理 + if instID == 0 { + return true, nil, nil + } + + oid, err := parseOid(data[common.MongoMetaID]) + if err != nil { + return false, nil, err + } + + account, err := convMetaIDToStr(data, common.BKOwnerIDField) + if err != nil { + blog.Errorf("[%s] parse supplier account failed, err: %v, data: %+v, rid: %s", err, data, rid) + return false, nil, errors.New("supplier account is invalid") + } + + index := getEsIndexByObjID(objID) + + document, keywords, err := p.analysisTableDocument(propertyID, oid, data) + if err != nil { + blog.Errorf("analysis table document failed, err: %v", err) + return false, nil, err + } + + // 直接更新 es文档 + succeed, err := p.updateTablePropertyEsDoc(index, strconv.FormatInt(instID, 10), propertyID, oid, keywords) + if err != nil { + blog.Errorf("update table property es doc failed, err: %v", err) + return false, nil, err + } + + if succeed { + return true, nil, nil + } + + // 更新败降级处理,查询实例数据,如果es文档不存在,直接创建es文档 + id, err := p.getEsIDByMongoID(objID, account, instID, rid) + if err != nil { + return false, nil, err + } + + err = ferrors.EsRespErrHandler(func() (bool, error) { + resp, err := p.cs.EsCli.Update().Index(index).DocAsUpsert(true).RetryOnConflict(10). + Doc(document).Id(id).Do(ctx) + if err != nil { + blog.Errorf("upsert parent inst failed, err: %v, id: %s, doc: %+v, rid: %s", err, id, document, rid) + return false, err + } + + retry, fatal := ferrors.EsStatusHandler(resp.Status) + if !retry { + return false, nil + } + + return fatal, errors.New("upsert parent inst failed") + }) + return true, nil, nil +} + +// updateTablePropertyEsDoc update table property es doc. 
+func (p *commonObjInstParser) updateTablePropertyEsDoc(index, instIDStr, propID, oid string, keywords []string) (bool, + error) { + + keywordStr, err := json.MarshalToString(keywords) + if err != nil { + return false, err + } + + var succeed bool + err = ferrors.EsRespErrHandler(func() (bool, error) { + resp, err := p.cs.EsCli.UpdateByQuery(index). + ProceedOnVersionConflict(). + Query(elastic.NewMatchQuery(metadata.IndexPropertyID, instIDStr)). + Script(elastic.NewScriptInline(fmt.Sprintf(updateTableScript, propID, propID, propID, oid, + keywordStr))). + Do(context.Background()) + if err != nil { + blog.Errorf("update table property failed, err: %v, inst id: %s, property id: %s", err, instIDStr, propID) + return false, err + } + + for _, failure := range resp.Failures { + retry, fatal := ferrors.EsStatusHandler(failure.Status) + if !retry { + break + } + + return fatal, errors.New("update table property failed") + } + + succeed = resp.Total == 1 + return false, nil + }) + + return succeed, err +} + +// getEsIDByMongoID get the es id by mongo document id. +// 如果mongo的实例数据不存在,说明是脏数据,直接返回错误。 +func (p *commonObjInstParser) getEsIDByMongoID(objID, supplierAccount string, id int64, rid string) (string, error) { + coll := common.GetInstTableName(objID, supplierAccount) + filter := mapstr.MapStr{common.GetInstIDField(objID): id} + + doc := make(mapstr.MapStr) + ferrors.FatalErrHandler(200, 100, func() error { + err := mongodb.Client().Table(coll).Find(filter).Fields(common.MongoMetaID).One(context.Background(), &doc) + if err != nil { + blog.Errorf("get mongo _id failed, obj: %s, id: %d, err: %v, rid: %s", objID, id, err, rid) + return err + } + return nil + }) + + documentID, ok := doc[common.MongoMetaID].(primitive.ObjectID) + if !ok { + return "", errors.New("missing document metadata id") + } + + return p.GenEsID(coll, documentID.Hex()), nil +} + +// analysisTableDocument analysis the table property document. 
+func (p *commonObjInstParser) analysisTableDocument(propertyID, oid string, originDoc mapstr.MapStr) ( + mapstr.MapStr, []string, error) { + + originDoc = cleanCommonKeywordData(originDoc, p.index) + + delete(originDoc, common.BKFieldID) + delete(originDoc, common.BKInstIDField) + + jsonDoc, err := json.MarshalToString(originDoc) + if err != nil { + return nil, nil, err + } + + keywords := analysisJSONKeywords(gjson.Parse(jsonDoc)) + document := mapstr.MapStr{ + metadata.TablePropertyName: mapstr.MapStr{ + propertyID: mapstr.MapStr{ + oid: keywords, + }, + }, + } + return document, keywords, nil +} + +// ParseWatchDeleteData parse delete data from mongodb watch +func (p *commonObjInstParser) ParseWatchDeleteData(collOidMap map[string][]string, rid string) ([]string, + []elastic.BulkableRequest, bool) { + + delArchives := getDelArchive(collOidMap, rid) + + needDelIDs := make([]string, 0) + + for _, archive := range delArchives { + objID := util.GetStrByInterface(archive.Detail[common.BKObjIDField]) + esID := p.GenEsID(archive.Coll, archive.Oid) + + supplierAccount := util.GetStrByInterface(archive.Detail[common.BkSupplierAccount]) + isQuoted, propID, objID := cache.GetQuotedInfoByObjID(p.cs.CacheCli, objID, supplierAccount) + if !isQuoted { + needDelIDs = append(needDelIDs, esID) + continue + } + + err := p.deleteTablePropertyEsDoc(getEsIndexByObjID(objID), propID, archive.Oid) + if err != nil { + blog.Errorf("delete table property es document failed, err: %v, rid: %s", err, rid) + continue + } + } + + return needDelIDs, nil, true +} + +// deleteTablePropertyEsDoc delete table property instance from es. +func (p *commonObjInstParser) deleteTablePropertyEsDoc(index, propertyID, oid string) error { + return ferrors.EsRespErrHandler(func() (bool, error) { + resp, err := p.cs.EsCli.UpdateByQuery(index). + ProceedOnVersionConflict(). + Query(elastic.NewExistsQuery(fmt.Sprintf(deleteTableQueryScript, propertyID, oid))). 
+ Script(elastic.NewScriptInline(fmt.Sprintf(deleteTableScript, propertyID, oid, propertyID, propertyID))). + Do(context.Background()) + if err != nil { + blog.Errorf("delete table inst failed, err: %v", err) + return false, err + } + + for _, failure := range resp.Failures { + retry, fatal := ferrors.EsStatusHandler(failure.Status) + if !retry { + break + } + + return fatal, errors.New("delete table inst failed") + } + return false, nil + }) +} diff --git a/src/scene_server/sync_server/logics/full-text-search/parser/inst.go b/src/scene_server/sync_server/logics/full-text-search/parser/inst.go new file mode 100644 index 0000000000..9909834aef --- /dev/null +++ b/src/scene_server/sync_server/logics/full-text-search/parser/inst.go @@ -0,0 +1,101 @@ +/* + * Tencent is pleased to support the open source community by making + * 蓝鲸智云 - 配置平台 (BlueKing - Configuration System) available. + * Copyright (C) 2017 THL A29 Limited, + * a Tencent company. All rights reserved. + * Licensed under the MIT License (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at http://opensource.org/licenses/MIT + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on + * an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, + * either express or implied. See the License for the + * specific language governing permissions and limitations under the License. + * We undertake not to change the open source license (MIT license) applicable + * to the current version of the project delivered to anyone in the future. 
+ */ + +package parser + +import ( + "errors" + + "configcenter/src/common" + "configcenter/src/common/blog" + "configcenter/src/common/mapstr" + "configcenter/src/common/metadata" + "configcenter/src/common/util" + "configcenter/src/scene_server/sync_server/logics/full-text-search/cache" +) + +// objInstParser is the general data parser for object instance +type objInstParser struct { + *commonParser +} + +func newObjInstParser(index string, cs *ClientSet) *objInstParser { + return &objInstParser{commonParser: newCommonParser(index, cs)} +} + +// ParseData parse mongo data to es data +func (p *objInstParser) ParseData(info []mapstr.MapStr, coll string, rid string) (bool, mapstr.MapStr, error) { + if len(info) == 0 { + return false, nil, errors.New("data is empty") + } + data := info[0] + + objID := GetObjIDByData(coll, data) + + data = cache.EnumIDToName(p.cs.CacheCli, data, objID) + + // get es id + id, err := convMetaIDToStr(data, metadata.GetInstIDFieldByObjID(objID)) + if err != nil { + blog.Errorf("get meta id failed, err: %v, data: %+v, obj: %s, rid: %s", err, data, objID, rid) + return false, nil, err + } + + _, esDoc, err := p.commonParser.ParseData(info, coll, rid) + if err != nil { + return false, nil, err + } + + esDoc[metadata.IndexPropertyID] = id + esDoc[metadata.IndexPropertyDataKind] = metadata.DataKindInstance + esDoc[metadata.IndexPropertyBKObjID] = objID + + if len(info) > 1 { + // parse quoted instance data + quotedData := make(map[string]mapstr.MapStr) + + for i := 1; i < len(info); i++ { + quotedInst := info[i] + quotedOid, err := parseOid(quotedInst[common.MongoMetaID]) + if err != nil { + return false, nil, err + } + propertyID := util.GetStrByInterface(quotedInst[common.BKPropertyIDField]) + + quotedInst = cleanCommonKeywordData(quotedInst, p.index) + delete(quotedInst, common.BKFieldID) + delete(quotedInst, common.BKInstIDField) + delete(quotedInst, common.BKPropertyIDField) + + quotedKeywords, err := parseKeywords(quotedInst) + if err != 
nil { + blog.Errorf("parse quoted inst %+v keywords failed, err: %v, rid: %s", quotedInst, err, rid) + return false, nil, err + } + + _, exists := quotedData[propertyID] + if !exists { + quotedData[propertyID] = make(mapstr.MapStr) + } + quotedData[propertyID][quotedOid] = quotedKeywords + } + + esDoc[metadata.TablePropertyName] = quotedData + } + + return false, esDoc, nil +} diff --git a/src/scene_server/sync_server/logics/full-text-search/parser/model.go b/src/scene_server/sync_server/logics/full-text-search/parser/model.go new file mode 100644 index 0000000000..0cfa6a2204 --- /dev/null +++ b/src/scene_server/sync_server/logics/full-text-search/parser/model.go @@ -0,0 +1,200 @@ +/* + * Tencent is pleased to support the open source community by making + * 蓝鲸智云 - 配置平台 (BlueKing - Configuration System) available. + * Copyright (C) 2017 THL A29 Limited, + * a Tencent company. All rights reserved. + * Licensed under the MIT License (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at http://opensource.org/licenses/MIT + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on + * an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, + * either express or implied. See the License for the + * specific language governing permissions and limitations under the License. + * We undertake not to change the open source license (MIT license) applicable + * to the current version of the project delivered to anyone in the future. 
+ */ + +package parser + +import ( + "errors" + + "configcenter/src/common" + "configcenter/src/common/blog" + "configcenter/src/common/mapstr" + "configcenter/src/common/metadata" + "configcenter/src/common/util" + "configcenter/src/scene_server/sync_server/logics/full-text-search/cache" + + "github.com/olivere/elastic/v7" +) + +// modelParser is the general data parser for model/attribute +type modelParser struct { + *commonParser +} + +func newModelParser(index string, cs *ClientSet) *modelParser { + return &modelParser{newCommonParser(index, cs)} +} + +// ParseData parse mongo data to es data +func (p *modelParser) ParseData(info []mapstr.MapStr, coll string, rid string) (bool, mapstr.MapStr, error) { + if len(info) == 0 { + return false, nil, errors.New("data is empty") + } + data := info[0] + + objID, ok := data[common.BKObjIDField].(string) + if !ok { + blog.Errorf("[%s] object id is invalid, data: %+v, rid: %s", p.index, data, rid) + return false, nil, errors.New("object id is invalid") + } + + // skip table object + supplierAccount := util.GetStrByInterface(data[common.BkSupplierAccount]) + isQuoted, _, _ := cache.GetQuotedInfoByObjID(p.cs.CacheCli, objID, supplierAccount) + if isQuoted { + return true, nil, nil + } + + var model mapstr.MapStr + var propertyData map[string]mapstr.MapStr + + if len(info) > 1 { + // info contains: model, properties + model = info[0] + for i := 1; i < len(info); i++ { + propertyData[util.GetStrByInterface(info[i][common.BKPropertyIDField])] = info[i] + } + } else { + // get model and property info from cache + var exists bool + model, exists = cache.GetModelInfoByObjID(p.cs.CacheCli, objID) + if !exists { + // skip not exists model + return true, nil, nil + } + + propertyData, exists = cache.GetPropertyInfoByObjID(p.cs.CacheCli, objID) + if !exists { + propertyData = make(map[string]mapstr.MapStr) + } + } + + keywords := []string{objID, util.GetStrByInterface(model[common.BKObjNameField])} + + // all attributes with model 
metadata is ONE elastic document. + tableAttrs := make([]mapstr.MapStr, 0) + for _, attribute := range propertyData { + propertyType, err := convMetaIDToStr(attribute, common.BKPropertyTypeField) + if err != nil { + blog.Errorf("[%s] property type is invalid, data: %+v, rid: %s", p.index, data, rid) + continue + } + + if propertyType == common.FieldTypeInnerTable { + tableAttrs = append(tableAttrs, attribute) + } + + keywords = append(keywords, util.GetStrByInterface(attribute[common.BKPropertyIDField]), + util.GetStrByInterface(attribute[common.BKPropertyNameField])) + } + + // build elastic document. + document := mapstr.MapStr{ + // we use meta_bk_obj_id to search model, set this id to special null value + metadata.IndexPropertyID: nullMetaID, + metadata.IndexPropertyDataKind: metadata.DataKindModel, + metadata.IndexPropertyBKObjID: objID, + metadata.IndexPropertyBKSupplierAccount: model[common.BKOwnerIDField], + metadata.IndexPropertyBKBizID: model[common.BKAppIDField], + metadata.IndexPropertyKeywords: compressKeywords(keywords), + } + + if err := p.updateModelTableProperties(document, tableAttrs, rid); err != nil { + blog.Errorf("parse model table attributes failed, table attr: %+v, rid: %s", tableAttrs, rid) + return false, nil, err + } + + return false, document, nil +} + +// updateModelTableProperties update model table property. 
+func (p *modelParser) updateModelTableProperties(document mapstr.MapStr, attrs []mapstr.MapStr, rid string) error { + if len(attrs) == 0 { + return nil + } + + tables := make(mapstr.MapStr) + for _, attribute := range attrs { + propertyID, err := convMetaIDToStr(attribute, common.BKPropertyIDField) + if err != nil { + blog.Errorf("parse property id failed, err: %v, attr: %+v, rid: %s", err, attribute, rid) + continue + } + + option, err := metadata.ParseTableAttrOption(attribute[common.BKOptionField]) + if err != nil { + blog.Errorf("parse table attr option failed, err: %v, attr: %+v, rid: %s", err, attribute, rid) + continue + } + + if len(option.Header) == 0 { + continue + } + + keywords := make([]string, 0) + for _, header := range option.Header { + keywords = append(keywords, header.PropertyID, header.PropertyName) + } + + // 0 为占位符,保持搜索时模型和实例的统一 + // todo 临时方案,后续优化 + tables[propertyID] = mapstr.MapStr{nullMetaID: compressKeywords(keywords)} + } + + document[metadata.TablePropertyName] = tables + return nil +} + +// ParseWatchDeleteData parse delete model attribute data from mongodb watch +func (p *modelParser) ParseWatchDeleteData(collOidMap map[string][]string, rid string) ([]string, + []elastic.BulkableRequest, bool) { + + needDelIDs := make([]string, 0) + requests := make([]elastic.BulkableRequest, 0) + + for coll, oids := range collOidMap { + switch coll { + case common.BKTableNameObjDes: + for _, oid := range oids { + needDelIDs = append(needDelIDs, p.GenEsID(coll, oid)) + } + case common.BKTableNameObjAttDes: + delArchives := getDelArchive(collOidMap, rid) + + for _, archive := range delArchives { + skip, data, err := p.ParseData([]mapstr.MapStr{archive.Detail}, coll, rid) + if err != nil || skip { + continue + } + + id := p.GenEsID(coll, archive.Oid) + + req := elastic.NewBulkUpdateRequest().DocAsUpsert(true).RetryOnConflict(10).Id(id).Doc(data) + + if _, err = req.Source(); err != nil { + blog.Errorf("upsert data is invalid, err: %v, id: %s, data: 
%+v, rid: %s", err, id, data, rid) + continue + } + + requests = append(requests, req) + } + + } + } + + return needDelIDs, requests, false +} diff --git a/src/scene_server/sync_server/logics/full-text-search/parser/parser.go b/src/scene_server/sync_server/logics/full-text-search/parser/parser.go new file mode 100644 index 0000000000..5e2f97423a --- /dev/null +++ b/src/scene_server/sync_server/logics/full-text-search/parser/parser.go @@ -0,0 +1,69 @@ +/* + * Tencent is pleased to support the open source community by making + * 蓝鲸智云 - 配置平台 (BlueKing - Configuration System) available. + * Copyright (C) 2017 THL A29 Limited, + * a Tencent company. All rights reserved. + * Licensed under the MIT License (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at http://opensource.org/licenses/MIT + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on + * an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, + * either express or implied. See the License for the + * specific language governing permissions and limitations under the License. + * We undertake not to change the open source license (MIT license) applicable + * to the current version of the project delivered to anyone in the future. 
+ */ + +// Package parser defines the full-text search data parser +package parser + +import ( + "configcenter/src/apimachinery/cacheservice" + "configcenter/src/common/mapstr" + "configcenter/src/common/metadata" + "configcenter/src/scene_server/sync_server/logics/full-text-search/cache" + + "github.com/olivere/elastic/v7" +) + +// Parser defines the es data parser +type Parser interface { + // GenEsID generate es id + GenEsID(coll, oid string) string + // ParseData parse mongo data to es data + // @param info: one mongo data related info, the first one is the data itself, others are optional extra info + ParseData(info []mapstr.MapStr, coll string, rid string) (bool, mapstr.MapStr, error) + // ParseWatchDeleteData parse delete data from mongodb watch + ParseWatchDeleteData(collOidMap map[string][]string, rid string) ([]string, []elastic.BulkableRequest, bool) +} + +// IndexParserMap is the map of es index alias name -> Parser +var IndexParserMap = make(map[string]Parser) + +// InitParser initialize parser info +func InitParser(cs *ClientSet) error { + // init cache data + if err := cache.InitResourcePoolBiz(); err != nil { + return err + } + + // init index to parser map + IndexParserMap = map[string]Parser{ + metadata.IndexNameBizSet: newObjInstParser(metadata.IndexNameBizSet, cs), + metadata.IndexNameBiz: newObjInstParser(metadata.IndexNameBiz, cs), + metadata.IndexNameSet: newBizResParser(metadata.IndexNameSet, cs), + metadata.IndexNameModule: newBizResParser(metadata.IndexNameModule, cs), + metadata.IndexNameHost: newObjInstParser(metadata.IndexNameHost, cs), + metadata.IndexNameModel: newModelParser(metadata.IndexNameModel, cs), + metadata.IndexNameObjectInstance: newCommonObjInstParser(metadata.IndexNameObjectInstance, cs), + } + + return nil +} + +// ClientSet is the client set of parser +type ClientSet struct { + EsCli *elastic.Client + CacheCli cacheservice.Cache +} diff --git a/src/scene_server/sync_server/logics/full-text-search/parser/types.go 
b/src/scene_server/sync_server/logics/full-text-search/parser/types.go new file mode 100644 index 0000000000..725cd51c2b --- /dev/null +++ b/src/scene_server/sync_server/logics/full-text-search/parser/types.go @@ -0,0 +1,80 @@ +/* + * Tencent is pleased to support the open source community by making + * 蓝鲸智云 - 配置平台 (BlueKing - Configuration System) available. + * Copyright (C) 2017 THL A29 Limited, + * a Tencent company. All rights reserved. + * Licensed under the MIT License (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at http://opensource.org/licenses/MIT + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on + * an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, + * either express or implied. See the License for the + * specific language governing permissions and limitations under the License. + * We undertake not to change the open source license (MIT license) applicable + * to the current version of the project delivered to anyone in the future. 
+ */ + +package parser + +import ( + "configcenter/src/common" + "configcenter/src/common/metadata" +) + +const ( + // nullMetaID default metaID + nullMetaID = "0" + // commonObject common object instance identifier + commonObject = "common" +) + +const ( + // deleteTableQueryScript 表格实例删除脚本的条件 + // 例: 删除disk表格中实例_id为1的行 {"field": "tables.disk.1"} + deleteTableQueryScript = "tables.%s.%s" + // deleteTableScript 表格实例删除脚本, + // 例:删除disk表格中实例_id为1的行 ctx._source.tables.disk.remove('1'),如果删除后表格为空则删除表格字段 + deleteTableScript = `ctx._source.tables.%s.remove('%s'); + if (ctx._source.tables.%s.size()==0) {ctx._source.tables.remove('%s')}` + // updateTableScript 表格实例更新脚本(如果tables字段和表格字段不存在则先创建再更新) + // 例:更新disk表格中实例_id为1的行的keyword为xxx ctx._source.tables.disk['1'] = ["xxx"] + updateTableScript = `if(!ctx._source.containsKey('tables')){ctx._source['tables']=[:];} + if(!ctx._source.tables.containsKey('%s')){ctx._source.tables['%s']=[:];} + ctx._source.tables.%s['%s']=%s` +) + +var ( + // extraEsFieldMap is the extra es field to cc field map + extraEsFieldMap = map[string]string{ + metadata.IndexPropertyBKBizSetID: common.BKBizSetIDField, + metadata.IndexPropertyBKParentID: common.BKParentIDField, + metadata.IndexPropertyBKCloudID: common.BKCloudIDField, + } + + // baseCleanFields is the basic fields that should be cleaned from the keyword + baseCleanFields = []string{common.MongoMetaID, common.CreateTimeField, common.LastTimeField, common.BKOwnerIDField} + + // indexKeywordCleanFieldsMap is the map of es index name -> the fields that should be cleaned from the keyword + indexKeywordCleanFieldsMap = map[string][]string{ + metadata.IndexNameBizSet: {common.BKDefaultField, common.BKBizSetScopeField}, + metadata.IndexNameBiz: {common.BKDefaultField, common.BKParentIDField}, + metadata.IndexNameSet: {common.BKAppIDField, common.BKParentIDField, common.BKSetTemplateIDField, + common.BKDefaultField}, + metadata.IndexNameModule: {common.BKDefaultField, common.BKSetTemplateIDField, 
common.BKAppIDField, + common.BKParentIDField, common.BKSetIDField, common.BKServiceCategoryIDField}, + metadata.IndexNameHost: {common.BKOperationTimeField, common.BKParentIDField}, + metadata.IndexNameObjectInstance: {common.BKObjIDField, common.BKParentIDField}, + } + + // indexIdentifierMap is the map of es index name -> the identifier of the index, used as suffix of es id + indexIdentifierMap = map[string]string{ + metadata.IndexNameBizSet: common.BKInnerObjIDBizSet, + metadata.IndexNameBiz: common.BKInnerObjIDApp, + metadata.IndexNameSet: common.BKInnerObjIDSet, + metadata.IndexNameModule: common.BKInnerObjIDModule, + metadata.IndexNameHost: common.BKInnerObjIDHost, + metadata.IndexNameModel: common.BKInnerObjIDObject, + metadata.IndexNameObjectInstance: commonObject, + } +) diff --git a/src/scene_server/sync_server/logics/full-text-search/parser/util.go b/src/scene_server/sync_server/logics/full-text-search/parser/util.go new file mode 100644 index 0000000000..761956ef7c --- /dev/null +++ b/src/scene_server/sync_server/logics/full-text-search/parser/util.go @@ -0,0 +1,212 @@ +/* + * Tencent is pleased to support the open source community by making + * 蓝鲸智云 - 配置平台 (BlueKing - Configuration System) available. + * Copyright (C) 2017 THL A29 Limited, + * a Tencent company. All rights reserved. + * Licensed under the MIT License (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at http://opensource.org/licenses/MIT + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on + * an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, + * either express or implied. See the License for the + * specific language governing permissions and limitations under the License. 
+ * We undertake not to change the open source license (MIT license) applicable + * to the current version of the project delivered to anyone in the future. + */ + +package parser + +import ( + "context" + "fmt" + "regexp" + + "configcenter/src/common" + "configcenter/src/common/blog" + "configcenter/src/common/json" + "configcenter/src/common/mapstr" + "configcenter/src/common/metadata" + "configcenter/src/common/util" + ferrors "configcenter/src/scene_server/sync_server/logics/full-text-search/errors" + "configcenter/src/scene_server/sync_server/logics/full-text-search/types" + "configcenter/src/storage/driver/mongodb" + "go.mongodb.org/mongo-driver/bson/primitive" + + "github.com/tidwall/gjson" +) + +// convMetaIDToStr convert meta id(objID/hostID/setID/moduleID/instanceID/bizID...) to string. +func convMetaIDToStr(data mapstr.MapStr, idField string) (string, error) { + id, exists := data[idField] + if !exists || id == nil || id == "" { + return "", fmt.Errorf("document id %+v is invalid", data[idField]) + } + return fmt.Sprintf("%v", id), nil +} + +// cleanCommonKeywordData cleans common fields that do not need to be saved in es +func cleanCommonKeywordData(document mapstr.MapStr, index string) mapstr.MapStr { + if len(document) == 0 { + return make(mapstr.MapStr) + } + + for _, field := range baseCleanFields { + delete(document, field) + } + + for _, field := range indexKeywordCleanFieldsMap[index] { + delete(document, field) + } + + return document +} + +// parseKeywords parse es keywords by index +func parseKeywords(data mapstr.MapStr) ([]string, error) { + jsonDoc, err := json.MarshalToString(data) + if err != nil { + return nil, err + } + + keywords := analysisJSONKeywords(gjson.Parse(jsonDoc)) + return compressKeywords(keywords), nil +} + +// analysisJSONKeywords analysis the given json style document, +// and extract all the keywords as elastic document content. 
+func analysisJSONKeywords(result gjson.Result) []string { + keywords := make([]string, 0) + if !result.IsObject() && !result.IsArray() { + keyword := result.String() + if len(keyword) != 0 { + keywords = append(keywords, keyword) + } + return keywords + } + + result.ForEach(func(key, value gjson.Result) bool { + keywords = append(keywords, analysisJSONKeywords(value)...) + return true + }) + + return keywords +} + +// compressKeywords compress the keywords, unique the keywords array. +func compressKeywords(keywords []string) []string { + compressedKeywords := make([]string, 0) + // keywordsMap control repeated or screened keywords. + keywordsMap := make(map[string]struct{}) + for _, keyword := range keywords { + if keyword == "" { + continue + } + if _, exist := keywordsMap[keyword]; exist { + continue + } + compressedKeywords = append(compressedKeywords, keyword) + keywordsMap[keyword] = struct{}{} + } + + return compressedKeywords +} + +// GetObjIDByData get object id by collection & instance data +func GetObjIDByData(coll string, data mapstr.MapStr) string { + switch coll { + case common.BKTableNameBaseBizSet: + return common.BKInnerObjIDBizSet + case common.BKTableNameBaseApp: + return common.BKInnerObjIDApp + case common.BKTableNameBaseSet: + return common.BKInnerObjIDSet + case common.BKTableNameBaseModule: + return common.BKInnerObjIDModule + case common.BKTableNameBaseHost: + return common.BKInnerObjIDHost + default: + if !common.IsObjectInstShardingTable(coll) { + return "" + } + + if data == nil { + return "" + } + + objID := util.GetStrByInterface(data[common.BKObjIDField]) + if objID != "" { + return objID + } + + // parse obj id from table name, NOTE: this is only a compatible logics + regex := regexp.MustCompile(`cc_ObjectBase_(.*)_pub_(.*)`) + if regex.MatchString(coll) { + matches := regex.FindStringSubmatch(coll) + return matches[2] + } + + return "" + } +} + +var objEsIndexMap = map[string]string{ + common.BKInnerObjIDBizSet: 
metadata.IndexNameBizSet, + common.BKInnerObjIDApp: metadata.IndexNameBiz, + common.BKInnerObjIDSet: metadata.IndexNameSet, + common.BKInnerObjIDModule: metadata.IndexNameModule, + common.BKInnerObjIDHost: metadata.IndexNameHost, +} + +// getEsIndexByObjID get the es index by object id. +func getEsIndexByObjID(objID string) string { + index, exists := objEsIndexMap[objID] + if exists { + return types.GetIndexName(index) + } + + return types.GetIndexName(metadata.IndexNameObjectInstance) +} + +type delArchive struct { + Oid string `bson:"oid"` + Coll string `bson:"coll"` + Detail mapstr.MapStr `bson:"detail"` +} + +// getDelArchive get deleted data by collOidMap, returns es id to deleted mongo data map +func getDelArchive(collOidMap map[string][]string, rid string) []delArchive { + orCond := make([]mapstr.MapStr, 0) + for coll, oids := range collOidMap { + orCond = append(orCond, mapstr.MapStr{ + "coll": coll, + "oid": oids, + }) + } + + filter := mapstr.MapStr{common.BKDBOR: orCond} + + docs := make([]delArchive, 0) + + ferrors.FatalErrHandler(200, 100, func() error { + err := mongodb.Client().Table(common.BKTableNameDelArchive).Find(filter).All(context.Background(), &docs) + if err != nil { + blog.Errorf("get del archive failed, filter: %+v, err: %v, rid: %s", filter, err, rid) + return err + } + return nil + }) + + return docs +} + +func parseOid(oid interface{}) (string, error) { + switch t := oid.(type) { + case primitive.ObjectID: + return t.Hex(), nil + case string: + return t, nil + default: + return "", fmt.Errorf("oid %+v is invalid", t) + } +} diff --git a/src/scene_server/sync_server/logics/full-text-search/service.go b/src/scene_server/sync_server/logics/full-text-search/service.go new file mode 100644 index 0000000000..bd38635c16 --- /dev/null +++ b/src/scene_server/sync_server/logics/full-text-search/service.go @@ -0,0 +1,104 @@ +/* + * Tencent is pleased to support the open source community by making + * 蓝鲸智云 - 配置平台 (BlueKing - Configuration System) 
available. + * Copyright (C) 2017 THL A29 Limited, + * a Tencent company. All rights reserved. + * Licensed under the MIT License (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at http://opensource.org/licenses/MIT + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on + * an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, + * either express or implied. See the License for the + * specific language governing permissions and limitations under the License. + * We undertake not to change the open source license (MIT license) applicable + * to the current version of the project delivered to anyone in the future. + */ + +package fulltextsearch + +import ( + "context" + "errors" + "sync" + + ftypes "configcenter/pkg/types/sync/full-text-search" + "configcenter/src/common/blog" + "configcenter/src/scene_server/sync_server/logics/full-text-search/types" + "configcenter/src/scene_server/sync_server/logics/full-text-search/upgrader" +) + +// SyncData sync full-text search data +func (f fullTextSearch) SyncData(ctx context.Context, opt *ftypes.SyncDataOption, rid string) error { + if !f.enableSync { + return errors.New("full text search sync is disabled") + } + + // sync all data + if opt.IsAll { + var err error + pipeline := make(chan struct{}, 5) + wg := sync.WaitGroup{} + + for _, index := range types.AllIndexNames { + if err != nil { + break + } + + pipeline <- struct{}{} + wg.Add(1) + + go func(ctx context.Context, index string, rid string) { + defer func() { + <-pipeline + wg.Done() + }() + + err = f.syncDataByIndex(ctx, index, rid) + }(ctx, index, rid) + } + + wg.Wait() + return err + } + + if len(opt.Index) > 0 { + return f.syncDataByIndex(ctx, opt.Index, rid) + } + + // sync specific collection data + index, err := getIndexByColl(opt.Collection) + if err != nil { + return err + } + + _, err = 
f.syncCollection(ctx, index, opt.Collection, opt.Oids, rid) + if err != nil { + return err + } + return nil +} + +// Migrate full-text search index info with its related data +func (f fullTextSearch) Migrate(ctx context.Context, rid string) (*ftypes.MigrateResult, error) { + if !f.enableSync { + return nil, errors.New("full text search sync is disabled") + } + + // upgrade index info + migrateResult, indexes, err := upgrader.Upgrade(ctx, rid) + if err != nil { + blog.Errorf("migrate failed, err: %v, res: %v, rid: %s", err, migrateResult, rid) + return nil, err + } + + // sync all data in the newly created index + for _, index := range indexes { + if err = f.syncDataByIndex(ctx, index, rid); err != nil { + blog.Errorf("sync data by index %s after migration failed, err: %v, rid: %s", index, err, rid) + return nil, err + } + } + + return migrateResult, nil +} diff --git a/src/scene_server/sync_server/logics/full-text-search/sync_data.go b/src/scene_server/sync_server/logics/full-text-search/sync_data.go new file mode 100644 index 0000000000..85d1212491 --- /dev/null +++ b/src/scene_server/sync_server/logics/full-text-search/sync_data.go @@ -0,0 +1,403 @@ +/* + * Tencent is pleased to support the open source community by making + * 蓝鲸智云 - 配置平台 (BlueKing - Configuration System) available. + * Copyright (C) 2017 THL A29 Limited, + * a Tencent company. All rights reserved. + * Licensed under the MIT License (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at http://opensource.org/licenses/MIT + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on + * an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, + * either express or implied. See the License for the + * specific language governing permissions and limitations under the License. 
+ * We undertake not to change the open source license (MIT license) applicable + * to the current version of the project delivered to anyone in the future. + */ + +package fulltextsearch + +import ( + "context" + "errors" + "io" + "time" + + ftypes "configcenter/pkg/types/sync/full-text-search" + "configcenter/src/common" + "configcenter/src/common/blog" + "configcenter/src/common/lock" + "configcenter/src/common/mapstr" + "configcenter/src/common/metadata" + "configcenter/src/common/util" + "configcenter/src/scene_server/sync_server/logics/full-text-search/cache" + ferrors "configcenter/src/scene_server/sync_server/logics/full-text-search/errors" + "configcenter/src/scene_server/sync_server/logics/full-text-search/parser" + "configcenter/src/scene_server/sync_server/logics/full-text-search/types" + dbtypes "configcenter/src/storage/dal/types" + "configcenter/src/storage/driver/mongodb" + "configcenter/src/storage/driver/redis" + + "github.com/olivere/elastic/v7" + "go.mongodb.org/mongo-driver/bson/primitive" +) + +// SyncData sync full-text search data by index +func (f fullTextSearch) syncDataByIndex(ctx context.Context, index string, rid string) error { + collections := make([]string, 0) + + switch index { + case metadata.IndexNameObjectInstance: + // get all object instance collections by objects + objs := make([]metadata.Object, 0) + + ferrors.FatalErrHandler(200, 100, func() error { + err := mongodb.Client().Table(common.BKTableNameObjDes).Find(nil).Fields(common.BKObjIDField, + common.BkSupplierAccount).All(ctx, &objs) + if err != nil { + blog.Errorf("get objects failed, err: %v, rid: %s", err, rid) + return err + } + return nil + }) + + for _, obj := range objs { + isQuoted, _, _ := cache.GetQuotedInfoByObjID(f.cacheCli, obj.ObjectID, obj.OwnerID) + if isQuoted { + continue + } + collections = append(collections, common.GetObjectInstTableName(obj.ObjectID, obj.OwnerID)) + } + default: + collections = append(collections, types.IndexCollMap[index]) + } + + 
existEsIDMap := make(map[string]struct{}) + for _, collection := range collections { + existEsIDs, err := f.syncCollection(ctx, index, collection, nil, rid) + if err != nil { + return err + } + for _, id := range existEsIDs { + existEsIDMap[id] = struct{}{} + } + } + + return f.cleanEsData(ctx, index, existEsIDMap, rid) + +} + +func (f fullTextSearch) cleanEsData(ctx context.Context, index string, existEsIDMap map[string]struct{}, + rid string) error { + + syncer, err := newDataSyncer(f.esCli.Client, index) + if err != nil { + return err + } + + var scrollID string + + for { + var scrollRes *elastic.SearchResult + err = ferrors.EsRespErrHandler(func() (bool, error) { + scrollRes, err = f.esCli.Client.Scroll(index).ScrollId(scrollID).Do(ctx) + if err != nil && err != io.EOF { + blog.Errorf("scroll get %s es data failed, err: %v, rid: %s", index, err, rid) + return false, err + } + return false, nil + }) + + if err == io.EOF { + return nil + } + + if err != nil || scrollRes.Hits == nil || scrollRes.Hits.TotalHits == nil { + blog.Errorf("scroll get %s es data failed, err: %v, res: %v, rid: %s", index, err, scrollRes, rid) + return err + } + + if len(scrollRes.Hits.Hits) == 0 { + return nil + } + + scrollID = scrollRes.ScrollId + + // delete not exist data in this range + delEsIDs := make([]string, 0) + for _, hit := range scrollRes.Hits.Hits { + _, exists := existEsIDMap[hit.Id] + if !exists { + delEsIDs = append(delEsIDs, hit.Id) + } + } + + if len(delEsIDs) > 0 { + syncer.addEsDeleteReq(delEsIDs, rid) + if err = syncer.doBulk(ctx, rid); err != nil { + blog.Infof("do %s es bulk delete failed, err: %v, del ids: %+v, rid: %s", index, err, delEsIDs, rid) + continue + } + } + } +} + +// syncCollection upsert full-text search data to es by collection +func (f fullTextSearch) syncCollection(ctx context.Context, index, coll string, oids []string, rid string) ( + []string, error) { + + syncer, err := newDataSyncer(f.esCli.Client, index) + if err != nil { + return nil, err + 
} + + // sync data by oids + if len(oids) > 0 { + mongoOids := make([]primitive.ObjectID, len(oids)) + for i, oid := range oids { + mongoOid, err := primitive.ObjectIDFromHex(oid) + if err != nil { + blog.Errorf("parse mongodb oid from %s failed, err: %v, rid: %s", oid, err, rid) + return nil, err + } + mongoOids[i] = mongoOid + } + cond := mapstr.MapStr{common.MongoMetaID: mapstr.MapStr{common.BKDBIN: mongoOids}} + f.upsertDataByCond(ctx, syncer, coll, cond, rid) + return nil, err + } + + // lock full-text search compensate sync operation + locker := lock.NewLocker(redis.Client()) + lockKey := genSyncLockKey(coll) + locked, err := locker.Lock(lockKey, 10*time.Minute) + if err != nil { + blog.Errorf("lock full-text search sync failed, key: %s, err: %v, rid: %s", lockKey, err, rid) + return nil, err + } + + if !locked { + return nil, errors.New("there is another sync task running, please wait until it's done") + } + defer func() { + if err = locker.Unlock(); err != nil { + blog.Errorf("unlock full-text search sync failed, key: %s, err: %v, rid: %s", lockKey, err, rid) + } + }() + + // paged get data by _id and sync to es + existEsIDs := make([]string, 0) + cond := mapstr.MapStr{} + for { + oids := f.upsertDataByCond(ctx, syncer, coll, cond, rid) + if len(oids) == 0 { + return existEsIDs, nil + } + + for _, oid := range oids { + existEsIDs = append(existEsIDs, syncer.parser.GenEsID(coll, oid.Hex())) + } + + cond = mapstr.MapStr{common.MongoMetaID: mapstr.MapStr{common.BKDBGT: oids[len(oids)-1]}} + } +} + +// genSyncLockKey generate full-text search sync lock key by collection +func genSyncLockKey(collection string) lock.StrFormat { + return lock.GetLockKey("full:text:search:sync:%s", collection) +} + +// collIndexMap is the map of cmdb collection -> es index name +var collIndexMap = map[string]string{ + common.BKTableNameBaseBizSet: metadata.IndexNameBizSet, + common.BKTableNameBaseApp: metadata.IndexNameBiz, + common.BKTableNameBaseSet: metadata.IndexNameSet, + 
common.BKTableNameBaseModule: metadata.IndexNameModule, + common.BKTableNameBaseHost: metadata.IndexNameHost, + common.BKTableNameObjDes: metadata.IndexNameModel, +} + +// getIndexByColl get es index name by cmdb collection name +func getIndexByColl(collection string) (string, error) { + index, exists := collIndexMap[collection] + if exists { + return index, nil + } + + if common.IsObjectInstShardingTable(collection) { + return metadata.IndexNameObjectInstance, nil + } + + return "", errors.New("collection is invalid") +} + +type mapStrWithOid struct { + Oid primitive.ObjectID `bson:"_id"` + MapStr map[string]interface{} `bson:",inline"` +} + +// upsertDataByCond upsert data to es by mongo condition, returns all oids in mongo data +func (f fullTextSearch) upsertDataByCond(ctx context.Context, syncer *dataSyncer, coll string, cond mapstr.MapStr, + rid string) []primitive.ObjectID { + + findOpt := dbtypes.NewFindOpts().SetWithObjectID(true) + allData := make([]mapStrWithOid, 0) + ferrors.FatalErrHandler(200, 100, func() error { + err := mongodb.Client().Table(coll).Find(cond, findOpt).Sort(common.MongoMetaID).Limit(ftypes.SyncDataPageSize). 
+ All(ctx, &allData) + if err != nil { + blog.Errorf("get data failed, cond: %+v, err: %v, rid: %s", cond, err, rid) + return err + } + return nil + }) + + if len(allData) == 0 { + return make([]primitive.ObjectID, 0) + } + + dataMap := dataGetterMap[syncer.index](ctx, coll, allData, rid) + + oids := make([]primitive.ObjectID, len(allData)) + for i, data := range allData { + oids[i] = data.Oid + syncer.addUpsertReq(coll, data.Oid.Hex(), dataMap[data.Oid], rid) + } + + if err := syncer.doBulk(context.Background(), rid); err != nil { + blog.Errorf("do es bulk request failed, err: %v, coll: %s, cond: %+v, rid: %s", err, coll, cond, rid) + return oids + } + + return oids +} + +// dataGetter is the data getter to get all oid related sync data +type dataGetter func(context.Context, string, []mapStrWithOid, string) map[primitive.ObjectID][]mapstr.MapStr + +// collIndexMap is the map of index -> sync data getter +var dataGetterMap = map[string]dataGetter{ + metadata.IndexNameBizSet: objInstDataGetter, + metadata.IndexNameBiz: objInstDataGetter, + metadata.IndexNameSet: objInstDataGetter, + metadata.IndexNameModule: objInstDataGetter, + metadata.IndexNameHost: objInstDataGetter, + metadata.IndexNameModel: modelDataGetter, + metadata.IndexNameObjectInstance: objInstDataGetter, +} + +func modelDataGetter(ctx context.Context, coll string, allData []mapStrWithOid, + rid string) map[primitive.ObjectID][]mapstr.MapStr { + + objIDs := make([]string, 0) + for _, data := range allData { + objIDs = append(objIDs, util.GetStrByInterface(data.MapStr[common.BKObjIDField])) + } + + // get model related attributes + attrMap := make(map[string][]mapstr.MapStr) + cond := mapstr.MapStr{common.BKObjIDField: mapstr.MapStr{common.BKDBIN: objIDs}} + fields := []string{common.MongoMetaID, common.BKPropertyTypeField, common.BKPropertyIDField, + common.BKPropertyNameField} + + attributes := pagedGetMongoData(common.BKTableNameObjAttDes, cond, fields) + for _, attribute := range attributes { + objID 
:= util.GetStrByInterface(attribute[common.BKObjIDField]) + attrMap[objID] = append(attrMap[objID], attribute) + } + + // model info contains the model data and its attributes + dataMap := make(map[primitive.ObjectID][]mapstr.MapStr) + for _, data := range allData { + attr := attrMap[util.GetStrByInterface(data.MapStr[common.BKObjIDField])] + dataMap[data.Oid] = append([]mapstr.MapStr{data.MapStr}, attr...) + } + return dataMap +} + +func objInstDataGetter(ctx context.Context, coll string, allData []mapStrWithOid, + rid string) map[primitive.ObjectID][]mapstr.MapStr { + + objID := parser.GetObjIDByData(coll, allData[0].MapStr) + + instIDs := make([]int64, len(allData)) + for i, data := range allData { + id, err := util.GetInt64ByInterface(data.MapStr[common.GetInstIDField(objID)]) + if err != nil { + blog.Errorf("get instance id failed, err: %v, data: %+v, rid: %s", err, data.MapStr, rid) + continue + } + instIDs[i] = id + } + + // get all model quote relations by src obj id + supplierAccount := util.GetStrByInterface(allData[0].MapStr[common.BkSupplierAccount]) + relCond := mapstr.MapStr{ + common.BKSrcModelField: objID, + common.BkSupplierAccount: supplierAccount, + } + + relations := make([]metadata.ModelQuoteRelation, 0) + ferrors.FatalErrHandler(200, 100, func() error { + err := mongodb.Client().Table(common.BKTableNameModelQuoteRelation).Find(relCond).All(ctx, &relations) + if err != nil { + blog.Errorf("get model quote relation failed, cond: %+v, err: %v", relCond, err) + return err + } + return nil + }) + + // get all quote instances + instMap := make(map[int64][]mapstr.MapStr) + + for _, relation := range relations { + table := common.GetObjectInstTableName(relation.DestModel, supplierAccount) + cond := mapstr.MapStr{common.BKInstIDField: mapstr.MapStr{common.BKDBIN: instIDs}} + + instances := pagedGetMongoData(table, cond, make([]string, 0)) + for _, instance := range instances { + instID, err := util.GetInt64ByInterface(instance[common.BKInstIDField]) + 
if err != nil { + blog.Errorf("get quote instance id failed, err: %v, instance: %+v, rid: %s", err, instance, rid) + continue + } + instance[common.BKPropertyIDField] = relation.PropertyID + instMap[instID] = append(instMap[instID], instance) + } + } + + // instance info contains the instance data and its quote instances + dataMap := make(map[primitive.ObjectID][]mapstr.MapStr) + for _, data := range allData { + id, _ := util.GetInt64ByInterface(data.MapStr[common.GetInstIDField(objID)]) + quote := instMap[id] + dataMap[data.Oid] = append([]mapstr.MapStr{data.MapStr}, quote...) + } + + return dataMap +} + +func pagedGetMongoData(table string, cond mapstr.MapStr, fields []string) []mapstr.MapStr { + allData := make([]mapstr.MapStr, 0) + + findOpt := dbtypes.NewFindOpts().SetWithObjectID(true) + for { + data := make([]mapstr.MapStr, 0) + ferrors.FatalErrHandler(200, 100, func() error { + err := mongodb.Client().Table(table).Find(cond, findOpt).Fields(fields...).Sort(common.MongoMetaID). + Limit(ftypes.SyncDataPageSize).All(context.Background(), &data) + if err != nil { + blog.Errorf("get quote instance failed, table: %s, cond: %+v, err: %v", table, cond, err) + return err + } + return nil + }) + + if len(data) == 0 { + return allData + } + allData = append(allData, data...) + + cond[common.MongoMetaID] = mapstr.MapStr{common.BKDBGT: data[len(data)-1][common.MongoMetaID]} + } +} diff --git a/src/scene_server/sync_server/logics/full-text-search/types/types.go b/src/scene_server/sync_server/logics/full-text-search/types/types.go new file mode 100644 index 0000000000..344297a434 --- /dev/null +++ b/src/scene_server/sync_server/logics/full-text-search/types/types.go @@ -0,0 +1,78 @@ +/* + * Tencent is pleased to support the open source community by making + * 蓝鲸智云 - 配置平台 (BlueKing - Configuration System) available. + * Copyright (C) 2017 THL A29 Limited, + * a Tencent company. All rights reserved. 
+ * Licensed under the MIT License (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at http://opensource.org/licenses/MIT + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on + * an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, + * either express or implied. See the License for the + * specific language governing permissions and limitations under the License. + * We undertake not to change the open source license (MIT license) applicable + * to the current version of the project delivered to anyone in the future. + */ + +// Package types defines the full-text search synchronization common types +package types + +import ( + "configcenter/src/common" + "configcenter/src/common/metadata" +) + +var ( + // AllIndexNames is all elastic index names + AllIndexNames = []string{metadata.IndexNameBizSet, metadata.IndexNameBiz, metadata.IndexNameSet, + metadata.IndexNameModule, metadata.IndexNameHost, metadata.IndexNameModel, metadata.IndexNameObjectInstance} + + // IndexVersionMap is elastic alias index name to version map + // NOTE: CHANGE the version name if you have modified the indexes metadata struct. 
+ IndexVersionMap = map[string]string{ + metadata.IndexNameBizSet: "20210710", + metadata.IndexNameBiz: "20210710", + metadata.IndexNameSet: "20210710", + metadata.IndexNameModule: "20210710", + metadata.IndexNameHost: "20210710", + metadata.IndexNameModel: "20210710", + metadata.IndexNameObjectInstance: "20210710", + } + + // IndexMap is es index alias name to related index data map + IndexMap = make(map[string][]*metadata.ESIndex) + + // IndexExtraFieldsMap is the map of es index alias name -> extra fields in the index besides common fields + IndexExtraFieldsMap = map[string][]string{ + metadata.IndexNameBizSet: {metadata.IndexPropertyBKBizSetID}, + metadata.IndexNameSet: {metadata.IndexPropertyBKParentID}, + metadata.IndexNameHost: {metadata.IndexPropertyBKCloudID}, + } + + // IndexExcludeFieldsMap is the map of es index alias name -> the excluded fields of common fields + IndexExcludeFieldsMap = map[string][]string{ + metadata.IndexNameBizSet: {metadata.IndexPropertyBKBizID}, + metadata.IndexNameHost: {metadata.IndexPropertyBKBizID}, + } + + // IndexCollMap is the map of es index alias name -> cmdb collection + IndexCollMap = map[string]string{ + metadata.IndexNameBizSet: common.BKTableNameBaseBizSet, + metadata.IndexNameBiz: common.BKTableNameBaseApp, + metadata.IndexNameSet: common.BKTableNameBaseSet, + metadata.IndexNameModule: common.BKTableNameBaseModule, + metadata.IndexNameHost: common.BKTableNameBaseHost, + metadata.IndexNameModel: common.BKTableNameObjDes, + metadata.IndexNameObjectInstance: common.BKTableNameBaseInst, + } +) + +// GetIndexName get actual index name by alias name +// right now one alias name is related to only one index +func GetIndexName(alias string) string { + for _, index := range IndexMap[alias] { + return index.Name() + } + return alias +} diff --git a/src/scene_server/sync_server/logics/full-text-search/upgrader/current.go b/src/scene_server/sync_server/logics/full-text-search/upgrader/current.go new file mode 100644 index 
0000000000..773a3b2c49 --- /dev/null +++ b/src/scene_server/sync_server/logics/full-text-search/upgrader/current.go @@ -0,0 +1,88 @@ +/* + * Tencent is pleased to support the open source community by making + * 蓝鲸智云 - 配置平台 (BlueKing - Configuration System) available. + * Copyright (C) 2017 THL A29 Limited, + * a Tencent company. All rights reserved. + * Licensed under the MIT License (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at http://opensource.org/licenses/MIT + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on + * an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, + * either express or implied. See the License for the + * specific language governing permissions and limitations under the License. + * We undertake not to change the open source license (MIT license) applicable + * to the current version of the project delivered to anyone in the future. 
+ */ + +package upgrader + +import ( + "context" + + "configcenter/src/common/blog" + "configcenter/src/common/metadata" + "configcenter/src/scene_server/sync_server/logics/full-text-search/types" +) + +// initCurrentEsIndex initialize current es indexes, all indices are stored in types.IndexMap +func (u *upgrader) initCurrentEsIndex() { + if len(types.IndexMap) > 0 { + return + } + + for _, name := range types.AllIndexNames { + meta := &metadata.ESIndexMetadata{ + Settings: u.indexSetting, + Mappings: metadata.ESIndexMetaMappings{ + Properties: map[string]metadata.ESIndexMetaMappingsProperty{ + metadata.IndexPropertyID: {PropertyType: metadata.IndexPropertyTypeKeyword}, + metadata.IndexPropertyDataKind: {PropertyType: metadata.IndexPropertyTypeKeyword}, + metadata.IndexPropertyBKObjID: {PropertyType: metadata.IndexPropertyTypeKeyword}, + metadata.IndexPropertyBKSupplierAccount: {PropertyType: metadata.IndexPropertyTypeKeyword}, + metadata.IndexPropertyBKBizID: {PropertyType: metadata.IndexPropertyTypeKeyword}, + metadata.IndexPropertyKeywords: {PropertyType: metadata.IndexPropertyTypeKeyword}, + metadata.TablePropertyName: {PropertyType: metadata.IndexPropertyTypeObject}, + }, + }, + } + + for _, field := range types.IndexExcludeFieldsMap[name] { + delete(meta.Mappings.Properties, field) + } + + for _, field := range types.IndexExtraFieldsMap[name] { + meta.Mappings.Properties[field] = metadata.ESIndexMetaMappingsProperty{ + PropertyType: metadata.IndexPropertyTypeKeyword, + } + } + + types.IndexMap[name] = []*metadata.ESIndex{metadata.NewESIndex(name, types.IndexVersionMap[name], meta)} + } +} + +// createCurrentEsIndex create current es indexes in es +func (u *upgrader) createCurrentEsIndex(ctx context.Context, rid string) (map[string]struct{}, error) { + newIndexMap := make(map[string]struct{}) + + for name, indexes := range types.IndexMap { + for _, index := range indexes { + exists, err := u.createIndex(ctx, index, rid) + if err != nil { + return nil, err + } 
+ + if !exists { + newIndexMap[name] = struct{}{} + } + + if err = u.addAlias(ctx, index, rid); err != nil { + return nil, err + } + } + } + + blog.Infof("finished es index initialization") + + return newIndexMap, nil +} diff --git a/src/scene_server/sync_server/logics/full-text-search/upgrader/index.go b/src/scene_server/sync_server/logics/full-text-search/upgrader/index.go new file mode 100644 index 0000000000..4f03dca6db --- /dev/null +++ b/src/scene_server/sync_server/logics/full-text-search/upgrader/index.go @@ -0,0 +1,62 @@ +/* + * Tencent is pleased to support the open source community by making + * 蓝鲸智云 - 配置平台 (BlueKing - Configuration System) available. + * Copyright (C) 2017 THL A29 Limited, + * a Tencent company. All rights reserved. + * Licensed under the MIT License (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at http://opensource.org/licenses/MIT + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on + * an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, + * either express or implied. See the License for the + * specific language governing permissions and limitations under the License. + * We undertake not to change the open source license (MIT license) applicable + * to the current version of the project delivered to anyone in the future. 
+ */ + +package upgrader + +import ( + "context" + + "configcenter/src/common/blog" + "configcenter/src/common/metadata" +) + +// createIndex create index if not exists +func (u *upgrader) createIndex(ctx context.Context, index *metadata.ESIndex, rid string) (bool, error) { + // check if elastic index exists + exist, err := u.esCli.IndexExists(index.Name()).Do(ctx) + if err != nil { + blog.Errorf("check elastic index[%s] existence failed, err: %v, rid: %s", index.Name(), err, rid) + return false, err + } + + if exist { + return true, nil + } + + // create new index with the target index name + _, err = u.esCli.CreateIndex(index.Name()).Body(index.Metadata()).Do(ctx) + if err != nil { + blog.Errorf("create elastic index[%s] failed, err: %v, rid: %s", index.Name(), err, rid) + return false, err + } + + return false, nil +} + +// addAlias add index alias name +func (u *upgrader) addAlias(ctx context.Context, index *metadata.ESIndex, rid string) error { + aliasName := index.AliasName() + name := index.Name() + + // it's ok if the alias name index is already exist, but the alias name can not be a real index + if _, err := u.esCli.Alias().Add(name, aliasName).Do(ctx); err != nil { + blog.Errorf("bind elastic index[%s] alias %s failed, err: %v, rid: %s", name, aliasName, err, rid) + return err + } + + return nil +} diff --git a/src/scene_server/sync_server/logics/full-text-search/upgrader/readme.md b/src/scene_server/sync_server/logics/full-text-search/upgrader/readme.md new file mode 100644 index 0000000000..ef4c2b533d --- /dev/null +++ b/src/scene_server/sync_server/logics/full-text-search/upgrader/readme.md @@ -0,0 +1,31 @@ +upgrader +========= + +## 概述 + +upgrader包用于全文检索相关的数据初始化和升级,包括Elasticsearch索引的初始化和升级,数据升级等功能。 + +## 目录结构 + +``` +. 
+├── current.go 用于生成并创建最新版本的Elasticsearch索引 +├── index.go 包含Elasticsearch索引相关的工具函数 +├── readme.md 帮助文档 +├── upgrader.go 通过运行upgrader进行索引和数据的升级 +└── v{version}.go 指定版本的upgrader逻辑,例如v1.go文件存放版本1的upgrader +``` + +## 升级方式 + +### 前置准备 + +- 将最新版本的Elasticsearch索引信息写入到`current.go`文件中,如果索引有更新,则需要更新索引版本号 +- 将当前版本的索引信息和与上一个版本对比需要进行的升级逻辑写入到新版本的`v{version}.go`文件中,并通过`RegisterUpgrader`方法注册到upgrader池中 +- 如果涉及到索引的删除操作,则直接从新增该索引的版本开始删除掉索引的相关操作逻辑 + +### upgrader执行流程 + +- 获取当前版本信息,从当前版本开始执行upgrader进行升级,如果已经是最新版本则不需要进行升级 +- 因为全文检索数据同步需要依赖于对应版本的Elasticsearch索引,所以优先创建最新版本的Elasticsearch索引,此时可以开始进行数据同步 +- 按版本顺序执行每一个upgrader,其中如果涉及到数据迁移则直接将数据迁移到最新版本的Elasticsearch索引中 diff --git a/src/scene_server/sync_server/logics/full-text-search/upgrader/upgrader.go b/src/scene_server/sync_server/logics/full-text-search/upgrader/upgrader.go new file mode 100644 index 0000000000..263b7cc07d --- /dev/null +++ b/src/scene_server/sync_server/logics/full-text-search/upgrader/upgrader.go @@ -0,0 +1,210 @@ +// Package upgrader defines the upgrade logics of full-text-search sync +package upgrader + +import ( + "context" + "sort" + "sync" + + ftypes "configcenter/pkg/types/sync/full-text-search" + "configcenter/src/common" + "configcenter/src/common/blog" + "configcenter/src/common/metadata" + "configcenter/src/scene_server/sync_server/logics/full-text-search/types" + "configcenter/src/storage/driver/mongodb" + + "github.com/olivere/elastic/v7" +) + +// upgraderInst is an instance of upgrader +var upgraderInst = &upgrader{ + upgraderPool: make(map[int]UpgraderFunc), + registerLock: sync.Mutex{}, +} + +// InitUpgrader initialize global upgrader +func InitUpgrader(esCli *elastic.Client, indexSetting metadata.ESIndexMetaSettings) { + upgraderInst.esCli = esCli + upgraderInst.indexSetting = indexSetting + upgraderInst.initCurrentEsIndex() +} + +// upgrader is the full-text-search sync upgrade structure +type upgrader struct { + // esCli is the elasticsearch client + esCli *elastic.Client + // indexSetting is the es 
index meta setting + indexSetting metadata.ESIndexMetaSettings + // upgraderPool is the mapping of all upgrader version -> upgrader function + upgraderPool map[int]UpgraderFunc + // registerLock is the lock for registering upgrader function to avoid conflict + registerLock sync.Mutex +} + +// UpgraderFunc is upgrader function definition +// NOTE: do not need to add new index, only update/remove old index and migrate data +type UpgraderFunc func(ctx context.Context, rid string) (*UpgraderFuncResult, error) + +// UpgraderFuncResult is upgrader function return result +type UpgraderFuncResult struct { + // Indexes is all indexes in this version of upgrader + Indexes []string + // ReindexInfo is the reindex info of the pre version index to new version index + ReindexInfo map[string]string +} + +// RegisterUpgrader register upgrader +func RegisterUpgrader(version int, handler UpgraderFunc) { + upgraderInst.registerLock.Lock() + defer upgraderInst.registerLock.Unlock() + + upgraderInst.upgraderPool[version] = handler +} + +// Upgrade es index to the newest version +func Upgrade(ctx context.Context, rid string) (*ftypes.MigrateResult, []string, error) { + // compare version to get the needed upgraders + dbVersion, versions, result, err := compareVersions(ctx, rid) + if err != nil { + return nil, nil, err + } + + if len(versions) == 0 { + return result, nil, nil + } + + // add current version indexes first + newIndexMap, err := upgraderInst.createCurrentEsIndex(ctx, rid) + if err != nil { + return nil, nil, err + } + + currentIndexMap := make(map[string]struct{}) + for _, indexes := range types.IndexMap { + for _, index := range indexes { + currentIndexMap[index.Name()] = struct{}{} + } + } + + delIndexMap := make(map[string]struct{}) + reIndexInfo := make(map[string]string) + + // do all the upgrader + for _, version := range versions { + upgraderFunc := upgraderInst.upgraderPool[version] + res, err := upgraderFunc(ctx, rid) + if err != nil { + blog.Errorf("upgrade 
full-text search sync failed, version: %d, err: %v, rid: %s", version, err, rid) + return nil, nil, err + } + + for _, index := range res.Indexes { + _, exists := currentIndexMap[index] + if !exists { + delIndexMap[index] = struct{}{} + } + } + + for oldIdx, newIdx := range res.ReindexInfo { + reIndexInfo[oldIdx] = newIdx + delete(newIndexMap, newIdx) + } + + dbVersion.CurrentVersion = version + if err = updateVersion(ctx, dbVersion, rid); err != nil { + return nil, nil, err + } + } + + // TODO complete these logics in next version, right now there's only one version + // TODO delete all old indexes + // TODO reindex all data + + // returns all new indexes that requires data sync + syncIndexes := make([]string, 0) + for index := range newIndexMap { + syncIndexes = append(syncIndexes, index) + } + + return result, syncIndexes, nil +} + +func compareVersions(ctx context.Context, rid string) (*Version, []int, *ftypes.MigrateResult, error) { + dbVersion, err := getVersion(ctx, rid) + if err != nil { + return nil, nil, nil, err + } + + result := &ftypes.MigrateResult{ + PreVersion: dbVersion.CurrentVersion, + CurrentVersion: dbVersion.CurrentVersion, + FinishedVersions: make([]int, 0), + } + + var versions []int + for version := range upgraderInst.upgraderPool { + if version > dbVersion.CurrentVersion { + versions = append(versions, version) + } + } + + if len(versions) == 0 { + return nil, versions, result, nil + } + + dbVersion.InitVersion = dbVersion.CurrentVersion + sort.Ints(versions) + return dbVersion, versions, result, nil +} + +// fullTextVersion is the full-text search sync version type +const fullTextVersion = "full_text_search_version" + +// Version is the full-text search sync version info +type Version struct { + Type string `bson:"type"` + CurrentVersion int `bson:"current_version"` + InitVersion int `bson:"init_version"` +} + +// getVersion get full-text search sync version info from db +func getVersion(ctx context.Context, rid string) (*Version, error) 
{ + condition := map[string]interface{}{ + "type": fullTextVersion, + } + + data := new(Version) + err := mongodb.Client().Table(common.BKTableNameSystem).Find(condition).One(ctx, &data) + if err != nil { + if !mongodb.Client().IsNotFoundError(err) { + blog.Errorf("get full-text search sync version failed, err: %v, rid: %s", err, rid) + return nil, err + } + + data.Type = fullTextVersion + + err = mongodb.Client().Table(common.BKTableNameSystem).Insert(ctx, data) + if err != nil { + blog.Errorf("insert full-text search sync version failed, err: %v, rid: %s", err, rid) + return nil, err + } + return data, nil + } + + return data, nil +} + +// updateVersion update full-text search sync version info to db +func updateVersion(ctx context.Context, version *Version, rid string) error { + condition := map[string]interface{}{ + "type": fullTextVersion, + } + + err := mongodb.Client().Table(common.BKTableNameSystem).Update(ctx, condition, version) + if err != nil { + blog.Errorf("update full-text search sync version %+v failed, err: %v, rid: %s", version, err, rid) + return err + } + + return nil +} diff --git a/src/scene_server/sync_server/logics/full-text-search/upgrader/v1.go b/src/scene_server/sync_server/logics/full-text-search/upgrader/v1.go new file mode 100644 index 0000000000..3e9d566b09 --- /dev/null +++ b/src/scene_server/sync_server/logics/full-text-search/upgrader/v1.go @@ -0,0 +1,118 @@ +/* + * Tencent is pleased to support the open source community by making + * 蓝鲸智云 - 配置平台 (BlueKing - Configuration System) available. + * Copyright (C) 2017 THL A29 Limited, + * a Tencent company. All rights reserved. + * Licensed under the MIT License (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at http://opensource.org/licenses/MIT + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on + * an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, + * either express or implied. See the License for the + * specific language governing permissions and limitations under the License. + * We undertake not to change the open source license (MIT license) applicable + * to the current version of the project delivered to anyone in the future. + */ + +package upgrader + +import ( + "context" + + "configcenter/src/common/blog" + "configcenter/src/common/json" + "configcenter/src/common/mapstruct" + "configcenter/src/common/metadata" + "configcenter/src/scene_server/sync_server/logics/full-text-search/types" +) + +func init() { + RegisterUpgrader(1, upgraderInst.upgradeV1) +} + +var v1Indexes = []string{ + "bk_cmdb.bk_biz_set_obj_20210710", + "bk_cmdb.biz_20210710", + "bk_cmdb.set_20210710", + "bk_cmdb.module_20210710", + "bk_cmdb.host_20210710", + "bk_cmdb.model_20210710", + "bk_cmdb.object_instance_20210710", +} + +var ( + // tableMappingStr is the json string of table property related index mapping + tableMappingStr string +) + +// upgradeV1 add table property related index mapping +func (u *upgrader) upgradeV1(ctx context.Context, rid string) (*UpgraderFuncResult, error) { + tableMappings := metadata.ESIndexMetaMappings{Properties: map[string]metadata.ESIndexMetaMappingsProperty{ + metadata.TablePropertyName: {PropertyType: metadata.IndexPropertyTypeObject}, + }} + + var err error + tableMappingStr, err = json.MarshalToString(tableMappings) + if err != nil { + blog.Errorf("marshal table mapping[%+v] failed, err: %v, rid: %s", tableMappings, err, rid) + return nil, err + } + + currentIndexMap := make(map[string]struct{}) + for _, indexes := range types.IndexMap { + for _, index := range indexes { + currentIndexMap[index.Name()] = struct{}{} + } + } + + 
for _, index := range v1Indexes { + if _, exists := currentIndexMap[index]; !exists { + continue + } + + if err = u.addTablePropertyMapping(ctx, index, rid); err != nil { + return nil, err + } + + } + + return &UpgraderFuncResult{Indexes: v1Indexes}, nil +} + +// addTablePropertyMapping add table property index mappings if not exists +func (u *upgrader) addTablePropertyMapping(ctx context.Context, name string, rid string) error { + // check if table property mapping exists in the index + // table property mapping example: {"mappings":{"properties":{"tables":{"type":"object"}}}} + IndexMapping, err := u.esCli.GetMapping(). + Index(name). + Do(ctx) + if err != nil { + blog.Errorf("get index[%s] mapping failed, err: %v, rid: %s", name, err, rid) + return err + } + + indexMetadata := new(metadata.ESIndexMetadata) + if err = mapstruct.Decode2StructWithTag(IndexMapping, indexMetadata, "json"); err != nil { + blog.Errorf("decode index[%s] table mapping %+v failed, err: %v, rid: %s", name, IndexMapping, err, rid) + return err + } + + for property := range indexMetadata.Mappings.Properties { + if property == metadata.TablePropertyName { + return nil + } + } + + // add table property mapping + _, err = u.esCli.PutMapping(). + BodyString(tableMappingStr). + Index(name). + Do(ctx) + if err != nil { + blog.Errorf("add index[%s] table mapping %s failed, err: %v, rid: %s", name, tableMappingStr, err, rid) + return err + } + + return nil +} diff --git a/src/scene_server/sync_server/logics/logics.go b/src/scene_server/sync_server/logics/logics.go new file mode 100644 index 0000000000..c0cdf21f26 --- /dev/null +++ b/src/scene_server/sync_server/logics/logics.go @@ -0,0 +1,48 @@ +/* + * Tencent is pleased to support the open source community by making + * 蓝鲸智云 - 配置平台 (BlueKing - Configuration System) available. + * Copyright (C) 2017 THL A29 Limited, + * a Tencent company. All rights reserved. 
+ * Licensed under the MIT License (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at http://opensource.org/licenses/MIT + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on + * an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, + * either express or implied. See the License for the + * specific language governing permissions and limitations under the License. + * We undertake not to change the open source license (MIT license) applicable + * to the current version of the project delivered to anyone in the future. + */ + +// Package logics defines the synchronization logics +package logics + +import ( + "configcenter/src/common/backbone" + "configcenter/src/scene_server/sync_server/logics/full-text-search" + "configcenter/src/storage/stream" +) + +// Logics defines the struct that contains all sync logics +type Logics struct { + FullTextSearch fulltextsearch.SyncI +} + +// New Logics instance +func New(engine *backbone.Engine, conf *Config, watcher stream.LoopInterface) (*Logics, error) { + lgc := new(Logics) + var err error + + lgc.FullTextSearch, err = fulltextsearch.New(conf.FullTextSearch, engine.CoreAPI.CacheService().Cache(), watcher) + if err != nil { + return nil, err + } + + return lgc, nil +} + +// Config defines synchronization logics configuration +type Config struct { + FullTextSearch *fulltextsearch.Config `mapstructure:"fullTextSearch"` +} diff --git a/src/scene_server/sync_server/logics/token_handler/handler.go b/src/scene_server/sync_server/logics/token_handler/handler.go new file mode 100644 index 0000000000..372442c2df --- /dev/null +++ b/src/scene_server/sync_server/logics/token_handler/handler.go @@ -0,0 +1,147 @@ +/* + * Tencent is pleased to support the open source community by making + * 蓝鲸智云 - 配置平台 (BlueKing - Configuration System) available. 
+ * Copyright (C) 2017 THL A29 Limited, + * a Tencent company. All rights reserved. + * Licensed under the MIT License (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at http://opensource.org/licenses/MIT + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on + * an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, + * either express or implied. See the License for the + * specific language governing permissions and limitations under the License. + * We undertake not to change the open source license (MIT license) applicable + * to the current version of the project delivered to anyone in the future. + */ + +// Package tokenhandler defines the common token handler for incremental sync logics using watch +package tokenhandler + +import ( + "context" + "time" + + "configcenter/src/common" + "configcenter/src/common/blog" + "configcenter/src/source_controller/cacheservice/event" + "configcenter/src/storage/dal" + "configcenter/src/storage/stream/types" +) + +var _ types.TokenHandler = new(TokenHandler) + +// TokenHandler is the token handler that manges watch token data +type TokenHandler struct { + key string + watchDB dal.DB + metrics *event.EventMetrics +} + +// New creates a TokenHandler +func New(key string, watchDB dal.DB, metrics *event.EventMetrics) (*TokenHandler, error) { + // create watch token table if not exists + exists, err := watchDB.HasTable(context.Background(), WatchTokenTable) + if err != nil { + blog.Errorf("check if table %s exists failed, err: %v", WatchTokenTable, err) + return nil, err + } + + if !exists { + err = watchDB.CreateTable(context.Background(), WatchTokenTable) + if err != nil && !watchDB.IsDuplicatedError(err) { + blog.Errorf("create table %s failed, err: %v", WatchTokenTable, err) + return nil, err + } + } + + return &TokenHandler{ + key: key, + watchDB: watchDB, + metrics: 
metrics, + }, nil +} + +// InitWatchToken initialize watch token data +func (t TokenHandler) InitWatchToken(ctx context.Context) error { + data := map[string]interface{}{ + common.MongoMetaID: t.key, + common.BKTokenField: "", + common.BKStartAtTimeField: time.Now(), + } + + if err := t.watchDB.Table(WatchTokenTable).Insert(ctx, data); err != nil { + blog.Errorf("set %s last watch token failed, err: %v, data: %+v", t.key, err, data) + return err + } + + return t.SetLastWatchTokenData(context.Background(), data) +} + +// SetLastWatchToken set last watch token +func (t TokenHandler) SetLastWatchToken(ctx context.Context, token string) error { + data := map[string]interface{}{ + common.BKTokenField: token, + } + return t.SetLastWatchTokenData(ctx, data) +} + +// SetLastWatchTokenData set last watch token info +func (t TokenHandler) SetLastWatchTokenData(ctx context.Context, data map[string]interface{}) error { + filter := map[string]interface{}{ + common.MongoMetaID: t.key, + } + if err := t.watchDB.Table(WatchTokenTable).Update(ctx, filter, data); err != nil { + blog.Errorf("set %s last watch token failed, err: %v, data: %+v", t.key, err, data) + return err + } + return nil +} + +// GetStartWatchToken get start watch token from watch token db +func (t TokenHandler) GetStartWatchToken(ctx context.Context) (string, error) { + filter := map[string]interface{}{ + common.MongoMetaID: t.key, + } + + data := new(WatchToken) + err := t.watchDB.Table(WatchTokenTable).Find(filter).Fields(common.BKTokenField).One(ctx, data) + if err != nil { + t.metrics.CollectMongoError() + blog.Errorf("get %s start watch token failed, err: %v, filter: %+v", t.key, err, filter) + return "", err + } + + return data.Token, nil +} + +// ResetWatchToken set watch token to empty and set the start watch time to the given one for next watch +func (t TokenHandler) ResetWatchToken(startAtTime types.TimeStamp) error { + data := map[string]interface{}{ + common.BKTokenField: "", + 
common.BKStartAtTimeField: startAtTime, + } + + return t.SetLastWatchTokenData(context.Background(), data) +} + +// GetStartWatchTime get start watch token data from watch token db +func (t TokenHandler) GetStartWatchTime(ctx context.Context) (bool, types.TimeStamp, error) { + filter := map[string]interface{}{ + common.MongoMetaID: t.key, + } + + data := new(WatchToken) + err := t.watchDB.Table(WatchTokenTable).Find(filter).Fields(common.BKStartAtTimeField).One(ctx, data) + if err != nil { + if t.watchDB.IsNotFoundError(err) { + return false, types.TimeStamp{}, nil + } + + t.metrics.CollectMongoError() + blog.Errorf("get %s start watch token data failed, err: %v, filter: %+v", t.key, err, filter) + return false, types.TimeStamp{}, err + } + + return true, data.StartAtTime, nil +} diff --git a/src/scene_server/sync_server/logics/token_handler/types.go b/src/scene_server/sync_server/logics/token_handler/types.go new file mode 100644 index 0000000000..5d0b562de7 --- /dev/null +++ b/src/scene_server/sync_server/logics/token_handler/types.go @@ -0,0 +1,30 @@ +/* + * Tencent is pleased to support the open source community by making + * 蓝鲸智云 - 配置平台 (BlueKing - Configuration System) available. + * Copyright (C) 2017 THL A29 Limited, + * a Tencent company. All rights reserved. + * Licensed under the MIT License (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at http://opensource.org/licenses/MIT + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on + * an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, + * either express or implied. See the License for the + * specific language governing permissions and limitations under the License. + * We undertake not to change the open source license (MIT license) applicable + * to the current version of the project delivered to anyone in the future. 
+ */ + +package tokenhandler + +import "configcenter/src/storage/stream/types" + +// WatchTokenTable is the table to store the latest watch token for sync logics +const WatchTokenTable = "cc_SyncWatchToken" + +// WatchToken is the watch token data for mongodb watch +type WatchToken struct { + Coll string `json:"_id" bson:"_id"` + Token string `json:"token" bson:"token"` + StartAtTime types.TimeStamp `json:"start_at_time,omitempty" bson:"start_at_time,omitempty"` +} diff --git a/src/scene_server/sync_server/readme.md b/src/scene_server/sync_server/readme.md new file mode 100644 index 0000000000..3b3801afae --- /dev/null +++ b/src/scene_server/sync_server/readme.md @@ -0,0 +1,39 @@ +sync server +=========== + +## 概述 + +sync server 负责和第三方组件与服务进行数据同步。支持数据结构和元数据初始化,和全量数据同步和增量数据同步的后台任务,并提供[数据同步小工具](../../tools/cmdb_ctl/readme.md#数据同步)对数据进行补偿同步。简要的同步方式如下: + +![img.png](../../../docs/resource/img/sync-server/simple_sync.png) + +## 全文检索同步 + +将MongoDB中的数据同步到Elasticsearch中,通过Watch机制进行增量同步。第一次部署时会进行初始化操作将所有需要进行全文检索的数据全量同步到ES。 + +### 索引管理 + +cmdb将会创建附带特定版本后缀的真实ES索引,如`bk_cmdb.biz_20210710`, 并且只会在索引不存在时创建,特定版本索引的结构信息在代码中固定,在索引结构发生变化时代码中的版本后缀也会发生变化。 +在成功创建索引后,cmdb会为每一个索引创建系统别名,如`bk_cmdb.biz` `bk_cmdb.set` `bk_cmdb.module` `bk_cmdb.host` `bk_cmdb.model` `bk_cmdb.object_instance`, 这些别名为蓝鲸CMDB内部索引、查询等操作所使用的别名。 + +索引版本发生变化时cmdb会进行索引迁移,将旧索引替换为新索引,并将旧索引中的数据迁移到新索引中。该操作耗时较长,迁移时全文检索数据可能不准确。 + +阅读官方文档 [elastic reindex doc](https://www.elastic.co/guide/en/elasticsearch/reference/current/docs-reindex.html) 了解reindex操作。 +阅读官方文档 [elastic alias doc](https://www.elastic.co/guide/en/elasticsearch/reference/current/indices-aliases.html) 了解索引别名机制。 + +### 配置 + +通过common.yaml配置文件里的syncServer配置管理同步所需的配置,mongo和es等通用组件的配置使用mongodb.yaml和common.yaml中的通用配置,全文检索的同步配置在其中的fullTextSearch配置项中: + +``` yaml +# syncServer相关配置 +syncServer: + # 全文检索同步相关配置 + fullTextSearch: + # 是否开启全文检索同步, 默认为false + enableSync: true + # 
ES索引拥有的主分片数量,详情请参见:https://www.elastic.co/guide/en/elasticsearch/reference/current/index-modules.html + indexShardNum: 1 + # ES索引每个主分片拥有的副本数量,详情请参见:https://www.elastic.co/guide/en/elasticsearch/reference/current/index-modules.html + indexReplicaNum: 1 +``` diff --git a/src/scene_server/sync_server/service/healthz.go b/src/scene_server/sync_server/service/healthz.go new file mode 100644 index 0000000000..56415736b4 --- /dev/null +++ b/src/scene_server/sync_server/service/healthz.go @@ -0,0 +1,77 @@ +/* + * Tencent is pleased to support the open source community by making + * 蓝鲸智云 - 配置平台 (BlueKing - Configuration System) available. + * Copyright (C) 2017 THL A29 Limited, + * a Tencent company. All rights reserved. + * Licensed under the MIT License (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at http://opensource.org/licenses/MIT + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on + * an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, + * either express or implied. See the License for the + * specific language governing permissions and limitations under the License. + * We undertake not to change the open source license (MIT license) applicable + * to the current version of the project delivered to anyone in the future. 
+ */ + +package service + +import ( + "configcenter/src/common" + "configcenter/src/common/metadata" + "configcenter/src/common/metric" + "configcenter/src/common/types" + "configcenter/src/storage/driver/mongodb" + + "github.com/emicklei/go-restful/v3" +) + +// Healthz health check +func (s *Service) Healthz(req *restful.Request, resp *restful.Response) { + meta := metric.HealthMeta{IsHealthy: true} + + // zk health status + zkItem := metric.HealthItem{IsHealthy: true, Name: types.CCFunctionalityServicediscover} + if err := s.engine.Ping(); err != nil { + zkItem.IsHealthy = false + zkItem.Message = err.Error() + } + meta.Items = append(meta.Items, zkItem) + + // mongodb status + mongoItem := metric.HealthItem{IsHealthy: true, Name: types.CCFunctionalityMongo} + if mongodb.Client() == nil { + mongoItem.IsHealthy = false + mongoItem.Message = "not connected" + } else if err := mongodb.Client().Ping(); err != nil { + mongoItem.IsHealthy = false + mongoItem.Message = err.Error() + } + meta.Items = append(meta.Items, mongoItem) + + for _, item := range meta.Items { + if item.IsHealthy == false { + meta.IsHealthy = false + meta.Message = "sync server is unhealthy" + break + } + } + + info := metric.HealthInfo{ + Module: types.CC_MODULE_SYNC, + HealthMeta: meta, + AtTime: metadata.Now(), + } + + answer := metric.HealthResponse{ + Code: common.CCSuccess, + Data: info, + OK: meta.IsHealthy, + Result: meta.IsHealthy, + Message: meta.Message, + } + answer.SetCommonResponse() + resp.Header().Set("Content-Type", "application/json") + _ = resp.WriteEntity(answer) +} diff --git a/src/scene_server/sync_server/service/service.go b/src/scene_server/sync_server/service/service.go new file mode 100644 index 0000000000..9adcc444f2 --- /dev/null +++ b/src/scene_server/sync_server/service/service.go @@ -0,0 +1,88 @@ +/* + * Tencent is pleased to support the open source community by making + * 蓝鲸智云 - 配置平台 (BlueKing - Configuration System) available. 
+ * Copyright (C) 2017 THL A29 Limited, + * a Tencent company. All rights reserved. + * Licensed under the MIT License (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at http://opensource.org/licenses/MIT + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on + * an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, + * either express or implied. See the License for the + * specific language governing permissions and limitations under the License. + * We undertake not to change the open source license (MIT license) applicable + * to the current version of the project delivered to anyone in the future. + */ + +// Package service provides sync server's web service +package service + +import ( + "net/http" + + "configcenter/src/common/backbone" + "configcenter/src/common/errors" + "configcenter/src/common/http/rest" + "configcenter/src/common/rdapi" + "configcenter/src/common/webservice/restfulservice" + "configcenter/src/scene_server/sync_server/logics" + "configcenter/src/thirdparty/logplatform/opentelemetry" + + "github.com/emicklei/go-restful/v3" +) + +// Service defines sync server's web service +type Service struct { + engine *backbone.Engine + lgc *logics.Logics +} + +// New Service +func New(engine *backbone.Engine, lgc *logics.Logics) *Service { + return &Service{ + engine: engine, + lgc: lgc, + } +} + +// WebService provides web service +func (s *Service) WebService() *restful.Container { + errors.SetGlobalCCError(s.engine.CCErr) + getErrFunc := func() errors.CCErrorIf { + return s.engine.CCErr + } + + api := new(restful.WebService) + api.Path("/sync/v3/").Filter(s.engine.Metric().RestfulMiddleWare).Filter(rdapi.AllGlobalFilter(getErrFunc)). 
+ Produces(restful.MIME_JSON) + + // init service actions + s.initService(api) + + container := restful.NewContainer().Add(api) + + opentelemetry.AddOtlpFilter(container) + + // common api + commonAPI := new(restful.WebService).Produces(restful.MIME_JSON) + commonAPI.Route(commonAPI.GET("/healthz").To(s.Healthz)) + commonAPI.Route(commonAPI.GET("/version").To(restfulservice.Version)) + container.Add(commonAPI) + + return container +} + +func (s *Service) initService(api *restful.WebService) { + u := rest.NewRestUtility(rest.Config{ + ErrorIf: s.engine.CCErr, + Language: s.engine.Language, + }) + + u.AddHandler(rest.Action{Verb: http.MethodPost, Path: "/sync/full/text/search/data", + Handler: s.SyncFullTextSearchData}) + u.AddHandler(rest.Action{Verb: http.MethodPost, Path: "/migrate/full/text/search", + Handler: s.MigrateFullTextSearch}) + + u.AddToRestfulWebService(api) +} diff --git a/src/scene_server/sync_server/service/sync.go b/src/scene_server/sync_server/service/sync.go new file mode 100644 index 0000000000..9aa7942f19 --- /dev/null +++ b/src/scene_server/sync_server/service/sync.go @@ -0,0 +1,83 @@ +/* + * Tencent is pleased to support the open source community by making + * 蓝鲸智云 - 配置平台 (BlueKing - Configuration System) available. + * Copyright (C) 2017 THL A29 Limited, + * a Tencent company. All rights reserved. + * Licensed under the MIT License (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at http://opensource.org/licenses/MIT + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on + * an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, + * either express or implied. See the License for the + * specific language governing permissions and limitations under the License. 
+ * We undertake not to change the open source license (MIT license) applicable + * to the current version of the project delivered to anyone in the future. + */ + +package service + +import ( + "context" + "time" + + ftypes "configcenter/pkg/types/sync/full-text-search" + "configcenter/src/common/blog" + "configcenter/src/common/http/rest" +) + +// SyncFullTextSearchData sync data for full-text search, NOTE: this is an async api +func (s *Service) SyncFullTextSearchData(cts *rest.Contexts) { + opt := new(ftypes.SyncDataOption) + if err := cts.DecodeInto(opt); err != nil { + cts.RespAutoError(err) + return + } + + rawErr := opt.Validate() + if rawErr.ErrCode != 0 { + cts.RespAutoError(rawErr.ToCCError(cts.Kit.CCError)) + return + } + + blog.Infof("start sync full-text search data request, opt: %+v, rid: %s", opt, cts.Kit.Rid) + + go func() { + err := s.lgc.FullTextSearch.SyncData(context.Background(), opt, cts.Kit.Rid) + if err != nil { + blog.Errorf("run sync full-text search data req failed, err: %v, opt: %+v, rid: %s", err, opt, cts.Kit.Rid) + return + } + blog.Infof("finished sync full-text search data request, opt: %+v, rid: %s", opt, cts.Kit.Rid) + }() + + cts.RespEntity(nil) +} + +// MigrateFullTextSearch migrate full-text search info, NOTE: this is an async api +func (s *Service) MigrateFullTextSearch(cts *rest.Contexts) { + blog.Infof("start migrate full-text search request, rid: %s", cts.Kit.Rid) + + var result *ftypes.MigrateResult + done := make(chan struct{}, 1) + + go func() { + res, err := s.lgc.FullTextSearch.Migrate(context.Background(), cts.Kit.Rid) + if err != nil { + blog.Errorf("run migrate full-text search req failed, err: %v, rid: %s", err, cts.Kit.Rid) + return + } + result = res + done <- struct{}{} + + blog.Infof("finished migrate full-text search request, opt: %+v, rid: %s", cts.Kit.Rid) + }() + + tick := time.Tick(10 * time.Second) + select { + case <-tick: + cts.RespEntity(&ftypes.MigrateResult{Message: "migrate full-text search task is 
running"}) + case <-done: + cts.RespEntity(result) + } +} diff --git a/src/scene_server/sync_server/sync.go b/src/scene_server/sync_server/sync.go new file mode 100644 index 0000000000..7c63b28083 --- /dev/null +++ b/src/scene_server/sync_server/sync.go @@ -0,0 +1,60 @@ +/* + * Tencent is pleased to support the open source community by making + * 蓝鲸智云 - 配置平台 (BlueKing - Configuration System) available. + * Copyright (C) 2017 THL A29 Limited, + * a Tencent company. All rights reserved. + * Licensed under the MIT License (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at http://opensource.org/licenses/MIT + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on + * an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, + * either express or implied. See the License for the + * specific language governing permissions and limitations under the License. + * We undertake not to change the open source license (MIT license) applicable + * to the current version of the project delivered to anyone in the future. + */ + +// Package main defines the main function for sync server +package main + +import ( + "context" + "fmt" + "os" + "runtime" + + "configcenter/src/common" + "configcenter/src/common/blog" + "configcenter/src/common/types" + "configcenter/src/common/util" + "configcenter/src/scene_server/sync_server/app" + "configcenter/src/scene_server/sync_server/app/options" + + "github.com/spf13/pflag" +) + +func main() { + common.SetIdentification(types.CC_MODULE_SYNC) + runtime.GOMAXPROCS(runtime.NumCPU()) + + blog.InitLogs() + defer blog.CloseLogs() + + op := options.NewServerOption() + op.AddFlags(pflag.CommandLine) + + util.InitFlags() + + if err := common.SavePid(); err != nil { + blog.Errorf("save pid failed. 
err: %v", err) + } + + ctx, cancel := context.WithCancel(context.Background()) + if err := app.Run(ctx, cancel, op); err != nil { + fmt.Fprintf(os.Stderr, "%v\n", err) + blog.Errorf("sync server stopped by %v", err) + blog.CloseLogs() + os.Exit(1) + } +} diff --git a/src/scene_server/topo_server/app/options/options.go b/src/scene_server/topo_server/app/options/options.go index fee7061223..7b057cfa87 100644 --- a/src/scene_server/topo_server/app/options/options.go +++ b/src/scene_server/topo_server/app/options/options.go @@ -35,7 +35,7 @@ type Config struct { Auth iam.AuthConfig Redis redis.Config ConfigMap map[string]string - Es elasticsearch.EsConfig + Es *elasticsearch.EsConfig } // NewServerOption TODO diff --git a/src/scene_server/topo_server/app/server.go b/src/scene_server/topo_server/app/server.go index 3ea0699606..19971bc490 100644 --- a/src/scene_server/topo_server/app/server.go +++ b/src/scene_server/topo_server/app/server.go @@ -45,7 +45,7 @@ func (t *TopoServer) onTopoConfigUpdate(previous, current cc.ProcessConfig) { blog.Infof("the new cfg:%#v the origin cfg:%#v", t.Config, string(current.ConfigData)) var err error - t.Config.Es, err = elasticsearch.ParseConfigFromKV("es", nil) + t.Config.Es, err = elasticsearch.ParseConfig("es") if err != nil { blog.Warnf("parse es config failed: %v", err) } @@ -96,12 +96,11 @@ func Run(ctx context.Context, cancel context.CancelFunc, op *options.ServerOptio essrv := new(elasticsearch.EsSrv) if server.Config.Es.FullTextSearch == "on" { - esClient, err := elasticsearch.NewEsClient(server.Config.Es) + essrv, err = elasticsearch.NewEsClient(server.Config.Es) if err != nil { - blog.Errorf("failed to create elastic search client, err:%s", err.Error()) + blog.Errorf("failed to create elastic search client, err: %v", err) return fmt.Errorf("new es client failed, err: %v", err) } - essrv.Client = esClient } iamCli := new(iam.IAM) diff --git a/src/source_controller/cacheservice/cache/cache.go 
b/src/source_controller/cacheservice/cache/cache.go index 50f6dc88ce..4f52b680d4 100644 --- a/src/source_controller/cacheservice/cache/cache.go +++ b/src/source_controller/cacheservice/cache/cache.go @@ -17,6 +17,7 @@ import ( "fmt" "configcenter/src/apimachinery/discovery" + "configcenter/src/source_controller/cacheservice/cache/common" "configcenter/src/source_controller/cacheservice/cache/host" "configcenter/src/source_controller/cacheservice/cache/mainline" "configcenter/src/source_controller/cacheservice/cache/topology" @@ -29,7 +30,7 @@ import ( "configcenter/src/storage/stream" ) -// NewCache TODO +// NewCache new cache service func NewCache(reflector reflector.Interface, loopW stream.LoopInterface, isMaster discovery.ServiceManageInterface, watchDB dal.DB) (*ClientSet, error) { @@ -46,24 +47,30 @@ func NewCache(reflector reflector.Interface, loopW stream.LoopInterface, isMaste return nil, err } + if err = common.InitCache(reflector); err != nil { + return nil, fmt.Errorf("new common resource cache failed, err: %v", err) + } + mainlineClient := mainline.NewMainlineClient() hostClient := host.NewClient() cache := &ClientSet{ - Tree: topotree.NewTopologyTree(mainlineClient), - Host: hostClient, - Business: mainlineClient, - Topology: topo, - Event: watch.NewClient(watchDB, mongodb.Client(), redis.Client()), + Tree: topotree.NewTopologyTree(mainlineClient), + Host: hostClient, + Business: mainlineClient, + Topology: topo, + Event: watch.NewClient(watchDB, mongodb.Client(), redis.Client()), + CommonRes: common.NewClient(), } return cache, nil } -// ClientSet TODO +// ClientSet is the cache client set type ClientSet struct { - Tree *topotree.TopologyTree - Topology *topology.Topology - Host *host.Client - Business *mainline.Client - Event *watch.Client + Tree *topotree.TopologyTree + Topology *topology.Topology + Host *host.Client + Business *mainline.Client + Event *watch.Client + CommonRes *common.Client } diff --git 
a/src/source_controller/cacheservice/cache/common/cache.go b/src/source_controller/cacheservice/cache/common/cache.go new file mode 100644 index 0000000000..8ce4704937 --- /dev/null +++ b/src/source_controller/cacheservice/cache/common/cache.go @@ -0,0 +1,274 @@ +/* + * Tencent is pleased to support the open source community by making + * 蓝鲸智云 - 配置平台 (BlueKing - Configuration System) available. + * Copyright (C) 2017 THL A29 Limited, + * a Tencent company. All rights reserved. + * Licensed under the MIT License (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at http://opensource.org/licenses/MIT + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on + * an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, + * either express or implied. See the License for the + * specific language governing permissions and limitations under the License. + * We undertake not to change the open source license (MIT license) applicable + * to the current version of the project delivered to anyone in the future. 
+ */ + +// Package common defines the common resource cache +package common + +import ( + "context" + "encoding/json" + "fmt" + "sync" + "time" + + "configcenter/src/common" + "configcenter/src/common/blog" + "configcenter/src/common/mapstr" + "configcenter/src/source_controller/cacheservice/cache/common/key" + "configcenter/src/storage/driver/mongodb" + "configcenter/src/storage/driver/redis" + "configcenter/src/storage/reflector" + "configcenter/src/storage/stream/types" + + rawRedis "github.com/go-redis/redis/v7" +) + +var cacheMap = make(map[key.KeyType]*commonCache) +var cacheOnce = make(map[key.KeyType]*sync.Once) + +// InitCache initialize common resource cache +func InitCache(event reflector.Interface) error { + allKeyGen := key.GetAllKeyGenerator() + + for typ, keyGen := range allKeyGen { + _, exists := cacheMap[typ] + if exists { + continue + } + + _, exists = cacheOnce[typ] + if !exists { + cacheOnce[typ] = new(sync.Once) + } + + var err error + cacheOnce[typ].Do(func() { + cacheMap[typ] = &commonCache{ + key: keyGen, + event: event, + } + + err = cacheMap[typ].Run() + }) + + if err != nil { + return fmt.Errorf("run %s cache failed, err: %v", typ, err) + } + } + + return nil +} + +type commonCache struct { + key key.KeyGenerator + event reflector.Interface +} + +// Run common resource watch +func (c *commonCache) Run() error { + _, err := redis.Client().Get(context.Background(), c.key.ListDoneKey()).Result() + if err != nil { + if !redis.IsNilErr(err) { + blog.Errorf("get %s list done redis key %s failed, err: %v", c.key.Type(), c.key.ListDoneKey(), err) + return fmt.Errorf("get %s list done redis key failed, err: %v", err) + } + + // do with list watcher. 
+ page := 500 + listOpts := &types.ListWatchOptions{ + Options: c.key.GetWatchOpt(), + PageSize: &page, + } + + listCap := &reflector.Capable{ + OnChange: reflector.OnChangeEvent{ + OnLister: c.onUpsert, + OnAdd: c.onUpsert, + OnUpdate: c.onUpsert, + OnListerDone: c.onListDone, + OnDelete: c.onDelete, + }, + } + + blog.Info("do %s cache with list watcher.", c.key.Type()) + return c.event.ListWatcher(context.Background(), listOpts, listCap) + } + + // do with watcher only. + watchOpts := &types.WatchOptions{ + Options: c.key.GetWatchOpt(), + } + + watchCap := &reflector.Capable{ + OnChange: reflector.OnChangeEvent{ + OnAdd: c.onUpsert, + OnUpdate: c.onUpsert, + OnDelete: c.onDelete, + }, + } + + blog.Info("do %s cache with only watcher") + return c.event.Watcher(context.Background(), watchOpts, watchCap) +} + +func (c *commonCache) onUpsert(e *types.Event) { + if blog.V(4) { + blog.Infof("received %s upsert event, oid: %s, doc: %s", c.key.Type(), e.Oid, e.DocBytes) + } + + idKey, _, err := c.key.GetIDKey(e.Document) + if err != nil { + blog.Errorf("generate %s id key from upsert event failed, oid: %s, doc: %s", c.key.Type(), e.Oid, e.DocBytes) + return + } + + // get resource details from db again to avoid dirty data. 
+ mongoData, err := c.key.GetMongoData(key.IDKind, mongodb.Client(), idKey) + if err != nil { + blog.Errorf("get %s mongo data from id key %s failed, oid: %s, doc: %s", c.key.Type(), idKey, e.Oid, e.DocBytes) + return + } + + for _, data := range mongoData { + refreshCache(c.key, data, e.Oid) + } +} + +func (c *commonCache) onListDone() { + if err := redis.Client().Set(context.Background(), c.key.ListDoneKey(), "done", 0).Err(); err != nil { + blog.Errorf("list %s data to cache and list done, but set list done key failed, err: %v", c.key.Type(), err) + return + } + blog.Info("list %s data to cache and list done") +} + +func (c *commonCache) onDelete(e *types.Event) { + blog.Infof("received %s delete event, oid: %s", e.Oid) + + filter := mapstr.MapStr{ + "oid": e.Oid, + "coll": e.Collection, + } + doc := make(map[string]mapstr.MapStr) + err := mongodb.Client().Table(common.BKTableNameDelArchive).Find(filter).Fields("detail").One(context.Background(), + &doc) + if err != nil { + blog.Errorf("get del archive failed, err: %v, oid: %s, coll: %s", err, e.Oid, e.Collection) + return + } + + pipe := redis.Client().Pipeline() + + // remove common resource detail cache + idKey, _, err := c.key.GetIDKey(doc["detail"]) + if err != nil { + blog.Errorf("generate %s id key from del archive failed, err: %v, doc: %+v", c.key.Type(), err, doc) + return + } + pipe.Del(c.key.DetailKey(idKey)) + + // delete common resource key kinds related redis keys + for _, kind := range c.key.GetAllKeyKinds() { + redisKey, err := c.key.GenerateRedisKey(kind, doc["detail"]) + if err != nil { + blog.Errorf("generate %s %s redis key from del archive failed, err: %v, doc: %+v", c.key.Type(), kind, err, + doc) + return + } + + pipe.SRem(redisKey, idKey) + } + + // remove common resource id from common resource id list + pipe.ZRem(c.key.IDListKey(), idKey) + + _, err = pipe.Exec() + if err != nil { + blog.Errorf("delete redis cache failed, err: %v, oid: %s, coll: %s", err, e.Oid, e.Collection) + return + 
} + blog.Infof("received %s delete event, oid: %s, delete redis keys success", c.key.Type(), e.Oid) +} + +// refreshCache refresh the common resource cache +func refreshCache(key key.KeyGenerator, data interface{}, rid string) { + idKey, score, err := key.GetIDKey(data) + if err != nil { + blog.Errorf("generate %s refresh data id key failed, err: %v, data: %+v, rid: %s", key.Type(), err, data, rid) + return + } + + detailLockKey := key.DetailLockKey(idKey) + + // get refresh lock to avoid concurrency + success, err := redis.Client().SetNX(context.Background(), detailLockKey, 1, 10*time.Second).Result() + if err != nil { + blog.Errorf("get %s detail lock %s failed, err: %v, rid: %s", key.Type(), detailLockKey, err, rid) + return + } + + if !success { + blog.V(4).Infof("do not get %s detail lock %s, skip, rid: %s", key.Type(), detailLockKey, data, rid) + return + } + + defer func() { + if err = redis.Client().Del(context.Background(), detailLockKey).Err(); err != nil { + blog.Errorf("delete %s detail lock %s failed, err: %v, rid: %s", key.Type(), detailLockKey, err, rid) + } + }() + + // refresh all key kind cache after acquiring the lock + pipeline := redis.Client().Pipeline() + ttl := key.WithRandomExpireSeconds() + + // upsert all other key kinds' redis key to id relation cache + for _, kind := range key.GetAllKeyKinds() { + redisKey, err := key.GenerateRedisKey(kind, data) + if err != nil { + blog.Errorf("generate %s %s redis key from refresh data: %+v failed, err: %v, rid: %s", key.Type(), kind, + data, err, rid) + return + } + + pipeline.Expire(redisKey, ttl) + pipeline.SAdd(redisKey, idKey) + } + + // update common resource details + detail, err := json.Marshal(data) + if err != nil { + blog.Errorf("marshal %s data %+v failed, err: %v, rid: %s", key.Type(), detail, err, rid) + return + } + pipeline.Set(key.DetailKey(idKey), string(detail), ttl) + + // add common resource id to id list. 
+ pipeline.ZAddNX(key.IDListKey(), &rawRedis.Z{ + // set common resource id as it's score number + Score: score, + Member: idKey, + }) + + _, err = pipeline.Exec() + if err != nil { + blog.Errorf("refresh %s %s redis cache failed, err: %v, data: %+v, rid: %s", key.Type(), idKey, err, data, rid) + return + } + + blog.V(4).Infof("refresh %s cache success, id: %s, ttl: %ds, rid: %s", key.Type(), idKey, ttl/time.Second, rid) +} diff --git a/src/source_controller/cacheservice/cache/common/client.go b/src/source_controller/cacheservice/cache/common/client.go new file mode 100644 index 0000000000..c6e624219b --- /dev/null +++ b/src/source_controller/cacheservice/cache/common/client.go @@ -0,0 +1,111 @@ +/* + * Tencent is pleased to support the open source community by making + * 蓝鲸智云 - 配置平台 (BlueKing - Configuration System) available. + * Copyright (C) 2017 THL A29 Limited, + * a Tencent company. All rights reserved. + * Licensed under the MIT License (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at http://opensource.org/licenses/MIT + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on + * an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, + * either express or implied. See the License for the + * specific language governing permissions and limitations under the License. + * We undertake not to change the open source license (MIT license) applicable + * to the current version of the project delivered to anyone in the future. 
+ */ + +package common + +import ( + "sync" + + "configcenter/src/common" + "configcenter/src/common/blog" + "configcenter/src/common/http/rest" + "configcenter/src/common/metadata" + "configcenter/src/source_controller/cacheservice/cache/common/key" + "configcenter/src/source_controller/cacheservice/cache/tools" + "configcenter/src/storage/driver/redis" +) + +var client *Client +var clientOnce sync.Once + +// NewClient new common resource client +func NewClient() *Client { + if client != nil { + return client + } + + clientOnce.Do(func() { + client = &Client{ + lock: tools.NewRefreshingLock(), + } + }) + + return client +} + +// Client is the common resource cache client +type Client struct { + lock tools.RefreshingLock +} + +// ListWithKey search common resource cache info with specified keys +func (c *Client) ListWithKey(kit *rest.Kit, cacheType string, opt *metadata.ListCommonCacheWithKeyOpt) ( + []string, error) { + + if len(cacheType) == 0 { + return nil, kit.CCError.CCErrorf(common.CCErrCommParamsNeedSet, "type") + } + + keyGenerator, err := key.GetKeyGenerator(key.KeyType(cacheType)) + if err != nil { + blog.Errorf("get %s key generator failed, err: %v, rid: %s", cacheType, err, kit.Rid) + return nil, kit.CCError.CCErrorf(common.CCErrCommParamsInvalid, "type") + } + + if rawErr := opt.Validate(); rawErr.ErrCode != 0 { + return nil, rawErr.ToCCError(kit.CCError) + } + + allIDs := make([]string, 0) + needRefreshKeys := make([]string, 0) + keyKind := key.KeyKind(opt.Kind) + if keyKind == key.IDKind { + allIDs = opt.Keys + } else { + for _, redisKey := range opt.Keys { + existRes, err := redis.Client().Exists(kit.Ctx, redisKey).Result() + if err != nil { + blog.Errorf("check if %s key %s exists failed, err: %v, rid: %s", cacheType, redisKey, err, kit.Rid) + return nil, err + } + + if existRes != 1 { + needRefreshKeys = append(needRefreshKeys, redisKey) + continue + } + + ids, err := redis.Client().SMembers(kit.Ctx, redisKey).Result() + if err != nil { + 
blog.Errorf("get %s ids by other key %s failed, err: %v, rid: %s", cacheType, redisKey, err, kit.Rid) + return nil, err + } + allIDs = append(allIDs, ids...) + } + } + + refreshDetails, err := c.listWithRefreshKeys(kit, keyGenerator, keyKind, needRefreshKeys, opt.Fields) + if err != nil { + return nil, err + } + + details, err := c.listWithIDs(kit, keyGenerator, allIDs, opt.Fields) + if err != nil { + blog.Errorf("list %s cache info by ids %+v failed, err: %v, rid: %s", cacheType, allIDs, err, kit.Rid) + return nil, err + } + return append(refreshDetails, details...), nil +} diff --git a/src/source_controller/cacheservice/cache/common/client_logics.go b/src/source_controller/cacheservice/cache/common/client_logics.go new file mode 100644 index 0000000000..fa824af24a --- /dev/null +++ b/src/source_controller/cacheservice/cache/common/client_logics.go @@ -0,0 +1,145 @@ +/* + * Tencent is pleased to support the open source community by making + * 蓝鲸智云 - 配置平台 (BlueKing - Configuration System) available. + * Copyright (C) 2017 THL A29 Limited, + * a Tencent company. All rights reserved. + * Licensed under the MIT License (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at http://opensource.org/licenses/MIT + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on + * an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, + * either express or implied. See the License for the + * specific language governing permissions and limitations under the License. + * We undertake not to change the open source license (MIT license) applicable + * to the current version of the project delivered to anyone in the future. 
+ */ + +package common + +import ( + "configcenter/src/common" + "configcenter/src/common/blog" + "configcenter/src/common/http/rest" + "configcenter/src/common/json" + "configcenter/src/common/util" + "configcenter/src/source_controller/cacheservice/cache/common/key" + "configcenter/src/storage/driver/mongodb" + "configcenter/src/storage/driver/redis" +) + +// listWithIDs list common resource info from redis with ids +// if a common resource does not exist in the cache and cannot be found in mongodb, it will not be returned +// therefore the length and sequence of the returned array may not be equal to the requested ids +func (c *Client) listWithIDs(kit *rest.Kit, generator key.KeyGenerator, ids, fields []string) ([]string, error) { + if len(ids) == 0 { + return make([]string, 0), nil + } + + ids = util.StrArrayUnique(ids) + keys := make([]string, len(ids)) + for i, id := range ids { + keys[i] = generator.DetailKey(id) + } + + results, err := redis.Client().MGet(kit.Ctx, keys...).Result() + if err != nil { + blog.Errorf("get %s ids %+v details from redis failed, err: %v, rid: %s", generator.Type(), ids, err, kit.Rid) + return nil, err + } + + needRefreshIdx := make([]int, 0) + details := make([]string, 0) + for idx, res := range results { + if res == nil { + needRefreshIdx = append(needRefreshIdx, idx) + continue + } + + detail, ok := res.(string) + if !ok { + blog.Errorf("%s %s detail(%+v) is invalid, rid: %s", generator.Type(), ids[idx], res, kit.Rid) + return nil, kit.CCError.CCErrorf(common.CCErrCommParamsInvalid, "detail") + } + + if len(fields) != 0 { + details = append(details, *json.CutJsonDataWithFields(&detail, fields)) + } else { + details = append(details, detail) + } + } + + if len(needRefreshIdx) == 0 { + return details, nil + } + + // can not find detail in cache, need refresh the cache + refreshIDs := make([]string, len(needRefreshIdx)) + for i, idx := range needRefreshIdx { + refreshIDs[i] = ids[idx] + } + + refreshDetails, err := 
c.listWithRefreshKeys(kit, generator, key.IDKind, refreshIDs, fields) + if err != nil { + return nil, err + } + + return append(details, refreshDetails...), nil +} + +// listWithRefreshKeys list common resource info from mongo with refresh keys +func (c *Client) listWithRefreshKeys(kit *rest.Kit, generator key.KeyGenerator, kind key.KeyKind, + keys, fields []string) ([]string, error) { + + if len(keys) == 0 { + return make([]string, 0), nil + } + + mongoData, err := generator.GetMongoData(kind, mongodb.Client(), keys...) + if err != nil { + blog.Errorf("get %s ids %+v mongo data failed, err: %v, rid: %s", generator.Type(), keys, err, kit.Rid) + return nil, err + } + + details := make([]string, 0) + for _, data := range mongoData { + c.tryRefreshDetail(generator, data, kit.Rid) + + detailJs, err := json.Marshal(data) + if err != nil { + blog.Errorf("marshal %s mongo data %+v failed, err: %v, rid: %s", generator.Type(), data, err, kit.Rid) + return nil, err + } + detailStr := string(detailJs) + + if len(fields) != 0 { + details = append(details, *json.CutJsonDataWithFields(&detailStr, fields)) + } else { + details = append(details, detailStr) + } + } + return details, nil +} + +func (c *Client) tryRefreshDetail(generator key.KeyGenerator, data interface{}, rid string) { + idKey, _, err := generator.GetIDKey(data) + if err != nil { + blog.Errorf("generate %s try refresh key failed, err: %v, data: %+v, rid: %s", generator.Type(), err, data, rid) + return + } + + detailKey := generator.DetailKey(idKey) + if !c.lock.CanRefresh(detailKey) { + return + } + + // set refreshing status + c.lock.SetRefreshing(detailKey) + + // check if we can refresh the common resource detail cache + go func() { + defer c.lock.SetUnRefreshing(detailKey) + + refreshCache(generator, data, rid) + }() +} diff --git a/src/source_controller/cacheservice/cache/common/key/attribute.go b/src/source_controller/cacheservice/cache/common/key/attribute.go new file mode 100644 index 0000000000..e4b448f5da 
--- /dev/null +++ b/src/source_controller/cacheservice/cache/common/key/attribute.go @@ -0,0 +1,58 @@ +/* + * Tencent is pleased to support the open source community by making + * 蓝鲸智云 - 配置平台 (BlueKing - Configuration System) available. + * Copyright (C) 2017 THL A29 Limited, + * a Tencent company. All rights reserved. + * Licensed under the MIT License (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at http://opensource.org/licenses/MIT + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on + * an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, + * either express or implied. See the License for the + * specific language governing permissions and limitations under the License. + * We undertake not to change the open source license (MIT license) applicable + * to the current version of the project delivered to anyone in the future. 
+ */ + +package key + +import ( + "fmt" + "time" + + "configcenter/src/common" + "configcenter/src/common/mapstr" + "configcenter/src/storage/dal" + "configcenter/src/storage/stream/types" +) + +func init() { + addKeyGenerator(AttributeType, attributeKey) +} + +var attributeKey = KeyGenerator{ + namespace: fmt.Sprintf("%s%s:", common.BKCacheKeyV3Prefix, AttributeType), + watchOpt: types.Options{ + EventStruct: new(mapstr.MapStr), + Collection: common.BKTableNameObjAttDes, + }, + expireSeconds: 30 * 60 * time.Second, + expireRangeSeconds: [2]int{-600, 600}, + idGen: func(data interface{}) (string, float64, error) { + return commonIDGenerator(data, common.BKFieldID) + }, + keyGenMap: map[KeyKind]redisKeyGenerator{ + ObjIDKind: func(data interface{}) (string, error) { + return commonKeyGenerator(data, common.BKObjIDField) + }, + }, + dataGetterMap: map[KeyKind]dataGetter{ + IDKind: func(db dal.DB, keys ...string) ([]interface{}, error) { + return commonIDDataGetter(db, common.BKTableNameObjAttDes, common.BKFieldID, keys...) + }, + ObjIDKind: func(db dal.DB, keys ...string) ([]interface{}, error) { + return commonKeyDataGetter(db, common.BKTableNameObjAttDes, common.BKObjIDField, keys...) + }, + }, +} diff --git a/src/source_controller/cacheservice/cache/common/key/key.go b/src/source_controller/cacheservice/cache/common/key/key.go new file mode 100644 index 0000000000..f6bbd26afd --- /dev/null +++ b/src/source_controller/cacheservice/cache/common/key/key.go @@ -0,0 +1,176 @@ +/* + * Tencent is pleased to support the open source community by making + * 蓝鲸智云 - 配置平台 (BlueKing - Configuration System) available. + * Copyright (C) 2017 THL A29 Limited, + * a Tencent company. All rights reserved. + * Licensed under the MIT License (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at http://opensource.org/licenses/MIT + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on + * an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, + * either express or implied. See the License for the + * specific language governing permissions and limitations under the License. + * We undertake not to change the open source license (MIT license) applicable + * to the current version of the project delivered to anyone in the future. + */ + +// Package key defines the cache key generator logics +package key + +import ( + "errors" + "fmt" + "math/rand" + "strings" + "time" + + "configcenter/src/storage/dal" + "configcenter/src/storage/stream/types" +) + +var allKeyInfo = make(map[KeyType]KeyGenerator) + +func addKeyGenerator(typ KeyType, generator KeyGenerator) { + generator.typ = typ + allKeyInfo[typ] = generator +} + +// GetAllKeyGenerator get all key type -> cache key generator map +func GetAllKeyGenerator() map[KeyType]KeyGenerator { + return allKeyInfo +} + +// GetKeyGenerator get cache key generator from key type +func GetKeyGenerator(typ KeyType) (KeyGenerator, error) { + key, exists := allKeyInfo[typ] + if !exists { + return key, fmt.Errorf("cache key type %s is invalid", typ) + } + return key, nil +} + +// KeyGenerator is the common resource cache key generator +type KeyGenerator struct { + // typ is the cache key's type + typ KeyType + // namespace is the cache key's namespace + namespace string + // watchOpt is the mongo watch options + watchOpt types.Options + // expireSeconds is the ttl for the key. + // it's always used with the expireRangeSeconds to avoid the keys to be expired at the same time, which + // will have large numbers of request flood to the mongodb, we can not accept that. + // for example, if expireSeconds is 30min, expireRangeSeconds is [-600, 600], then + // a key's expire seconds is between [20, 40] minutes. 
+ expireSeconds time.Duration + // min range:expireRangeSeconds[0], max range:expireRangeSeconds[1] + expireRangeSeconds [2]int + // idGen generates redis id field and score + idGen func(data interface{}) (string, float64, error) + // keyGenMap is the mapping of key kind to the key generator + keyGenMap map[KeyKind]redisKeyGenerator + // dataGetterMap is the mapping of key kind to the db data getter + dataGetterMap map[KeyKind]dataGetter +} + +// redisKeyGenerator generates redis key suffix from data +type redisKeyGenerator func(data interface{}) (string, error) + +// dataGetter get mongodb data by redis keys +type dataGetter func(db dal.DB, keys ...string) ([]interface{}, error) + +// Type returns the cache key's type +func (k KeyGenerator) Type() KeyType { + return k.typ +} + +// DetailKey is the redis key to store the resource detail, can only be generated by id +func (k KeyGenerator) DetailKey(id string) string { + return k.namespace + "detail:" + id +} + +// DetailLockKey is the redis key to lock the detail key related operations +func (k KeyGenerator) DetailLockKey(id string) string { + return k.namespace + "detail:lock:" + id +} + +// ListDoneKey is the redis key indicating that the list phase is complete +func (k KeyGenerator) ListDoneKey() string { + return k.namespace + "listdone" +} + +// IDListKey is a redis zset(sorted set) key to store all the host ids, which is used to paged host id quickly, +// without use mongodb's sort method, which is much more expensive. +// this key has a expired ttl. +// We use the host id as the default zset key's score, so that we can use host id as score and page's +// sort fields to sort host. 
+func (k KeyGenerator) IDListKey() string { + return k.namespace + "id_list" +} + +// WithRandomExpireSeconds generate random redis key expire in seconds +func (k KeyGenerator) WithRandomExpireSeconds() time.Duration { + rand.Seed(time.Now().UnixNano()) + seconds := rand.Intn(k.expireRangeSeconds[1]-k.expireRangeSeconds[0]) + k.expireRangeSeconds[0] + return k.expireSeconds + time.Duration(seconds)*time.Second +} + +// GetWatchOpt get mongodb watch options +func (k KeyGenerator) GetWatchOpt() types.Options { + return k.watchOpt +} + +// GetAllKeyKinds get all cache key supported key kinds except for id kind +func (k KeyGenerator) GetAllKeyKinds() []KeyKind { + keyKinds := make([]KeyKind, 0) + for kind := range k.keyGenMap { + keyKinds = append(keyKinds, kind) + } + return keyKinds +} + +// GenerateRedisKey generate redis key +func (k KeyGenerator) GenerateRedisKey(kind KeyKind, data interface{}) (string, error) { + if data == nil { + return "", errors.New("data is nil") + } + + suffix, err := k.keyGenMap[kind](data) + if err != nil { + return "", err + } + + return k.namespace + string(kind) + ":" + suffix, nil +} + +// GetIDKey get id key from data +func (k KeyGenerator) GetIDKey(data interface{}) (string, float64, error) { + if data == nil { + return "", 0, errors.New("data is nil") + } + return k.idGen(data) +} + +// GetMongoData get mongodb data +func (k KeyGenerator) GetMongoData(kind KeyKind, db dal.DB, keys ...string) ([]interface{}, error) { + keyLen := len(keys) + if keyLen == 0 { + return nil, errors.New("keys are not set") + } + + if keyLen > 500 { + return nil, errors.New("keys exceeds maximum limit") + } + + suffixes := make([]string, keyLen) + for i, key := range keys { + suffixes[i] = strings.TrimPrefix(key, k.namespace+string(kind)+":") + } + + data, err := k.dataGetterMap[kind](db, suffixes...) 
+ if err != nil { + return nil, err + } + return data, nil +} diff --git a/src/source_controller/cacheservice/cache/common/key/model.go b/src/source_controller/cacheservice/cache/common/key/model.go new file mode 100644 index 0000000000..0a77b937e4 --- /dev/null +++ b/src/source_controller/cacheservice/cache/common/key/model.go @@ -0,0 +1,58 @@ +/* + * Tencent is pleased to support the open source community by making + * 蓝鲸智云 - 配置平台 (BlueKing - Configuration System) available. + * Copyright (C) 2017 THL A29 Limited, + * a Tencent company. All rights reserved. + * Licensed under the MIT License (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at http://opensource.org/licenses/MIT + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on + * an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, + * either express or implied. See the License for the + * specific language governing permissions and limitations under the License. + * We undertake not to change the open source license (MIT license) applicable + * to the current version of the project delivered to anyone in the future. 
package key

import (
	"fmt"
	"time"

	"configcenter/src/common"
	"configcenter/src/common/mapstr"
	"configcenter/src/storage/dal"
	"configcenter/src/storage/stream/types"
)

// init registers the model cache key generator into the package registry.
func init() {
	addKeyGenerator(ModelType, modelKey)
}

// modelKey is the cache key generator for model resources, backed by the
// object description table (common.BKTableNameObjDes) and fed by mongodb
// change stream events.
var modelKey = KeyGenerator{
	namespace: fmt.Sprintf("%s%s:", common.BKCacheKeyV3Prefix, ModelType),
	watchOpt: types.Options{
		EventStruct: new(mapstr.MapStr),
		Collection:  common.BKTableNameObjDes,
	},
	// 30 minutes base ttl with a ±10 minutes random spread (see KeyGenerator)
	expireSeconds:      30 * 60 * time.Second,
	expireRangeSeconds: [2]int{-600, 600},
	// models are identified by their numeric id field
	idGen: func(data interface{}) (string, float64, error) {
		return commonIDGenerator(data, common.BKFieldID)
	},
	keyGenMap: map[KeyKind]redisKeyGenerator{
		// secondary key: the model's bk_obj_id field
		ObjIDKind: func(data interface{}) (string, error) {
			return commonKeyGenerator(data, common.BKObjIDField)
		},
	},
	dataGetterMap: map[KeyKind]dataGetter{
		IDKind: func(db dal.DB, keys ...string) ([]interface{}, error) {
			return commonIDDataGetter(db, common.BKTableNameObjDes, common.BKFieldID, keys...)
		},
		ObjIDKind: func(db dal.DB, keys ...string) ([]interface{}, error) {
			return commonKeyDataGetter(db, common.BKTableNameObjDes, common.BKObjIDField, keys...)
		},
	},
}
package key

import (
	"fmt"
	"time"

	"configcenter/src/common"
	"configcenter/src/common/mapstr"
	"configcenter/src/storage/dal"
	"configcenter/src/storage/stream/types"
)

// init registers the model quote relation cache key generator into the
// package registry.
func init() {
	addKeyGenerator(ModelQuoteRelType, modelQuoteRelKey)
}

// modelQuoteRelKey is the cache key generator for model quote relations,
// backed by common.BKTableNameModelQuoteRelation and fed by mongodb change
// stream events. Relations have no numeric id, so the mongodb oid is used as
// the id key.
var modelQuoteRelKey = KeyGenerator{
	namespace: fmt.Sprintf("%s%s:", common.BKCacheKeyV3Prefix, ModelQuoteRelType),
	watchOpt: types.Options{
		EventStruct: new(mapstr.MapStr),
		Collection:  common.BKTableNameModelQuoteRelation,
	},
	// 30 minutes base ttl with a ±10 minutes random spread (see KeyGenerator)
	expireSeconds:      30 * 60 * time.Second,
	expireRangeSeconds: [2]int{-600, 600},
	// identified by the mongodb oid (score is always 0 for oid based keys)
	idGen: func(data interface{}) (string, float64, error) {
		return commonOidGenerator(data)
	},
	keyGenMap: map[KeyKind]redisKeyGenerator{
		// secondary key: the relation's destination model field
		DestModelKind: func(data interface{}) (string, error) {
			return commonKeyGenerator(data, common.BKDestModelField)
		},
	},
	dataGetterMap: map[KeyKind]dataGetter{
		IDKind: func(db dal.DB, keys ...string) ([]interface{}, error) {
			return commonOidDataGetter(db, common.BKTableNameModelQuoteRelation, keys...)
		},
		DestModelKind: func(db dal.DB, keys ...string) ([]interface{}, error) {
			return commonKeyWithOidGetter(db, common.BKTableNameModelQuoteRelation, common.BKDestModelField, keys...)
		},
	},
}
+ }, + }, +} diff --git a/src/source_controller/cacheservice/cache/common/key/types.go b/src/source_controller/cacheservice/cache/common/key/types.go new file mode 100644 index 0000000000..ef5a3a6a2f --- /dev/null +++ b/src/source_controller/cacheservice/cache/common/key/types.go @@ -0,0 +1,43 @@ +/* + * Tencent is pleased to support the open source community by making + * 蓝鲸智云 - 配置平台 (BlueKing - Configuration System) available. + * Copyright (C) 2017 THL A29 Limited, + * a Tencent company. All rights reserved. + * Licensed under the MIT License (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at http://opensource.org/licenses/MIT + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on + * an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, + * either express or implied. See the License for the + * specific language governing permissions and limitations under the License. + * We undertake not to change the open source license (MIT license) applicable + * to the current version of the project delivered to anyone in the future. 
+ */ + +package key + +// KeyType is the key type for common resource cache +type KeyType string + +var ( + // ModelType is the key type for model cache + ModelType KeyType = "model" + // AttributeType is the key type for model attribute cache + AttributeType KeyType = "attribute" + // ModelQuoteRelType is the key type for model quote relation cache + ModelQuoteRelType KeyType = "model_quote_relation" +) + +// KeyKind defines the cache key's different kind of caching aspects +type KeyKind string + +var ( + // IDKind is the key kind for data id that stores the detail of the cache key type + // other kind of keys only stores the id to get detail from IDKind + IDKind KeyKind = "id" + // ObjIDKind is the obj id key kind + ObjIDKind KeyKind = "bk_obj_id" + // DestModelKind is the destination model id key kind + DestModelKind KeyKind = "dest_model" +) diff --git a/src/source_controller/cacheservice/cache/common/key/util.go b/src/source_controller/cacheservice/cache/common/key/util.go new file mode 100644 index 0000000000..d1d198ba5b --- /dev/null +++ b/src/source_controller/cacheservice/cache/common/key/util.go @@ -0,0 +1,182 @@ +/* + * Tencent is pleased to support the open source community by making + * 蓝鲸智云 - 配置平台 (BlueKing - Configuration System) available. + * Copyright (C) 2017 THL A29 Limited, + * a Tencent company. All rights reserved. + * Licensed under the MIT License (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at http://opensource.org/licenses/MIT + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on + * an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, + * either express or implied. See the License for the + * specific language governing permissions and limitations under the License. 
+ * We undertake not to change the open source license (MIT license) applicable + * to the current version of the project delivered to anyone in the future. + */ + +package key + +import ( + "context" + "errors" + "strconv" + "strings" + + "configcenter/src/common" + "configcenter/src/common/mapstr" + "configcenter/src/common/util" + "configcenter/src/storage/dal" + "configcenter/src/storage/dal/types" + + "go.mongodb.org/mongo-driver/bson/primitive" +) + +func parseMapStrData(data interface{}) (mapstr.MapStr, error) { + switch t := data.(type) { + case *mapstr.MapStr: + if t == nil { + return nil, errors.New("data is nil") + } + return *t, nil + case *map[string]interface{}: + if t == nil { + return nil, errors.New("data is nil") + } + return *t, nil + case mapstr.MapStr: + return t, nil + case map[string]interface{}: + return t, nil + default: + return nil, errors.New("data is not of mapstr.MapStr type") + } +} + +func commonIDGenerator(data interface{}, field string) (string, float64, error) { + commonData, err := parseMapStrData(data) + if err != nil { + return "", 0, err + } + + id, err := util.GetInt64ByInterface(commonData[field]) + if err != nil { + return "", 0, err + } + + return strconv.FormatInt(id, 10), float64(id), nil +} + +func commonOidGenerator(data interface{}) (string, float64, error) { + commonData, err := parseMapStrData(data) + if err != nil { + return "", 0, err + } + + _, exists := commonData[common.MongoMetaID] + if !exists { + return "", 0, errors.New("data oid field is not exists") + } + + switch t := commonData[common.MongoMetaID].(type) { + case primitive.ObjectID: + return t.Hex(), 0, nil + case string: + return t, 0, nil + default: + return "", 0, errors.New("data oid field is invalid") + } +} + +func commonKeyGenerator(data interface{}, fields ...string) (string, error) { + if dataStr, ok := data.(string); ok { + return dataStr, nil + } + + commonData, err := parseMapStrData(data) + if err != nil { + return "", err + } + + var keys 
[]string + for _, field := range fields { + keys = append(keys, util.GetStrByInterface(commonData[field])) + } + + key := strings.Join(keys, "||") + if len(key) == 0 { + return "", errors.New("key is empty") + } + + return key, nil +} + +func commonIDDataGetter(db dal.DB, table, idField string, keys ...string) ([]interface{}, error) { + ids := make([]int64, len(keys)) + var err error + + for i, key := range keys { + ids[i], err = strconv.ParseInt(key, 10, 64) + if err != nil { + return nil, err + } + } + + cond := mapstr.MapStr{ + idField: mapstr.MapStr{common.BKDBIN: ids}, + } + + return getCommonDBDataByCond(db, table, cond) +} + +func commonKeyDataGetter(db dal.DB, table, field string, keys ...string) ([]interface{}, error) { + cond := mapstr.MapStr{ + field: mapstr.MapStr{common.BKDBIN: keys}, + } + + return getCommonDBDataByCond(db, table, cond) +} + +func commonOidDataGetter(db dal.DB, table string, keys ...string) ([]interface{}, error) { + oids := make([]primitive.ObjectID, len(keys)) + for i, key := range keys { + oid, err := primitive.ObjectIDFromHex(key) + if err != nil { + return nil, err + } + oids[i] = oid + } + + cond := mapstr.MapStr{ + common.MongoMetaID: mapstr.MapStr{common.BKDBIN: oids}, + } + + opts := types.NewFindOpts().SetWithObjectID(true) + return getCommonDBDataByCond(db, table, cond, opts) +} + +func commonKeyWithOidGetter(db dal.DB, table, field string, keys ...string) ([]interface{}, error) { + cond := mapstr.MapStr{ + field: mapstr.MapStr{common.BKDBIN: keys}, + } + + opts := types.NewFindOpts().SetWithObjectID(true) + return getCommonDBDataByCond(db, table, cond, opts) +} + +func getCommonDBDataByCond(db dal.DB, table string, cond mapstr.MapStr, opts ...*types.FindOpts) ([]interface{}, + error) { + + dataArr := make([]mapstr.MapStr, 0) + err := db.Table(table).Find(cond, opts...).All(context.Background(), &dataArr) + if err != nil { + return nil, err + } + + result := make([]interface{}, len(dataArr)) + for i, data := range dataArr { 
+ result[i] = data + } + + return result, nil +} diff --git a/src/source_controller/cacheservice/service/cache.go b/src/source_controller/cacheservice/service/cache.go index bc95a6269b..bfc9cb1975 100644 --- a/src/source_controller/cacheservice/service/cache.go +++ b/src/source_controller/cacheservice/service/cache.go @@ -69,7 +69,8 @@ func (s *cacheService) SearchHostWithHostIDInCache(ctx *rest.Contexts) { host, err := s.cacheSet.Host.GetHostWithID(ctx.Kit.Ctx, opt) if err != nil { - ctx.RespErrorCodeOnly(common.CCErrCommDBSelectFailed, "search host with id in cache, but get host failed, err: %v", err) + ctx.RespErrorCodeOnly(common.CCErrCommDBSelectFailed, + "search host with id in cache, but get host failed, err: %v", err) return } ctx.RespString(&host) @@ -88,7 +89,8 @@ func (s *cacheService) ListHostWithHostIDInCache(ctx *rest.Contexts) { host, err := s.cacheSet.Host.ListHostWithHostIDs(ctx.Kit.Ctx, opt) if err != nil { - ctx.RespErrorCodeOnly(common.CCErrCommDBSelectFailed, "list host with id in cache, but get host failed, err: %v", err) + ctx.RespErrorCodeOnly(common.CCErrCommDBSelectFailed, + "list host with id in cache, but get host failed, err: %v", err) return } ctx.RespStringArray(host) @@ -104,7 +106,8 @@ func (s *cacheService) ListHostWithPageInCache(ctx *rest.Contexts) { cnt, host, err := s.cacheSet.Host.ListHostsWithPage(ctx.Kit.Ctx, opt) if err != nil { - ctx.RespErrorCodeOnly(common.CCErrCommDBSelectFailed, "list host with id in cache, but get host failed, err: %v", err) + ctx.RespErrorCodeOnly(common.CCErrCommDBSelectFailed, + "list host with id in cache, but get host failed, err: %v", err) return } ctx.RespCountInfoString(cnt, host) @@ -169,7 +172,8 @@ func (s *cacheService) SearchBusinessInCache(ctx *rest.Contexts) { } biz, err := s.cacheSet.Business.GetBusiness(ctx.Kit.Ctx, bizID) if err != nil { - ctx.RespErrorCodeOnly(common.CCErrCommDBSelectFailed, "search biz with id in cache, but get biz failed, err: %v", err) + 
ctx.RespErrorCodeOnly(common.CCErrCommDBSelectFailed, + "search biz with id in cache, but get biz failed, err: %v", err) return } ctx.RespString(&biz) @@ -219,7 +223,8 @@ func (s *cacheService) SearchCustomLayerInCache(ctx *rest.Contexts) { inst, err := s.cacheSet.Business.GetCustomLevelDetail(ctx.Kit.Ctx, objID, ctx.Kit.SupplierAccount, instID) if err != nil { - ctx.RespErrorCodeOnly(common.CCErrCommDBSelectFailed, "search custom layer with id in cache failed, err: %v", err) + ctx.RespErrorCodeOnly(common.CCErrCommDBSelectFailed, "search custom layer with id in cache failed, err: %v", + err) return } ctx.RespString(&inst) @@ -312,7 +317,8 @@ func (s *cacheService) WatchEvent(ctx *rest.Contexts) { if len(options.Cursor) != 0 { events, err := s.cacheSet.Event.WatchWithCursor(ctx.Kit, key, options) if err != nil { - blog.Errorf("watch event with cursor failed, cursor: %s, err: %v, rid: %s", options.Cursor, err, ctx.Kit.Rid) + blog.Errorf("watch event with cursor failed, cursor: %s, err: %v, rid: %s", options.Cursor, err, + ctx.Kit.Rid) ctx.RespAutoError(err) return } @@ -404,3 +410,25 @@ func (s *cacheService) generateWatchEventResp(startCursor string, rsc watch.Curs return result } + +// ListCommonCacheWithKey search common resource cache info with specified keys +func (s *cacheService) ListCommonCacheWithKey(cts *rest.Contexts) { + cacheType := cts.Request.PathParameter("type") + if len(cacheType) == 0 { + cts.RespAutoError(cts.Kit.CCError.CCErrorf(common.CCErrCommParamsNeedSet, "type")) + return + } + + opt := new(metadata.ListCommonCacheWithKeyOpt) + if err := cts.DecodeInto(opt); err != nil { + cts.RespAutoError(err) + return + } + + res, err := s.cacheSet.CommonRes.ListWithKey(cts.Kit, cacheType, opt) + if err != nil { + cts.RespAutoError(err) + return + } + cts.RespStringArray(res) +} diff --git a/src/source_controller/cacheservice/service/service_initfunc.go b/src/source_controller/cacheservice/service/service_initfunc.go index 28571eb461..c9b3622af1 100644 
// initCache registers all cache related restful api handlers on web.
func (s *cacheService) initCache(web *restful.WebService) {
	utility := rest.NewRestUtility(rest.Config{
		ErrorIf:  s.engine.CCErr,
		Language: s.engine.Language,
	})

	utility.AddHandler(rest.Action{Verb: http.MethodPost, Path: "/find/cache/host/with_inner_ip",
		Handler: s.SearchHostWithInnerIPInCache})

	// Note: only for datacollection api!!!
	utility.AddHandler(rest.Action{Verb: http.MethodPost, Path: "/find/cache/host/with_agent_id",
		Handler: s.SearchHostWithAgentIDInCache})
	utility.AddHandler(rest.Action{Verb: http.MethodPost, Path: "/find/cache/host/with_host_id",
		Handler: s.SearchHostWithHostIDInCache})
	utility.AddHandler(rest.Action{Verb: http.MethodPost, Path: "/findmany/cache/host/with_host_id",
		Handler: s.ListHostWithHostIDInCache})
	utility.AddHandler(rest.Action{Verb: http.MethodPost, Path: "/findmany/cache/host/with_page",
		Handler: s.ListHostWithPageInCache})
	utility.AddHandler(rest.Action{Verb: http.MethodPost, Path: "/find/cache/biz/{bk_biz_id}",
		Handler: s.SearchBusinessInCache})
	utility.AddHandler(rest.Action{Verb: http.MethodPost, Path: "/findmany/cache/biz", Handler: s.ListBusinessInCache})
	utility.AddHandler(rest.Action{Verb: http.MethodPost, Path: "/find/cache/set/{bk_set_id}",
		Handler: s.SearchSetInCache})
	utility.AddHandler(rest.Action{Verb: http.MethodPost, Path: "/findmany/cache/set", Handler: s.ListSetsInCache})
	utility.AddHandler(rest.Action{Verb: http.MethodPost, Path: "/find/cache/module/{bk_module_id}",
		Handler: s.SearchModuleInCache})
	utility.AddHandler(rest.Action{Verb: http.MethodPost, Path: "/findmany/cache/module",
		Handler: s.ListModulesInCache})
	utility.AddHandler(rest.Action{Verb: http.MethodPost, Path: "/find/cache/{bk_obj_id}/{bk_inst_id}",
		Handler: s.SearchCustomLayerInCache})
	// NOTE(review): this path has no leading slash, unlike every other route
	// registered here — confirm whether that is intentional
	utility.AddHandler(rest.Action{Verb: http.MethodPost, Path: "find/cache/topo/node_path/biz/{bk_biz_id}",
		Handler: s.SearchBizTopologyNodePath})
	utility.AddHandler(rest.Action{Verb: http.MethodGet, Path: "/find/cache/topo/brief/biz/{biz}",
		Handler: s.SearchBusinessBriefTopology})
	utility.AddHandler(rest.Action{Verb: http.MethodPost, Path: "/watch/cache/event", Handler: s.WatchEvent})

	// common resource cache api
	// NOTE(review): fmt.Sprintf has no formatting verbs here and could be a
	// plain string literal (staticcheck S1039); the "fmt" import would then
	// become unused and must be removed together with this call
	utility.AddHandler(rest.Action{Verb: http.MethodPost, Path: fmt.Sprintf("/find/cache/type/{type}/with_key"),
		Handler: s.ListCommonCacheWithKey})

	utility.AddToRestfulWebService(web)
}
var (
	// dbMap holds one DB client per configuration prefix.
	// NOTE(review): as the original (Chinese) comment admitted, this map is
	// read by Client()/Table()/Healthz() and written by InitClient() without
	// synchronization — concurrent init and access can panic on the map;
	// confirm all InitClient calls finish before serving traffic.
	dbMap = make(map[string]dal.DB)

	// lastInitErr records the most recent InitClient failure
	lastInitErr errors.CCErrorCoder
	// lastConfigErr records the most recent ParseConfig failure
	lastConfigErr errors.CCErrorCoder
)

// Client get the DB client registered under the given config prefix.
// With no prefix it returns the default client (empty-string key); an
// unregistered prefix yields nil.
func Client(prefix ...string) dal.DB {
	var pre string
	if len(prefix) > 0 {
		pre = prefix[0]
	}
	return dbMap[pre]
}

// Table 获取操作db table的对象 (get the table handle from the default DB client).
func Table(name string) dbType.Table {
	return Client().Table(name)
}

// InitClient init mongodb client for the given config prefix and store it
// into dbMap.
func InitClient(prefix string, config *mongo.Config) errors.CCErrorCoder {
	lastInitErr = nil
	var dbErr error
	dbMap[prefix], dbErr = local.NewMgo(config.GetMongoConf(), time.Minute)
	if dbErr != nil {
		blog.Errorf("failed to connect the mongo server, error info is %s", dbErr.Error())
		lastInitErr = errors.NewCCError(common.CCErrCommResourceInitFailed,
			"'"+prefix+".mongodb' initialization failed")
		return lastInitErr
	}
	return nil
}
Validate() errors.CCErrorCoder { - return nil -} - -// UpdateConfig TODO +// UpdateConfig update mongodb configuration func UpdateConfig(prefix string, config mongo.Config) { - // 不支持热更行 + // 不支持热更新 return } -// Healthz TODO +// Healthz check db health status func Healthz() (items []metric.HealthItem) { - item := &metric.HealthItem{ IsHealthy: true, Name: types.CCFunctionalityMongo, } items = append(items, *item) - if db == nil { - item.IsHealthy = false - item.Message = "not initialized" - return - } - if err := db.Ping(); err != nil { - item.IsHealthy = false - item.Message = "connect error. err: " + err.Error() - return + + for prefix, db := range dbMap { + if db == nil { + item.IsHealthy = false + item.Message = prefix + " db not initialized" + return + } + + if err := db.Ping(); err != nil { + item.IsHealthy = false + item.Message = prefix + " db connect error. err: " + err.Error() + return + } } return diff --git a/src/thirdparty/elasticsearch/esclient.go b/src/thirdparty/elasticsearch/esclient.go index 3abe766d50..982c2f7ec5 100644 --- a/src/thirdparty/elasticsearch/esclient.go +++ b/src/thirdparty/elasticsearch/esclient.go @@ -17,23 +17,34 @@ import ( "github.com/olivere/elastic/v7" ) -// EsSrv TODO +// EsSrv es client type EsSrv struct { Client *elastic.Client } -// NewEsClient TODO -func NewEsClient(esConf EsConfig) (*elastic.Client, error) { +// NewEsClient new es client +func NewEsClient(esConf *EsConfig) (*EsSrv, error) { + if esConf == nil { + return nil, errors.New("es config is not set") + } + // Obtain a client and connect to the default ElasticSearch installation // on 127.0.0.1:9200. Of course you can configure your client to connect // to other hosts and configure it in various other ways. 
- httpClient := &http.Client{} - client := &elastic.Client{} - var err error + httpClient := new(http.Client) + + cliOpt := []elastic.ClientOptionFunc{ + elastic.SetURL(esConf.EsUrl), + elastic.SetSniff(false), + elastic.SetBasicAuth(esConf.EsUser, esConf.EsPassword), + } + if strings.HasPrefix(esConf.EsUrl, "https://") { tlsConfig := new(tls.Config) tlsConfig.InsecureSkipVerify = esConf.TLSClientConfig.InsecureSkipVerify - if !tlsConfig.InsecureSkipVerify && len(esConf.TLSClientConfig.CAFile) != 0 && len(esConf.TLSClientConfig.CertFile) != 0 && len(esConf.TLSClientConfig.KeyFile) != 0 { + + if !tlsConfig.InsecureSkipVerify && len(esConf.TLSClientConfig.CAFile) != 0 && + len(esConf.TLSClientConfig.CertFile) != 0 && len(esConf.TLSClientConfig.KeyFile) != 0 { var err error tlsConfig, err = ssl.ClientTLSConfVerity(esConf.TLSClientConfig.CAFile, esConf.TLSClientConfig.CertFile, esConf.TLSClientConfig.KeyFile, esConf.TLSClientConfig.Password) @@ -41,38 +52,29 @@ func NewEsClient(esConf EsConfig) (*elastic.Client, error) { return nil, err } } + // if use https tls or else, config httpClient first tr := &http.Transport{ TLSClientConfig: tlsConfig, } httpClient.Transport = tr - client, err = elastic.NewClient( - elastic.SetHttpClient(httpClient), - elastic.SetURL(esConf.EsUrl), - elastic.SetScheme("https"), - elastic.SetSniff(false), - elastic.SetBasicAuth(esConf.EsUser, esConf.EsPassword)) - if err != nil { - blog.Errorf("create new es https es client error, err: %v", err) - return nil, err - } - } else { - client, err = elastic.NewClient( - elastic.SetHttpClient(httpClient), - elastic.SetURL(esConf.EsUrl), - elastic.SetSniff(false), - elastic.SetBasicAuth(esConf.EsUser, esConf.EsPassword)) - if err != nil { - blog.Errorf("create new http es client error, err: %v", err) - return nil, err - } + + cliOpt = append(cliOpt, elastic.SetScheme("https")) + } + + cliOpt = append(cliOpt, elastic.SetHttpClient(httpClient)) + + client, err := elastic.NewClient(cliOpt...) 
+ if err != nil { + blog.Errorf("create new es https es client error, err: %v", err) + return nil, err } // it's amazing that we found new client result success with value nil once a time. if client == nil { return nil, errors.New("create es client, but it's is nil") } - return client, nil + return &EsSrv{Client: client}, nil } // Search search elastic with target conditions. @@ -123,7 +125,7 @@ func (es *EsSrv) Count(ctx context.Context, query elastic.Query, indexes []strin return count, nil } -// EsConfig TODO +// EsConfig es configuration type EsConfig struct { FullTextSearch string EsUrl string @@ -132,20 +134,25 @@ type EsConfig struct { TLSClientConfig apiutil.TLSClientConfig } -// ParseConfigFromKV returns a new config -func ParseConfigFromKV(prefix string, configMap map[string]string) (EsConfig, error) { +// ParseConfig returns a new es config from config file +func ParseConfig(prefix string) (*EsConfig, error) { fullTextSearch, _ := cc.String(prefix + ".fullTextSearch") url, _ := cc.String(prefix + ".url") usr, _ := cc.String(prefix + ".usr") pwd, _ := cc.String(prefix + ".pwd") - conf := EsConfig{ + conf := &EsConfig{ FullTextSearch: fullTextSearch, EsUrl: url, EsUser: usr, EsPassword: pwd, } + var err error conf.TLSClientConfig, err = apiutil.NewTLSClientConfigFromConfig(prefix) - return conf, err + if err != nil { + return nil, err + } + + return conf, nil } diff --git a/src/tools/cmdb_ctl/cmd/sync.go b/src/tools/cmdb_ctl/cmd/sync.go new file mode 100644 index 0000000000..c3425861a7 --- /dev/null +++ b/src/tools/cmdb_ctl/cmd/sync.go @@ -0,0 +1,143 @@ +/* + * Tencent is pleased to support the open source community by making + * 蓝鲸智云 - 配置平台 (BlueKing - Configuration System) available. + * Copyright (C) 2017 THL A29 Limited, + * a Tencent company. All rights reserved. + * Licensed under the MIT License (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at http://opensource.org/licenses/MIT + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on + * an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, + * either express or implied. See the License for the + * specific language governing permissions and limitations under the License. + * We undertake not to change the open source license (MIT license) applicable + * to the current version of the project delivered to anyone in the future. + */ + +package cmd + +import ( + "encoding/json" + "fmt" + + ftypes "configcenter/pkg/types/sync/full-text-search" + "configcenter/src/common/metadata" + "configcenter/src/common/types" + + "github.com/spf13/cobra" +) + +func init() { + rootCmd.AddCommand(NewSyncCommand()) +} + +// NewSyncCommand new sync command +func NewSyncCommand() *cobra.Command { + cmd := &cobra.Command{ + Use: "sync", + Short: "sync resource related operation", + Run: func(cmd *cobra.Command, args []string) { + _ = cmd.Help() + }, + } + + cmd.AddCommand(NewFullTextSearchCmd()) + + return cmd +} + +// NewFullTextSearchCmd new full-text-search sync command +func NewFullTextSearchCmd() *cobra.Command { + conf := new(fullTextSearchConf) + + cmd := &cobra.Command{ + Use: "full-text-search", + Short: "sync full-text-search related info", + RunE: func(cmd *cobra.Command, args []string) error { + return runFullTextSearchSync(conf) + }, + } + + conf.addFlags(cmd) + return cmd +} + +type fullTextSearchConf struct { + isSyncData bool + isMigrate bool + dataOpt ftypes.SyncDataOption +} + +func (c *fullTextSearchConf) addFlags(cmd *cobra.Command) { + cmd.PersistentFlags().BoolVar(&c.isMigrate, "is-migrate", false, "is migrate full-text-search data") + cmd.PersistentFlags().BoolVar(&c.isSyncData, "is-sync-data", false, "is sync specified full-text-search data") + cmd.PersistentFlags().BoolVar(&c.dataOpt.IsAll, "is-all", false, "is sync all 
full-text-search data")
+	cmd.PersistentFlags().StringVar(&c.dataOpt.Index, "index", "", "need sync index")
+	cmd.PersistentFlags().StringVar(&c.dataOpt.Collection, "collection", "", "need sync collection")
+	cmd.PersistentFlags().StringSliceVar(&c.dataOpt.Oids, "oids", make([]string, 0), "need sync data ids")
+}
+
+func runFullTextSearchSync(c *fullTextSearchConf) error {
+	if c.isMigrate {
+		return runFullTextSearchMigrate()
+	}
+
+	if c.isSyncData {
+		return runFullTextSearchDataSync(&c.dataOpt)
+	}
+
+	return fmt.Errorf("one of is-migrate and is-sync-data option must be set")
+}
+
+func runFullTextSearchMigrate() error {
+	resp, err := doCmdbHttpRequest(types.CC_MODULE_SYNC, "/sync/v3/migrate/full/text/search", "{}")
+	if err != nil {
+		return err
+	}
+
+	res := new(migrateResp)
+	if err = json.NewDecoder(resp.Body).Decode(res); err != nil {
+		fmt.Printf("decode response body failed, err: %v\n", err)
+		return err
+	}
+
+	if err = res.CCError(); err != nil {
+		fmt.Printf("do full text search migration failed, err: %v\n", err)
+		return err
+	}
+
+	resJs, err := json.Marshal(res.Data)
+	if err != nil {
+		fmt.Printf("marshal full text search migration result(%+v) failed, err: %v\n", res.Data, err)
+		return err
+	}
+
+	fmt.Printf("do full text search migration success, result: %s\n", string(resJs))
+	return nil
+}
+
+type migrateResp struct {
+	metadata.BaseResp `json:",inline"`
+	Data ftypes.MigrateResult `json:"data"`
+}
+
+func runFullTextSearchDataSync(opt *ftypes.SyncDataOption) error {
+	resp, err := doCmdbHttpRequest(types.CC_MODULE_SYNC, "/sync/v3/sync/full/text/search/data", opt)
+	if err != nil {
+		return err
+	}
+
+	res := new(metadata.BaseResp)
+	if err = json.NewDecoder(resp.Body).Decode(res); err != nil {
+		fmt.Printf("decode response body failed, err: %v\n", err)
+		return err
+	}
+
+	if err = res.CCError(); err != nil {
+		fmt.Printf("do full text search data sync failed, err: %v\n", err)
+		return err
+	}
+
+	return nil
+}
diff --git 
a/src/tools/cmdb_ctl/cmd/util.go b/src/tools/cmdb_ctl/cmd/util.go index 552a24aea7..c1e3e8ab83 100644 --- a/src/tools/cmdb_ctl/cmd/util.go +++ b/src/tools/cmdb_ctl/cmd/util.go @@ -13,7 +13,16 @@ package cmd import ( + "bytes" "fmt" + "net/http" + "strings" + + "configcenter/src/common" + "configcenter/src/common/json" + "configcenter/src/common/types" + "configcenter/src/common/util" + "configcenter/src/tools/cmdb_ctl/app/config" ) // WithRedColor TODO @@ -30,3 +39,65 @@ func WithGreenColor(str string) string { func WithBlueColor(str string) string { return fmt.Sprintf("%c[1;40;34m>> %s %c[0m\n", 0x1B, str, 0x1B) } + +func doCmdbHttpRequest(ccModule, path string, body interface{}) (*http.Response, error) { + // get server address from zk + zk, err := config.NewZkService(config.Conf.ZkAddr) + if err != nil { + fmt.Printf("new zk client failed, err: %v\n", err) + return nil, err + } + + zkPath := types.CC_SERV_BASEPATH + "/" + ccModule + children, err := zk.ZkCli.GetChildren(zkPath) + if err != nil { + fmt.Printf("get %s server failed, err: %v\n", ccModule, err) + return nil, err + } + + server := "" + for _, child := range children { + node, err := zk.ZkCli.Get(zkPath + "/" + child) + if err != nil { + return nil, err + } + svr := new(types.EventServInfo) + if err := json.Unmarshal([]byte(node), svr); err != nil { + return nil, err + } + server = fmt.Sprintf("%s:%d", svr.RegisterIP, svr.Port) + break + } + + if server == "" { + return nil, fmt.Errorf("%s server not found", ccModule) + } + + // do http request + url := fmt.Sprintf("http://%s/%s", server, strings.TrimPrefix(path, "/")) + + data, err := json.Marshal(body) + if err != nil { + fmt.Printf("marshal request body %+v failed, err: %v\n", body, err) + return nil, err + } + + req, err := http.NewRequest(http.MethodPost, url, bytes.NewReader(data)) + if err != nil { + return nil, err + } + + req.Header.Add(common.BKHTTPOwnerID, "0") + req.Header.Add(common.BKHTTPHeaderUser, "cmdb_tool") + 
req.Header.Add("Content-Type", "application/json")
+	req.Header.Add(common.BKHTTPCCRequestID, util.GenerateRID())
+
+	client := new(http.Client)
+	resp, err := client.Do(req)
+	if err != nil {
+		fmt.Printf("do request failed, err: %v, url: %s, body: %s\n", err, url, string(data))
+		return nil, err
+	}
+
+	return resp, nil
+}
diff --git a/src/tools/cmdb_ctl/readme.md b/src/tools/cmdb_ctl/readme.md
index dcf54207f8..ca97281a6b 100644
--- a/src/tools/cmdb_ctl/readme.md
+++ b/src/tools/cmdb_ctl/readme.md
@@ -416,3 +416,50 @@ denyall配置为false的情况下,limit和ttl配置才能生效
     }
   ]
 ```
+
+### 数据同步
+
+- 使用方式
+  ```
+  ./tool_ctl sync [flags]
+  ./tool_ctl sync [command]
+  ```
+- 子命令
+  ```
+  full-text-search 全文检索数据同步
+  ```
+
+#### 全文检索数据同步
+
+- `full-text-search`子命令的命令行参数
+  ```
+  --is-migrate[=false]: 初始化全文检索数据,不可以和is-sync-data参数同时设置
+  --is-sync-data[=false]: 同步全文检索数据,以下参数均需要设置本参数
+  --is-all[=false]: 同步全量数据,不可以和collection参数同时设置
+  --index="": 需要进行数据同步的Elasticsearch Index
+  --collection="": 需要与Elasticsearch进行数据同步的MongoDB数据表
+  --oids=[]: 需要与Elasticsearch进行数据同步的MongoDB数据ID列表,最多1000个,需要首先指定collection命令行参数
+  ```
+
+- 示例
+  ```
+  全文检索初始化:
+  ./tool_ctl sync full-text-search --zk-addr=127.0.0.1:2181 --is-migrate
+  ```
+
+  ```
+  将全量的全文检索数据同步到Elasticsearch:
+  ./tool_ctl sync full-text-search --zk-addr=127.0.0.1:2181 --is-sync-data --is-all
+  ```
+
+  ```
+  将指定的索引的全文检索数据同步到Elasticsearch:
+  ./tool_ctl sync full-text-search --zk-addr=127.0.0.1:2181 --is-sync-data --index="bk_cmdb.host"
+  ```
+
+  ```
+  将指定的全文检索数据同步到Elasticsearch:
+  ./tool_ctl sync full-text-search --zk-addr=127.0.0.1:2181 --is-sync-data --collection="cc_HostBase" --oids=65291386812c38ce22236665,6535e53a225ab588783ae11c
+  ```
+
+ 
diff --git a/src/tools/monstache/CHANGELOG.md b/src/tools/monstache/CHANGELOG.md
deleted file mode 100755
index e69de29bb2..0000000000
diff --git a/src/tools/monstache/Makefile b/src/tools/monstache/Makefile
deleted file mode 100755
index eeab7742f5..0000000000
--- a/src/tools/monstache/Makefile
+++ /dev/null
@@ 
-1,20 +0,0 @@ -TARGET = monstache-plugin - -# build -PWD = $(shell pwd) -LOCALBUILD = $(PWD)/build -OUTPUT_DIR ?= $(LOCALBUILD) - -# package -BINDIR = ${OUTPUT_DIR}/monstache - -default: - go build -buildmode=plugin -o $(BINDIR)/etc/$(TARGET).so *.go - @cp -rf etc/config.toml $(BINDIR)/etc - @cp -rf etc/extra.toml $(BINDIR)/etc - @cp -rf monstache.sh $(BINDIR) - @cp -rf CHANGELOG.md $(BINDIR) - @cp -rf README.md $(BINDIR) - -clean: - @rm -rf ${BINDIR} $(LOCALBUILD) diff --git a/src/tools/monstache/README.md b/src/tools/monstache/README.md deleted file mode 100755 index 32ff1fe5e5..0000000000 --- a/src/tools/monstache/README.md +++ /dev/null @@ -1,80 +0,0 @@ -蓝鲸CMDB全文索引Monstcache插件 -============================== - -## 概述 - -基于特定的版本包进行Monstcache和插件的部署安装; - -```shell -monstache/ -├── CHANGELOG.md -├── Makefile -├── README.md -├── build -│   └── monstache -│   ├── CHANGELOG.md -│   ├── README.md -│   ├── etc -│   │   ├── config.toml -│   │   └── monstache-plugin.so -│   ├── monstache -│   └── monstache.sh -├── etc -│   ├── config.toml -│   └── extra.toml -├── monstache.sh -└── plugin.go -``` - -## 配置 - -**Monstache config.toml配置解释** - -| 参数 | 说明 | -| --------------------------------- | -------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | -| mongo-url | MongoDB实例的主节点访问地址。详情请参见。[mongo-url](https://rwynn.github.io/monstache-site/config/#mongo-url) | -| elasticsearch-urls | Elasticsearch的访问地址。详情请参见 [elasticsearch-urls](https://rwynn.github.io/monstache-site/config/#elasticsearch-urls) | -| direct-read-namespaces | 指定待同步的集合,详情请参见[direct-read-namespaces](https://rwynn.github.io/monstache-site/config/#direct-read-namespaces)。 | -| direct-read-dynamic-include-regex | 
通过正则表达式指定需要监听的集合。此设置可以用来监控符合正则表达式的集合中数据,注意:该功能是在2021-03-18日才合入rel6分支,请使用最新的rel6分支或者2021-03-18之后的release编译最新的Monstache | -| change-stream-namespaces | 如果要使用MongoDB变更流功能,需要指定此参数。启用此参数后,oplog追踪会被设置为无效,详情请参见[change-stream-namespaces](https://rwynn.github.io/monstache-site/config/#change-stream-namespaces)。 | -| namespace-regex | 通过正则表达式指定需要监听的集合。此设置可以用来监控符合正则表达式的集合中数据的变化。 | -| elasticsearch-user | 访问Elasticsearch的用户名。 | -| elasticsearch-password | 访问Elasticsearch的用户密码。 | -| elasticsearch-max-conns | 定义连接ES的线程数。默认为4,即使用4个Go线程同时将数据同步到ES。 | -| mapper-plugin-path | 启动插件相对于monstache的路径 | -| resume | 默认为false。设置为true,Monstache会将已成功同步到ES的MongoDB操作的时间戳写入monstache.monstache集合中。当Monstache因为意外停止时,可通过该时间戳恢复同步任务,避免数据丢失。如果指定了cluster-name,该参数将自动开启,详情请参见[resume](https://rwynn.github.io/monstache-site/config/#resume)。 | - - -**Monstache plugin extra.toml配置解释** - -| 参数 | 说明 | -| --------------------------------- | -------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | -| elasticsearch-shard-num | 采用插件场景下必须指定ES的Sharding number。详情请参见。[elasticsearch-shard-num](https://www.elastic.co/guide/en/elasticsearch/reference/current/index-modules.html) | -| elasticsearch-replica-num | 采用插件场景下必须制定ES的Replica number。详情请参见。[elasticsearch-replica-num](https://www.elastic.co/guide/en/elasticsearch/reference/current/index-modules.html) | - -阅读官方文档[monstache doc](https://rwynn.github.io/monstache-site/config/) 可以根据自己的需求进行`高级配置` -## 编译 - -进入源码根目录执行`make`或`make server`编译指令时,默认会编译后端服务涉及到的所有组件,包括monstache及其对应的monstache-plugin.so插件。您也可以进入到monstache目录,执行`make`命令单独进行monstache及其插件的编译。 -## 配置 - -monstache 涉及到的配置同样需要执行`init.py`执行,主要涉及到elasticsearch-shard-num ,elasticsearch-replica-num两个配置,其余告警配置如: 
`direct-read-dynamic-include-regex`、`namespace-regex`和`mapper-plugin-path`等如需变更,需要用户手动进行指定。 -## 部署安装 - -整体打包cmdb.tgz时会将插件monstache-plugin.so及对应的配置文件进行打包,monstache二进制需要您按照本文概述中的目录结构示意图进行部署。之后修改 etc/config.toml和etc/extra.toml配置内容,其中配置文件的路径是相对于二进制 `monstache`的路径,如需改动此路径请注意需要同步修改启动脚本`monstache.sh`中的配置文件启动路径。上述步骤完成后您可以通过以下方式运行: - -```shell -sh monstache.sh start -``` - -当然,也可以通过`systemd`或者简单的`nohup`方式运行, 例如 `monstache -f config.toml -mapper-plugin-path monstache-plugin.so` - -## 索引管理 - -插件将会创建附带特定版本后缀的真实ES索引,如`bk_cmdb.biz_20210701`, 并且只会在索引不存在时创建,特定版本索引的结构信息在插件代码中固定,在索引结构发生变化时插件中版本后缀也会发生变化。 -在成功创建索引后,插件会为每一个索引创建系统别名,如`bk_cmdb.biz` `bk_cmdb.set` `bk_cmdb.module` `bk_cmdb.host` `bk_cmdb.model` `bk_cmdb.object_instance`, 这些别名为蓝鲸CMDB内部索引、查询等操作所使用的别名。 - -索引分词器的指定是在plugin.go中完成,日常维护,如做reindex文档迁移后,需要将最终的真实索引和蓝鲸CMDB系统别名关联, 以保证系统能够正确处理文档数据。 - -阅读官方文档 [elastic reindex doc](https://www.elastic.co/guide/en/elasticsearch/reference/current/docs-reindex.html) 了解reindex操作。 -阅读官方文档 [elastic alias doc](https://www.elastic.co/guide/en/elasticsearch/reference/current/indices-aliases.html) 了解索引别名机制。 diff --git a/src/tools/monstache/build/monstache/CHANGELOG.md b/src/tools/monstache/build/monstache/CHANGELOG.md deleted file mode 100755 index e69de29bb2..0000000000 diff --git a/src/tools/monstache/build/monstache/README.md b/src/tools/monstache/build/monstache/README.md deleted file mode 100755 index 32ff1fe5e5..0000000000 --- a/src/tools/monstache/build/monstache/README.md +++ /dev/null @@ -1,80 +0,0 @@ -蓝鲸CMDB全文索引Monstcache插件 -============================== - -## 概述 - -基于特定的版本包进行Monstcache和插件的部署安装; - -```shell -monstache/ -├── CHANGELOG.md -├── Makefile -├── README.md -├── build -│   └── monstache -│   ├── CHANGELOG.md -│   ├── README.md -│   ├── etc -│   │   ├── config.toml -│   │   └── monstache-plugin.so -│   ├── monstache -│   └── monstache.sh -├── etc -│   ├── config.toml -│   └── extra.toml -├── monstache.sh -└── plugin.go -``` - -## 配置 - -**Monstache config.toml配置解释** - -| 参数 | 说明 | 
-| --------------------------------- | -------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | -| mongo-url | MongoDB实例的主节点访问地址。详情请参见。[mongo-url](https://rwynn.github.io/monstache-site/config/#mongo-url) | -| elasticsearch-urls | Elasticsearch的访问地址。详情请参见 [elasticsearch-urls](https://rwynn.github.io/monstache-site/config/#elasticsearch-urls) | -| direct-read-namespaces | 指定待同步的集合,详情请参见[direct-read-namespaces](https://rwynn.github.io/monstache-site/config/#direct-read-namespaces)。 | -| direct-read-dynamic-include-regex | 通过正则表达式指定需要监听的集合。此设置可以用来监控符合正则表达式的集合中数据,注意:该功能是在2021-03-18日才合入rel6分支,请使用最新的rel6分支或者2021-03-18之后的release编译最新的Monstache | -| change-stream-namespaces | 如果要使用MongoDB变更流功能,需要指定此参数。启用此参数后,oplog追踪会被设置为无效,详情请参见[change-stream-namespaces](https://rwynn.github.io/monstache-site/config/#change-stream-namespaces)。 | -| namespace-regex | 通过正则表达式指定需要监听的集合。此设置可以用来监控符合正则表达式的集合中数据的变化。 | -| elasticsearch-user | 访问Elasticsearch的用户名。 | -| elasticsearch-password | 访问Elasticsearch的用户密码。 | -| elasticsearch-max-conns | 定义连接ES的线程数。默认为4,即使用4个Go线程同时将数据同步到ES。 | -| mapper-plugin-path | 启动插件相对于monstache的路径 | -| resume | 默认为false。设置为true,Monstache会将已成功同步到ES的MongoDB操作的时间戳写入monstache.monstache集合中。当Monstache因为意外停止时,可通过该时间戳恢复同步任务,避免数据丢失。如果指定了cluster-name,该参数将自动开启,详情请参见[resume](https://rwynn.github.io/monstache-site/config/#resume)。 | - - -**Monstache plugin extra.toml配置解释** - -| 参数 | 说明 | -| --------------------------------- | -------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | -| elasticsearch-shard-num | 
采用插件场景下必须指定ES的Sharding number。详情请参见。[elasticsearch-shard-num](https://www.elastic.co/guide/en/elasticsearch/reference/current/index-modules.html) | -| elasticsearch-replica-num | 采用插件场景下必须制定ES的Replica number。详情请参见。[elasticsearch-replica-num](https://www.elastic.co/guide/en/elasticsearch/reference/current/index-modules.html) | - -阅读官方文档[monstache doc](https://rwynn.github.io/monstache-site/config/) 可以根据自己的需求进行`高级配置` -## 编译 - -进入源码根目录执行`make`或`make server`编译指令时,默认会编译后端服务涉及到的所有组件,包括monstache及其对应的monstache-plugin.so插件。您也可以进入到monstache目录,执行`make`命令单独进行monstache及其插件的编译。 -## 配置 - -monstache 涉及到的配置同样需要执行`init.py`执行,主要涉及到elasticsearch-shard-num ,elasticsearch-replica-num两个配置,其余告警配置如: `direct-read-dynamic-include-regex`、`namespace-regex`和`mapper-plugin-path`等如需变更,需要用户手动进行指定。 -## 部署安装 - -整体打包cmdb.tgz时会将插件monstache-plugin.so及对应的配置文件进行打包,monstache二进制需要您按照本文概述中的目录结构示意图进行部署。之后修改 etc/config.toml和etc/extra.toml配置内容,其中配置文件的路径是相对于二进制 `monstache`的路径,如需改动此路径请注意需要同步修改启动脚本`monstache.sh`中的配置文件启动路径。上述步骤完成后您可以通过以下方式运行: - -```shell -sh monstache.sh start -``` - -当然,也可以通过`systemd`或者简单的`nohup`方式运行, 例如 `monstache -f config.toml -mapper-plugin-path monstache-plugin.so` - -## 索引管理 - -插件将会创建附带特定版本后缀的真实ES索引,如`bk_cmdb.biz_20210701`, 并且只会在索引不存在时创建,特定版本索引的结构信息在插件代码中固定,在索引结构发生变化时插件中版本后缀也会发生变化。 -在成功创建索引后,插件会为每一个索引创建系统别名,如`bk_cmdb.biz` `bk_cmdb.set` `bk_cmdb.module` `bk_cmdb.host` `bk_cmdb.model` `bk_cmdb.object_instance`, 这些别名为蓝鲸CMDB内部索引、查询等操作所使用的别名。 - -索引分词器的指定是在plugin.go中完成,日常维护,如做reindex文档迁移后,需要将最终的真实索引和蓝鲸CMDB系统别名关联, 以保证系统能够正确处理文档数据。 - -阅读官方文档 [elastic reindex doc](https://www.elastic.co/guide/en/elasticsearch/reference/current/docs-reindex.html) 了解reindex操作。 -阅读官方文档 [elastic alias doc](https://www.elastic.co/guide/en/elasticsearch/reference/current/indices-aliases.html) 了解索引别名机制。 diff --git a/src/tools/monstache/build/monstache/etc/config.toml b/src/tools/monstache/build/monstache/etc/config.toml deleted file mode 100755 index bce94a107b..0000000000 --- 
a/src/tools/monstache/build/monstache/etc/config.toml +++ /dev/null @@ -1,21 +0,0 @@ -# mongodb settings fill in according to the official url format of mongo. -# Note that you need to set the database name, username and password corresponding to cmdb -mongo-url = "" - -# elasticsearch settings -elasticsearch-urls = ["http://localhost:9200"] -elasticsearch-user = "" -elasticsearch-password = "" -gzip = true - -# metadata collections. -change-stream-namespaces = [""] -direct-read-namespaces = [""] -direct-read-dynamic-include-regex = "cmdb.cc_ApplicationBase$|cc_SetBase$|cc_ModuleBase$|cmdb.cc_HostBase$|cmdb.cc_ObjDes$|cc_ObjAttDes$|cmdb.cc_ObjectBase_(.*)_pub_" -namespace-regex = "cmdb.cc_ApplicationBase$|cc_SetBase$|cc_ModuleBase$|cmdb.cc_HostBase$|cmdb.cc_ObjDes$|cc_ObjAttDes$|cmdb.cc_ObjectBase_(.*)_pub_" - -# plugin -mapper-plugin-path = "etc/monstache-plugin.so" - -# resume mode -resume = true diff --git a/src/tools/monstache/build/monstache/etc/extra.toml b/src/tools/monstache/build/monstache/etc/extra.toml deleted file mode 100755 index b1e159d628..0000000000 --- a/src/tools/monstache/build/monstache/etc/extra.toml +++ /dev/null @@ -1,5 +0,0 @@ -# elasticsearch settings - -# the param must be assigned -elasticsearch-shard-num = "" -elasticsearch-replica-num = "" diff --git a/src/tools/monstache/build/monstache/monstache.sh b/src/tools/monstache/build/monstache/monstache.sh deleted file mode 100755 index 0af7ffedc7..0000000000 --- a/src/tools/monstache/build/monstache/monstache.sh +++ /dev/null @@ -1,80 +0,0 @@ -#!/bin/sh - -# base functions. -. /etc/rc.d/init.d/functions - -# app informations. -APPBIN="monstache" -APPARGS="-f ./etc/config.toml" -BINPATH="." - -# start app. -start() { - # start daemon. - echo -n $"Starting ${APPBIN}: " - daemon "${BINPATH}/${APPBIN} ${APPARGS} &" - RET=$? - echo - return $RET -} - -# stop app. -stop() { - # stop daemon. - echo -n $"Stopping ${APPBIN}: " - killproc ${APPBIN} - RET=$? - echo - return $RET -} - -# restart app. 
-restart() { - # stop app. - stop - - # start app again. - start -} - -# monitor app. -monitor() { - echo -n $"Monitor ${APPBIN}: " - if [ -n "`pidofproc ${APPBIN}`" ] ; then - success $"Monitor ${APPBIN}" - echo - else - warning $"Monitor ${APPBIN} isn't running, restart it now..." - echo - start - fi -} - -# show daemon status. -status() { - ps -ef | grep -w "${APPBIN}" | grep -v grep | grep -v -w sh -} - -# switch cmd. -case "$1" in - start) - status && exit 0 - $1 - ;; - stop) - status || exit 0 - $1 - ;; - restart) - $1 - ;; - status) - $1 - ;; - monitor) - $1 - ;; - *) - echo $"Usage: $0 {start|stop|status|restart|monitor}" - exit 2 -esac diff --git a/src/tools/monstache/etc/config.toml b/src/tools/monstache/etc/config.toml deleted file mode 100755 index ed2fa5d2a3..0000000000 --- a/src/tools/monstache/etc/config.toml +++ /dev/null @@ -1,20 +0,0 @@ -# mongodb settings fill in according to the official url format of mongo. -# Note that you need to set the database name, username and password corresponding to cmdb -mongo-url = "" - -# elasticsearch settings -elasticsearch-urls = ["http://localhost:9200"] -elasticsearch-user = "" -elasticsearch-password = "" -gzip = true - -# metadata collections. 
-change-stream-namespaces = [""] -direct-read-namespaces = [""] -direct-read-dynamic-include-regex = "cmdb.cc_ApplicationBase$|cc_BizSetBase$|cc_SetBase$|cc_ModuleBase$|cmdb.cc_HostBase$|cmdb.cc_ObjDes$|cc_ObjAttDes$|cmdb.cc_ObjectBase_(.*)_pub_" -namespace-regex = "cmdb.cc_ApplicationBase$|cc_BizSetBase$|cc_SetBase$|cc_ModuleBase$|cmdb.cc_HostBase$|cmdb.cc_ObjDes$|cc_ObjAttDes$|cmdb.cc_ObjectBase_(.*)_pub_" -# plugin -mapper-plugin-path = "etc/monstache-plugin.so" - -# resume mode -resume = true diff --git a/src/tools/monstache/etc/extra.toml b/src/tools/monstache/etc/extra.toml deleted file mode 100755 index b1e159d628..0000000000 --- a/src/tools/monstache/etc/extra.toml +++ /dev/null @@ -1,5 +0,0 @@ -# elasticsearch settings - -# the param must be assigned -elasticsearch-shard-num = "" -elasticsearch-replica-num = "" diff --git a/src/tools/monstache/monstache.sh b/src/tools/monstache/monstache.sh deleted file mode 100755 index 0af7ffedc7..0000000000 --- a/src/tools/monstache/monstache.sh +++ /dev/null @@ -1,80 +0,0 @@ -#!/bin/sh - -# base functions. -. /etc/rc.d/init.d/functions - -# app informations. -APPBIN="monstache" -APPARGS="-f ./etc/config.toml" -BINPATH="." - -# start app. -start() { - # start daemon. - echo -n $"Starting ${APPBIN}: " - daemon "${BINPATH}/${APPBIN} ${APPARGS} &" - RET=$? - echo - return $RET -} - -# stop app. -stop() { - # stop daemon. - echo -n $"Stopping ${APPBIN}: " - killproc ${APPBIN} - RET=$? - echo - return $RET -} - -# restart app. -restart() { - # stop app. - stop - - # start app again. - start -} - -# monitor app. -monitor() { - echo -n $"Monitor ${APPBIN}: " - if [ -n "`pidofproc ${APPBIN}`" ] ; then - success $"Monitor ${APPBIN}" - echo - else - warning $"Monitor ${APPBIN} isn't running, restart it now..." - echo - start - fi -} - -# show daemon status. -status() { - ps -ef | grep -w "${APPBIN}" | grep -v grep | grep -v -w sh -} - -# switch cmd. 
-case "$1" in - start) - status && exit 0 - $1 - ;; - stop) - status || exit 0 - $1 - ;; - restart) - $1 - ;; - status) - $1 - ;; - monitor) - $1 - ;; - *) - echo $"Usage: $0 {start|stop|status|restart|monitor}" - exit 2 -esac diff --git a/src/tools/monstache/plugin.go b/src/tools/monstache/plugin.go deleted file mode 100755 index cac99d9fe1..0000000000 --- a/src/tools/monstache/plugin.go +++ /dev/null @@ -1,1389 +0,0 @@ -/* -* Tencent is pleased to support the open source community by making 蓝鲸 available. -* Copyright (C) 2017-2018 THL A29 Limited, a Tencent company. All rights reserved. -* Licensed under the MIT License (the "License"); you may not use this file except -* in compliance with the License. You may obtain a copy of the License at -* http://opensource.org/licenses/MIT -* Unless required by applicable law or agreed to in writing, software distributed under -* the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, -* either express or implied. See the License for the specific language governing permissions and -* limitations under the License. - */ - -package main - -import ( - "context" - "errors" - "fmt" - "log" - "os" - "regexp" - "runtime" - "strings" - "sync" - "time" - - "configcenter/src/common" - ccjson "configcenter/src/common/json" - meta "configcenter/src/common/metadata" - "configcenter/src/common/util" - - "github.com/BurntSushi/toml" - "github.com/olivere/elastic/v7" - "github.com/rwynn/monstache/monstachemap" - "github.com/tidwall/gjson" - "go.mongodb.org/mongo-driver/bson" - "go.mongodb.org/mongo-driver/bson/primitive" -) - -// blueking cmdb elastic monstache plugin. -// build: go build -buildmode=plugin -o monstache-plugin.so plugin.go - -// elastic index versions. -// NOTE: CHANGE the version name if you have modify the indexes metadata struct. 
-const ( - indexVersionBizSet = "20210710" - indexVersionBiz = "20210710" - indexVersionSet = "20210710" - indexVersionModule = "20210710" - indexVersionHost = "20210710" - indexVersionModel = "20210710" - indexVersionObjectInstance = "20210710" -) - -const ( - // default metaId - nullMetaId = "0" - mongoMetaId = "_id" - mongoOptionId = "id" - mongoOptionName = "name" - mongoEnum = "enum" - mongoDatabase = "cmdb" - configPath = "./etc/extra.toml" - commonObject = "common" - // tableInstRegex 表格实例表名的正则表达式,表名生成函数: buildInstTableName - tableInstRegex = `cc_ObjectBase_(.*)_pub_bk_(.*)#(.*)` - // tableObjIdRegex 表格objId的正则表达式 - // 例: 模型实例是"host","host"上表格字段为disk,那么表格的objId为bk_host#disk - tableObjIdRegex = `bk_(.*)#(.*)` - // deleteTableQueryScript 表格实例删除脚本的条件 - // 例: 删除disk表格中实例_id为1的行 {"field": "tables.disk.1"} - deleteTableQueryScript = "tables.%s.%s" - // deleteTableScript 表格实例删除脚本, - // 例:删除disk表格中实例_id为1的行 ctx._source.tables.disk.remove('1'),如果删除后表格为空则删除表格字段 - deleteTableScript = `ctx._source.tables.%s.remove('%s'); - if (ctx._source.tables.%s.size()==0) {ctx._source.tables.remove('%s')}` - // updateTableScript 表格实例更新脚本(如果tables字段和表格字段不存在则先创建再更新) - // 例:更新disk表格中实例_id为1的行的keyword为xxx ctx._source.tables.disk['1'] = ["xxx"] - updateTableScript = `if(!ctx._source.containsKey('tables')){ctx._source['tables']=[:];} - if(!ctx._source.tables.containsKey('%s')){ctx._source.tables['%s']=[:];} - ctx._source.tables.%s['%s']=%s` -) - -// elastic indexes. 
-var ( - indexBizSet *meta.ESIndex - indexBiz *meta.ESIndex - indexSet *meta.ESIndex - indexModule *meta.ESIndex - indexHost *meta.ESIndex - indexModel *meta.ESIndex - indexObjectInstance *meta.ESIndex - indexList []*meta.ESIndex -) - -var ( - // tableProperties Mapping中的表格属性字段 - tableProperties string -) - -type extraConfig struct { - // assign es replicaNum - ReplicaNum string `toml:"elasticsearch-shard-num"` - - // assign es shardingNum - ShardingNum string `toml:"elasticsearch-replica-num"` -} - -type instEnumIdToName struct { - // the struct like: map[obj]map[bk_property_id]map[option.id]option.name - instEnumMap map[string]map[string]map[string]string - rw sync.RWMutex -} - -// 对于资源池等内部资源需要进行屏蔽,由于名字可能会改,所以需要通过cc_ApplicationBase表中的 defaultId为1进行判断 -type skipBizId struct { - bizIds map[int64]struct{} - rw sync.RWMutex -} - -type bizId struct { - BusinessID int64 `json:"bk_biz_id" bson:"bk_biz_id"` -} - -var ( - instEnumInfo *instEnumIdToName - skipBizIdList *skipBizId -) - -// cronInsEnumInfo TODO -// regular update instance enum ID to name -func cronInsEnumInfo(input *monstachemap.InitPluginInput) { - - // init object instance option's "id" to "name" - initEnum := func() { - instEnumInfoTmp := &instEnumIdToName{ - instEnumMap: make(map[string]map[string]map[string]string), - } - // step 1 : search all models - models := make([]map[string]interface{}, 0) - modelCursor, err := input.MongoClient.Database(mongoDatabase).Collection(common.BKTableNameObjDes). - Find(context.Background(), bson.D{}) - if err != nil { - log.Printf("query model attributes cursor failed, err: %v", err) - return - } - - if err := modelCursor.All(context.Background(), &models); err != nil { - log.Printf("query model attributes failed, err: %v", err) - return - } - - objIds := make([]string, 0) - for _, model := range models { - if obj, ok := model[common.BKObjIDField].(string); ok { - objIds = append(objIds, obj) - } - } - - // step 2: search enum and bk_property_id in model attribute. 
- for _, obj := range objIds { - - // query model attribute. - modelAttrs := make([]map[string]interface{}, 0) - - modelAttrsCursor, err := input.MongoClient.Database(mongoDatabase).Collection(common.BKTableNameObjAttDes). - Find(context.Background(), bson.D{{common.BKObjIDField, obj}, - {common.BKPropertyTypeField, mongoEnum}}) - if err != nil { - return - } - if err := modelAttrsCursor.All(context.Background(), &modelAttrs); err != nil { - return - } - instEnumInfoTmp.instEnumMap[obj] = make(map[string]map[string]string) - tmpPropertyIDMap := make(map[string]map[string]string) - - for _, modelAttr := range modelAttrs { - optionMap := make(map[string]string) - if _, ok := modelAttr[common.BKPropertyIDField].(string); !ok { - continue - } - if attr, ok := modelAttr[common.BKOptionField].(primitive.A); ok { - opts := []interface{}(attr) - for _, opt := range opts { - // option.id:option.name - if o, ok := opt.(map[string]interface{}); ok { - if _, ok := o[mongoOptionName].(string); ok { - optionMap[o[mongoOptionId].(string)] = o[mongoOptionName].(string) - } - } - } - } - tmpPropertyIDMap[modelAttr[common.BKPropertyIDField].(string)] = optionMap - } - instEnumInfoTmp.instEnumMap[obj] = tmpPropertyIDMap - } - - instEnumInfo.rw.Lock() - defer instEnumInfo.rw.Unlock() - instEnumInfo.instEnumMap = instEnumInfoTmp.instEnumMap - log.Println("update instEnumInfo successfully") - return - } - - for { - initEnum() - err := initSkipBizId(input) - if err != nil { - log.Printf("init resource pool fail, err: %v", err) - os.Exit(1) - } - time.Sleep(time.Minute) - } - -} - -// newESIndexMetadata new es index metadata -func newESIndexMetadata(config extraConfig) *meta.ESIndexMetadata { - return &meta.ESIndexMetadata{ - Settings: meta.ESIndexMetaSettings{ - Shards: config.ShardingNum, - Replicas: config.ReplicaNum, - }, - Mappings: meta.ESIndexMetaMappings{ - Properties: map[string]meta.ESIndexMetaMappingsProperty{ - meta.IndexPropertyID: { - PropertyType: 
meta.IndexPropertyTypeKeyword, - }, - meta.IndexPropertyBKObjID: { - PropertyType: meta.IndexPropertyTypeKeyword, - }, - meta.IndexPropertyBKSupplierAccount: { - PropertyType: meta.IndexPropertyTypeKeyword, - }, - meta.IndexPropertyBKBizID: { - PropertyType: meta.IndexPropertyTypeKeyword, - }, - meta.IndexPropertyKeywords: { - PropertyType: meta.IndexPropertyTypeKeyword, - }, - }, - }, - } -} - -func init() { - // initialize each index for this release version plugin. - var config extraConfig - _, err := toml.DecodeFile(configPath, &config) - if err != nil { - panic(err) - } - - if config.ShardingNum == "" || config.ReplicaNum == "" { - panic(fmt.Sprintln("es shardingNum or replicaNum is not config!")) - } - - instEnumInfo = &instEnumIdToName{ - instEnumMap: make(map[string]map[string]map[string]string), - } - skipBizIdList = &skipBizId{ - bizIds: make(map[int64]struct{}), - } - - // biz set index. - indexBizSetMetadata := newESIndexMetadata(config) - indexBizSetMetadata.Mappings.Properties[meta.IndexPropertyBKBizSetID] = meta.ESIndexMetaMappingsProperty{ - PropertyType: meta.IndexPropertyTypeKeyword, - } - // init indexBizSetMetadata, but biz set not meta.IndexPropertyBKBizID field, delete it - delete(indexBizSetMetadata.Mappings.Properties, meta.IndexPropertyBKBizID) - indexBizSet = meta.NewESIndex(meta.IndexNameBizSet, indexVersionBizSet, indexBizSetMetadata) - indexList = append(indexList, indexBizSet) - - // business application index. - indexBizMetadata := newESIndexMetadata(config) - indexBiz = meta.NewESIndex(meta.IndexNameBiz, indexVersionBiz, indexBizMetadata) - indexList = append(indexList, indexBiz) - - // set index. 
- indexSetMetadata := newESIndexMetadata(config) - indexSetMetadata.Mappings.Properties[meta.IndexPropertyBKParentID] = meta.ESIndexMetaMappingsProperty{ - PropertyType: meta.IndexPropertyTypeKeyword, - } - indexSet = meta.NewESIndex(meta.IndexNameSet, indexVersionSet, indexSetMetadata) - indexList = append(indexList, indexSet) - - // module index. - indexModuleMetadata := newESIndexMetadata(config) - indexModule = meta.NewESIndex(meta.IndexNameModule, indexVersionModule, indexModuleMetadata) - indexList = append(indexList, indexModule) - - // host index. - indexHostMetadata := newESIndexMetadata(config) - indexHostMetadata.Mappings.Properties[meta.IndexPropertyBKCloudID] = meta.ESIndexMetaMappingsProperty{ - PropertyType: meta.IndexPropertyTypeKeyword, - } - // init indexHostMetadata, but host is not meta.IndexPropertyBKBizID field, delete it - delete(indexHostMetadata.Mappings.Properties, meta.IndexPropertyBKBizID) - indexHost = meta.NewESIndex(meta.IndexNameHost, indexVersionHost, indexHostMetadata) - indexList = append(indexList, indexHost) - - // model index. - indexModelMetadata := newESIndexMetadata(config) - indexModel = meta.NewESIndex(meta.IndexNameModel, indexVersionModel, indexModelMetadata) - indexList = append(indexList, indexModel) - - // object instance index. 
- indexObjInstMetadata := newESIndexMetadata(config) - indexObjectInstance = meta.NewESIndex(meta.IndexNameObjectInstance, indexVersionObjectInstance, - indexObjInstMetadata) - indexList = append(indexList, indexObjectInstance) - - // init table properties - mappings := meta.ESIndexMetaMappings{Properties: make(map[string]meta.ESIndexMetaMappingsProperty)} - mappings.Properties[meta.TablePropertyName] = meta.ESIndexMetaMappingsProperty{ - PropertyType: meta.IndexPropertyTypeObject, - } - marshal, err := ccjson.MarshalToString(mappings) - if err != nil { - panic(err) - } - tableProperties = marshal - - log.Println("bk-cmdb elastic monstache plugin initialize successfully") -} - -// analysisJSONKeywords analysis the given json style document, and extract -// all the keywords as elastic document content. -func analysisJSONKeywords(result gjson.Result) []string { - - keywords := make([]string, 0) - if !result.IsObject() && !result.IsArray() { - keywords = append(keywords, result.String()) - return keywords - } - - result.ForEach(func(key, value gjson.Result) bool { - keywords = append(keywords, analysisJSONKeywords(value)...) - return true - }) - - return keywords -} - -// compressKeywords compress the keywords return without repetition. -func compressKeywords(keywords []string) []string { - - compressedKeywords := make([]string, 0) - // keywordsMap control repeated or screened keywords. - keywordsMap := make(map[string]struct{}) - for _, keyword := range keywords { - if _, exist := keywordsMap[keyword]; exist { - continue - } - compressedKeywords = append(compressedKeywords, keyword) - keywordsMap[keyword] = struct{}{} - } - - return compressedKeywords -} - -// getMetaIdToStr objID/hostID/setID/moduleID/instanceID/bizID convert to string. 
-func getMetaIdToStr(d interface{}) (string, error) { - if d == nil { - return "", errors.New("document id is nil") - } - return fmt.Sprintf("%v", d), nil -} - -// baseDataCleaning do not need to sync "_id","create_time","last_time","bk_supplier_account". -func baseDataCleaning(document map[string]interface{}) map[string]interface{} { - delete(document, mongoMetaId) - delete(document, common.CreateTimeField) - delete(document, common.LastTimeField) - delete(document, common.BKOwnerIDField) - return document -} - -// originalDataCleaning some field do not need to save es,delete it. -func originalDataCleaning(document map[string]interface{}, collection string) map[string]interface{} { - - if document == nil { - return nil - } - - doc := make(map[string]interface{}) - - switch collection { - case common.BKTableNameBaseBizSet: - doc = baseDataCleaning(document) - // do not need to sync "default". - delete(doc, common.BKDefaultField) - delete(doc, common.BKBizSetScopeField) - - case common.BKTableNameBaseApp: - doc = baseDataCleaning(document) - // do not need to sync "default". - delete(doc, common.BKDefaultField) - delete(doc, common.BKParentIDField) - - case common.BKTableNameBaseSet: - - doc = baseDataCleaning(document) - - // do not need to sync "default","set_template_id","bk_biz_id","bk_parent_id". - delete(doc, common.BKAppIDField) - delete(doc, common.BKParentIDField) - delete(doc, common.BKSetTemplateIDField) - delete(doc, common.BKDefaultField) - - case common.BKTableNameBaseModule: - doc = baseDataCleaning(document) - - // do not need to sync "default","set_template_id","bk_biz_id","bk_parent_id","bk_set_id","service_category_id". 
- delete(doc, common.BKDefaultField) - delete(doc, common.BKSetTemplateIDField) - delete(doc, common.BKAppIDField) - delete(doc, common.BKParentIDField) - delete(doc, common.BKSetIDField) - delete(doc, common.BKServiceCategoryIDField) - - case common.BKTableNameBaseHost: - - doc = baseDataCleaning(document) - // do not need to sync "operation_time". - delete(doc, common.BKOperationTimeField) - delete(doc, common.BKParentIDField) - - case common.BKTableNameObjDes: - - // need to sync "bk_obj_name" and "bk_obj_id". - doc[common.BKObjIDField] = document[common.BKObjIDField] - doc[common.BKObjNameField] = document[common.BKObjNameField] - - case common.BKTableNameObjAttDes: - - // need to sync "bk_property_id" and "bk_property_name". - doc[common.BKPropertyIDField] = document[common.BKPropertyIDField] - doc[common.BKPropertyNameField] = document[common.BKPropertyNameField] - - default: - doc = baseDataCleaning(document) - // do not need to sync "bk_obj_id" for common object instance. - delete(doc, common.BKObjIDField) - delete(doc, common.BKParentIDField) - } - - return doc -} - -// getModeNameByCollection parse the innerObjId from collection name. -func getModeNameByCollection(collection string) (innerObjId string) { - - switch collection { - case common.BKTableNameBaseBizSet: - innerObjId = common.BKInnerObjIDBizSet - case common.BKTableNameBaseHost: - innerObjId = common.BKInnerObjIDHost - case common.BKTableNameBaseApp: - innerObjId = common.BKInnerObjIDApp - case common.BKTableNameBaseSet: - innerObjId = common.BKInnerObjIDSet - - case common.BKTableNameBaseModule: - innerObjId = common.BKInnerObjIDModule - default: - if common.IsObjectInstShardingTable(collection) { - tmp := strings.TrimLeft(collection, common.BKObjectInstShardingTablePrefix) - instSlice := strings.Split(tmp, "_") - if len(instSlice) >= 3 { - innerObjId = strings.Join(instSlice[2:], "_") - } - } - } - return innerObjId -} - -// enumIdToName parse enum Id to Name. 
-func enumIdToName(document map[string]interface{}, collection string) { - - key := getModeNameByCollection(collection) - if key == "" { - return - } - instEnumInfo.rw.RLock() - defer instEnumInfo.rw.RUnlock() - // deal enum map[string]map[string]map[string]string - for propertyId, enumInfo := range instEnumInfo.instEnumMap[key] { - if _, ok := document[propertyId]; ok { - if v, ok := document[propertyId].(string); ok { - document[propertyId] = enumInfo[v] - } - } - } - return -} - -// analysisDocument analysis the given document, return document id and keywords. -func analysisDocument(document map[string]interface{}, collection string) (string, []string, error) { - - var id string - // analysis collection document id. - switch collection { - case common.BKTableNameBaseBizSet: - bizSetId, err := getMetaIdToStr(document[common.BKBizSetIDField]) - if err != nil { - return "", nil, fmt.Errorf("missing: %s, err: %v", common.BKBizSetIDField, err) - } - id = bizSetId - case common.BKTableNameBaseApp: - bizId, err := getMetaIdToStr(document[common.BKAppIDField]) - if err != nil { - return "", nil, fmt.Errorf("missing: %s, err: %v", common.BKAppIDField, err) - } - id = bizId - case common.BKTableNameBaseSet: - - setId, err := getMetaIdToStr(document[common.BKSetIDField]) - if err != nil { - return "", nil, fmt.Errorf("missing: %s, err: %v", common.BKSetIDField, err) - } - id = setId - case common.BKTableNameBaseModule: - - moduleId, err := getMetaIdToStr(document[common.BKModuleIDField]) - if err != nil { - return "", nil, fmt.Errorf("missing: %s, err: %v", common.BKModuleIDField, err) - } - id = moduleId - - case common.BKTableNameBaseHost: - hostId, err := getMetaIdToStr(document[common.BKHostIDField]) - if err != nil { - return "", nil, fmt.Errorf("missing: %s, err: %v", common.BKHostIDField, err) - } - id = hostId - - case common.BKTableNameObjDes, common.BKTableNameObjAttDes: - objId, err := getMetaIdToStr(document[common.BKObjIDField]) - if err != nil { - return "", 
nil, fmt.Errorf("missing: %s, err: %v", common.BKObjIDField, err) - } - id = objId - default: - instId, err := getMetaIdToStr(document[common.BKInstIDField]) - if err != nil { - return "", nil, fmt.Errorf("missing: %s, err: %v", common.BKInstIDField, err) - } - id = instId - } - // in the instance scenario, the enumeration values need to be converted - if collection != common.BKTableNameObjDes { - enumIdToName(document, collection) - } - - doc := originalDataCleaning(document, collection) - if doc == nil { - return "", nil, errors.New("there is no document") - } - // analysis keywords. - jsonDoc, err := ccjson.MarshalToString(doc) - if err != nil { - return "", nil, err - } - keywords := analysisJSONKeywords(gjson.Parse(jsonDoc)) - - // return document id and compressed keywords. - return id, compressKeywords(keywords), nil -} - -// outputDocument return output document -func outputDocument(input *monstachemap.MapperPluginInput, output *monstachemap.MapperPluginOutput, objID, - esObjID string) (map[string]interface{}, error) { - oId := input.Document[common.BKOwnerIDField] - metaId := input.Document[mongoMetaId] - bizId := input.Document[common.BKAppIDField] - - // analysis document. - id, keywords, err := analysisDocument(input.Document, input.Collection) - if err != nil { - return nil, fmt.Errorf("analysis output document failed, document: %+v, err: %v", input.Document, err) - } - - // build elastic document. - document := map[string]interface{}{ - meta.IndexPropertyID: id, - meta.IndexPropertyDataKind: meta.DataKindInstance, - meta.IndexPropertyBKObjID: objID, - meta.IndexPropertyBKSupplierAccount: oId, - meta.IndexPropertyBKBizID: bizId, - meta.IndexPropertyKeywords: keywords, - } - - documentID, ok := metaId.(primitive.ObjectID) - if !ok { - return nil, errors.New("missing document metadata id") - } - idEs := fmt.Sprintf("%s:%s", documentID.Hex(), esObjID) - output.ID = idEs - - return document, nil -} - -// indexingBizSet indexing the business set instance. 
-func indexingBizSet(input *monstachemap.MapperPluginInput, output *monstachemap.MapperPluginOutput) error { - - bizSetId := input.Document[common.BKBizSetIDField] - - document, err := outputDocument(input, output, common.BKInnerObjIDBizSet, common.BKInnerObjIDBizSet) - if err != nil { - return fmt.Errorf("get biz set output document failed, err: %v", err) - } - document[meta.IndexPropertyBKBizSetID] = bizSetId - delete(document, meta.IndexPropertyBKBizID) - - output.Document = document - // use alias name to indexing document. - output.Index = indexBizSet.AliasName() - - return nil -} - -// indexingApplication indexing the business application instance. -func indexingApplication(input *monstachemap.MapperPluginInput, output *monstachemap.MapperPluginOutput) error { - - document, err := outputDocument(input, output, common.BKInnerObjIDApp, common.BKInnerObjIDApp) - if err != nil { - return fmt.Errorf("get biz output document failed, err: %v", err) - } - - output.Document = document - // use alias name to indexing document. - output.Index = indexBiz.AliasName() - - return nil -} - -// indexingSet indexing the set instance. -func indexingSet(input *monstachemap.MapperPluginInput, output *monstachemap.MapperPluginOutput) error { - - pId := input.Document[common.BKParentIDField] - - document, err := outputDocument(input, output, common.BKInnerObjIDSet, common.BKInnerObjIDSet) - if err != nil { - return fmt.Errorf("get set output document failed, err: %v", err) - } - document[meta.IndexPropertyBKParentID] = pId - - output.Document = document - // use alias name to indexing document. - output.Index = indexSet.AliasName() - - return nil -} - -// indexingModule indexing the module instance. 
-func indexingModule(input *monstachemap.MapperPluginInput, output *monstachemap.MapperPluginOutput) error { - - document, err := outputDocument(input, output, common.BKInnerObjIDModule, common.BKInnerObjIDModule) - if err != nil { - return fmt.Errorf("get module output document failed, err: %v", err) - } - - output.Document = document - // use alias name to indexing document. - output.Index = indexModule.AliasName() - - return nil -} - -// indexingHost indexing the host instance. -func indexingHost(input *monstachemap.MapperPluginInput, output *monstachemap.MapperPluginOutput) error { - - document, err := outputDocument(input, output, common.BKInnerObjIDHost, common.BKInnerObjIDHost) - if err != nil { - return fmt.Errorf("get host output document failed, err: %v", err) - } - document[meta.IndexPropertyBKCloudID] = input.Document[common.BKCloudIDField] - delete(document, meta.IndexPropertyBKBizID) - - output.Document = document - // use alias name to indexing document. - output.Index = indexHost.AliasName() - - return nil -} - -// indexingModel indexing the model/attr instance. -func indexingModel(input *monstachemap.MapperPluginInput, output *monstachemap.MapperPluginOutput) error { - - objectID, ok := input.Document[common.BKObjIDField].(string) - if !ok { - return fmt.Errorf("analysis model document failed, object id missing, %+v", input.Document) - } - - if IsTableObjId(objectID) { - output.Skip = true - return nil - } - - // query model. - model := make(map[string]interface{}) - - if err := input.MongoClient.Database(input.Database).Collection(common.BKTableNameObjDes). - FindOne(context.Background(), bson.D{{common.BKObjIDField, objectID}}). - Decode(&model); err != nil { - return fmt.Errorf("query model object[%s] failed, %v", objectID, err) - } - - oId, bizId, metaId := model[common.BKOwnerIDField], model[common.BKAppIDField], model[mongoMetaId] - - // analysis model document. 
- _, keywords, err := analysisDocument(model, common.BKTableNameObjDes) - if err != nil { - return fmt.Errorf("analysis model document failed, %+v, %v", input.Document, err) - } - - // query model attribute. - modelAttrs, tableAttrs := make([]map[string]interface{}, 0), make([]map[string]interface{}, 0) - - modelAttrsCursor, err := input.MongoClient.Database(input.Database).Collection(common.BKTableNameObjAttDes). - Find(context.Background(), bson.D{{common.BKObjIDField, objectID}}) - if err != nil { - return fmt.Errorf("query model attributes object[%s] cursor failed, %v", objectID, err) - } - - if err := modelAttrsCursor.All(context.Background(), &modelAttrs); err != nil { - return fmt.Errorf("query model attributes object[%s] failed, %v", objectID, err) - } - - // all attributes with model metadata is ONE elastic document. - for _, attribute := range modelAttrs { - if err := validateAttribute(attribute); err != nil { - log.Printf("validate attributes object[%s] property type failed, %+v, %v", objectID, attribute, err) - continue - } - - propertyType, err := getMetaIdToStr(attribute[common.BKPropertyTypeField]) - if err != nil { - log.Printf("get model attributes object[%s] property type failed, %+v, %v", objectID, attribute, err) - continue - } - if propertyType == common.FieldTypeInnerTable { - tableAttrs = append(tableAttrs, attribute) - } - - // data Cleaning - jsonDoc, err := ccjson.MarshalToString(originalDataCleaning(attribute, common.BKTableNameObjAttDes)) - if err != nil { - log.Printf("marshal model attributes object[%s] failed, %+v, %v", objectID, attribute, err) - continue - } - keywords = append(keywords, analysisJSONKeywords(gjson.Parse(jsonDoc))...) - } - documentID, ok := metaId.(primitive.ObjectID) - if !ok { - return errors.New("missing document metadata id") - } - idEs := fmt.Sprintf("%s:%s", documentID.Hex(), common.BKInnerObjIDObject) - - // build elastic document. 
- document := map[string]interface{}{ - // model scene,we use meta_bk_obj_id to search mongo,this id set null. - meta.IndexPropertyID: nullMetaId, - meta.IndexPropertyDataKind: meta.DataKindModel, - meta.IndexPropertyBKObjID: objectID, - meta.IndexPropertyBKSupplierAccount: oId, - meta.IndexPropertyBKBizID: bizId, - meta.IndexPropertyKeywords: compressKeywords(keywords), - } - err = updateModelTableProperties(document, tableAttrs) - if err != nil { - return fmt.Errorf("update model table properties failed, err: %v", err) - } - output.ID = idEs - output.Document = document - // use alias name to indexing document. - output.Index = indexModel.AliasName() - - return nil -} - -// indexingObjectInstance indexing the common object instance. -func indexingObjectInstance(input *monstachemap.MapperPluginInput, output *monstachemap.MapperPluginOutput) error { - - objId := input.Document[common.BKObjIDField] - bizId := input.Document[common.BKAppIDField] - oId := input.Document[common.BKOwnerIDField] - metaId := input.Document[mongoMetaId] - - // analysis document. - id, keywords, err := analysisDocument(input.Document, input.Collection) - if err != nil { - return fmt.Errorf("analysis object instance document failed, %+v, %v", input.Document, err) - } - - // build elastic document. - document := map[string]interface{}{ - meta.IndexPropertyID: id, - meta.IndexPropertyDataKind: meta.DataKindInstance, - meta.IndexPropertyBKObjID: objId, - meta.IndexPropertyBKSupplierAccount: oId, - meta.IndexPropertyBKBizID: bizId, - meta.IndexPropertyKeywords: keywords, - } - - documentID, ok := metaId.(primitive.ObjectID) - if !ok { - return errors.New("missing document metadata id") - } - idEs := fmt.Sprintf("%s:%s", documentID.Hex(), commonObject) - output.ID = idEs - - output.Document = document - // use alias name to indexing document. 
- output.Index = indexObjectInstance.AliasName() - - return nil -} - -// initSkipBizId TODO -// the internal resource pool does not need to be displayed externally. The ID corresponding to the internal resource -// pool is saved. When writing to es from Mongo, the relevant doc needs to be masked. -func initSkipBizId(input *monstachemap.InitPluginInput) error { - - bizInfo := make([]bizId, 0) - appCursor, err := input.MongoClient.Database(mongoDatabase).Collection(common.BKTableNameBaseApp). - Find(context.Background(), bson.D{{common.BKDefaultField, 1}}) - if err != nil { - return fmt.Errorf("query app database appCursor fail, err: %v", err) - } - - if err := appCursor.All(context.Background(), &bizInfo); err != nil { - return fmt.Errorf("query app database fail, err: %v", err) - } - if len(bizInfo) == 0 { - return errors.New("query list num is zero") - } - skipBizIdList.rw.Lock() - defer skipBizIdList.rw.Unlock() - - for _, v := range bizInfo { - skipBizIdList.bizIds[v.BusinessID] = struct{}{} - } - log.Printf("initSkipBizId success, bizId: %v", bizInfo) - return nil -} - -// Init function, when you implement a Init function, it would load and call this function with the initialized -// mongo/elastic clients. And you could do some initialization for elasticsearch or mongodb here. -func Init(input *monstachemap.InitPluginInput) error { - - go cronInsEnumInfo(input) - - // initialize elastic indexes. - for _, index := range indexList { - // check elastic index. - exist, err := input.ElasticClient.IndexExists(index.Name()).Do(context.Background()) - if err != nil { - return fmt.Errorf("check elastic index[%s] existence failed, %v", index.Name(), err) - } - - if !exist { - // NOTE: create new index with the target index name, and it may be a alias index name, - // the policies are all by user. 
- _, err = input.ElasticClient.CreateIndex(index.Name()).Body(index.Metadata()).Do(context.Background()) - if err != nil { - return fmt.Errorf("create elastic index[%s] failed, %v", index.Name(), err) - } - } - - // check elastic alias name. - // it's ok if the alias name index is already exist, but the alias name could not be a real index. - _, err = input.ElasticClient.Alias().Add(index.Name(), index.AliasName()).Do(context.Background()) - if err != nil { - return fmt.Errorf("create elastic index[%s] alias failed, %v", index.Name(), err) - } - - // add table properties if not exist - exist, err = isTablePropertyFieldExist(index, input) - if err != nil { - return fmt.Errorf("check table properties index[%s] failed, %v", index.Name(), err) - } - if !exist { - _, err = input.ElasticClient.PutMapping(). - BodyString(tableProperties). - Index(index.Name()). - Do(context.Background()) - if err != nil { - return fmt.Errorf("add table properties index[%s] failed, %v", index.Name(), err) - } - } - } - - log.Println("initialize elastic indexes successfully") - - return nil -} - -// Map function, when you implement a Map function, you could handle each event document base on the -// plugin input, the input parameter will contain information about the document's origin database and -// collection, and mapping the elastic index document in output. -func Map(input *monstachemap.MapperPluginInput) (*monstachemap.MapperPluginOutput, error) { - - defer func() { - if errRecover := recover(); errRecover != nil { - buf := make([]byte, 1<<16) - runtime.Stack(buf, true) - log.Printf("map data panic, buf: %v", string(buf)) - } - }() - - // discard all internal resource pool class docs. 
- if input.Collection == common.BKTableNameBaseApp || input.Collection == common.BKTableNameBaseSet { - bizId := input.Document[common.BKAppIDField] - if bizId != nil { - skipBizIdList.rw.RLock() - defer skipBizIdList.rw.RUnlock() - bId, err := util.GetInt64ByInterface(bizId) - if err != nil { - log.Printf("bizId convert fail, bizId: %v, err: %v", bizId, err) - return nil, err - } - - if _, exist := skipBizIdList.bizIds[bId]; exist { - return nil, nil - } - } - } - - return mapping(input) -} - -func mapping(input *monstachemap.MapperPluginInput) (*monstachemap.MapperPluginOutput, error) { - output := new(monstachemap.MapperPluginOutput) - - switch input.Collection { - case common.BKTableNameBaseBizSet: - if err := indexingBizSet(input, output); err != nil { - return nil, err - } - - case common.BKTableNameBaseApp: - if err := indexingApplication(input, output); err != nil { - return nil, err - } - - case common.BKTableNameBaseSet: - if err := indexingSet(input, output); err != nil { - return nil, err - } - - case common.BKTableNameBaseModule: - if err := indexingModule(input, output); err != nil { - return nil, err - } - - case common.BKTableNameBaseHost: - if err := indexingHost(input, output); err != nil { - return nil, err - } - - case common.BKTableNameObjDes, common.BKTableNameObjAttDes: - if err := indexingModel(input, output); err != nil { - return nil, err - } - return output, nil - - default: - if !common.IsObjectShardingTable(input.Collection) { - // unknown collection, just drop it. 
- output.Drop = true - return output, nil - } - - // if collection is a table inst collection - if IsTableInstCollection(input.Collection) { - if err := indexingTableInst(input, output); err != nil { - return nil, err - } - output.Skip = true - return output, nil - } - - if err := indexingObjectInstance(input, output); err != nil { - return nil, err - } - } - output.Skip = true - UpsertEsDoc(input, output) - - return output, nil -} - -// Process function, when you implement a Process function, the function will be called after monstache processes each -// event. This function has full access to the MongoDB and Elasticsearch clients ( -// including the Elasticsearch bulk processor) in the input and allows you to handle complex event processing scenarios -func Process(input *monstachemap.ProcessPluginInput) error { - req := elastic.NewBulkDeleteRequest() - - objectID, index, err := getEsIdFromDoc(input.Document, input.Collection) - if err != nil { - log.Printf("get es id failed, err: %v", err) - return err - } - - if input.Operation == "d" { - if IsTableInstCollection(input.Collection) { - return indexingDeletedTableInst(input) - } - req.Id(objectID) - req.Index(index) - input.ElasticBulkProcessor.Add(req) - } - - return nil -} - -// isTablePropertyFieldExist check if the collection is a table property instance collection. -func isTablePropertyFieldExist(index *meta.ESIndex, input *monstachemap.InitPluginInput) (bool, error) { - do, err := input.ElasticClient.GetMapping(). - Index(index.Name()). 
- Do(context.Background()) - if err != nil { - return false, err - } - // 检查 mapping中是否存在tables属性 - // 例: {"mappings":{"properties":{"tables":{"type":"object"}}}} - for _, indexMap := range do { - marshal, err := ccjson.Marshal(indexMap) - if err != nil { - return false, err - } - var indexMetadata meta.ESIndexMetadata - err = ccjson.Unmarshal(marshal, &index) - if err != nil { - return false, err - } - for property := range indexMetadata.Mappings.Properties { - if property == meta.TablePropertyName { - return true, nil - } - } - } - - return false, nil -} - -// getTablePropertyIdAndObjId get table propertyId and objId from collection name. -func getTablePropertyIdAndObjId(collection string) (string, string) { - regex := regexp.MustCompile(tableInstRegex) - if regex.MatchString(collection) { - matches := regex.FindStringSubmatch(collection) - return matches[3], matches[2] - } - return "", "" -} - -// getMongoCollectionByObjID get mongo collection name by objID. -func getMongoCollectionByObjID(objID string, supplierAccount string) string { - var collection string - switch objID { - case common.BKInnerObjIDBizSet: - collection = common.BKInnerObjIDBizSet - case common.BKInnerObjIDHost: - collection = common.BKTableNameBaseHost - case common.BKInnerObjIDApp: - collection = common.BKTableNameBaseApp - case common.BKInnerObjIDSet: - collection = common.BKTableNameBaseSet - case common.BKInnerObjIDModule: - collection = common.BKTableNameBaseModule - default: - collection = common.GetObjectInstTableName(objID, supplierAccount) - } - return collection -} - -// UpdateTablePropertyEsDoc update table property es doc. -func UpdateTablePropertyEsDoc(input *monstachemap.MapperPluginInput, - index, instId, propertyId, tableId string, keywords []string) (bool, error) { - keywordStr, err := ccjson.MarshalToString(keywords) - if err != nil { - return false, err - } - do, err := input.ElasticClient.UpdateByQuery(index). - ProceedOnVersionConflict(). 
- Query(elastic.NewMatchQuery(meta.IndexPropertyID, instId)). - Script(elastic.NewScriptInline(fmt.Sprintf(updateTableScript, - propertyId, propertyId, propertyId, tableId, keywordStr))). - Do(context.Background()) - if err != nil { - return false, err - } - return do.Total == 1, nil -} - -// deleteTablePropertyEsDoc delete table property instance from es. -func deleteTablePropertyEsDoc(input *monstachemap.ProcessPluginInput, index, propertyId, tableId string) error { - _, err := input.ElasticClient.UpdateByQuery(index). - ProceedOnVersionConflict(). - Query(elastic.NewExistsQuery(fmt.Sprintf(deleteTableQueryScript, propertyId, tableId))). - Script(elastic.NewScriptInline(fmt.Sprintf(deleteTableScript, propertyId, tableId, propertyId, propertyId))). - Do(context.Background()) - if err != nil { - log.Printf("update document failed, err: %v", err) - return err - } - return nil -} - -// UpsertEsDoc upsert document to elastic. -func UpsertEsDoc(input *monstachemap.MapperPluginInput, output *monstachemap.MapperPluginOutput) { - input.ElasticBulkProcessor.Add(elastic.NewBulkUpdateRequest(). - Index(output.Index). - DocAsUpsert(true). - RetryOnConflict(10). - Doc(output.Document). - Id(output.ID)) -} - -// indexingDeletedTableInst index deleted table property instance. 
-func indexingDeletedTableInst(input *monstachemap.ProcessPluginInput) error { - documentID, ok := input.Document[mongoMetaId].(primitive.ObjectID) - if !ok { - return errors.New("missing document metadata id") - } - tableId := documentID.Hex() - propertyId, objId := getTablePropertyIdAndObjId(input.Collection) - if propertyId == "" || objId == "" { - return fmt.Errorf("invalid table property collection: %s", input.Collection) - } - index := getEsIndexByObjId(objId) - err := deleteTablePropertyEsDoc(input, index, propertyId, tableId) - if err != nil { - log.Printf("delete table property es document failed, err: %v", err) - return err - } - return nil -} - -// indexingTableInst indexing the table property instance. -func indexingTableInst(input *monstachemap.MapperPluginInput, output *monstachemap.MapperPluginOutput) error { - instIdStr, err := getMetaIdToStr(input.Document[common.BKInstIDField]) - if err != nil { - return fmt.Errorf("missing: %s, err: %v", common.BKInstIDField, err) - } - instId, err := util.GetInt64ByInterface(instIdStr) - if err != nil { - log.Printf("get inst id failed, err: %v", err) - } - // Note: instId == 0 表明表格实例未于模型实例表进行关联,无需处理 - if instId == 0 { - return nil - } - - documentID, ok := input.Document[mongoMetaId].(primitive.ObjectID) - if !ok { - return errors.New("missing document metadata id") - } - tableId := documentID.Hex() - - account, err := getMetaIdToStr(input.Document[common.BKOwnerIDField]) - if err != nil { - return fmt.Errorf("missing: %s, err: %v", common.BKOwnerIDField, err) - } - - // todo 后续需要通过引用表 - propertyId, objId := getTablePropertyIdAndObjId(input.Collection) - if propertyId == "" || objId == "" { - return fmt.Errorf("invalid table property collection: %s", input.Collection) - } - index := getEsIndexByObjId(objId) - - document, keywords, err := analysisTableDocument(propertyId, tableId, input.Document) - if err != nil { - log.Printf("analysis table document failed, err: %v", err) - return err - } - - // 直接更新 es文档 - 
succeed, err := UpdateTablePropertyEsDoc(input, index, instIdStr, propertyId, tableId, keywords) - if err != nil { - log.Printf("update table property es doc failed, err: %v", err) - return err - } - - // 更新败降级处理,查询实例数据,如果es文档不存在,直接创建es文档 - if !succeed { - collection := getMongoCollectionByObjID(objId, account) - idType := getDocumentIdType(collection) - id, err := getEsIDByMongoID(input, collection, idType, instId) - if err != nil { - log.Printf("get es id by mongo id failed, collection: %s, idType: %s, id: %d, err: %v", - collection, idType, instId, err) - return err - } - output.ID = id - output.Document = document - output.Index = index - UpsertEsDoc(input, output) - } - return nil -} - -// getEsIDByMongoID get the es id by mongo document id. -// 如果mongo的实例数据不存在,说明是脏数据,直接返回错误。 -func getEsIDByMongoID(input *monstachemap.MapperPluginInput, collection, idType string, id int64) (string, error) { - doc := make(map[string]interface{}, 0) - err := input.MongoClient.Database(mongoDatabase). - Collection(collection). - FindOne(context.Background(), bson.D{{idType, id}}).Decode(&doc) - if err != nil { - log.Printf("get document failed, collection: %s, id: %d, err: %v", collection, id, err) - return "", err - } - objectID, _, err := getEsIdFromDoc(doc, collection) - if err != nil { - log.Printf("get es id failed, err: %v", err) - return "", err - } - return objectID, nil -} - -// analysisTableDocument analysis the table property document. -func analysisTableDocument(propertyId, tableId string, originDoc map[string]interface{}) ( - map[string]interface{}, []string, error) { - document := make(map[string]interface{}) - keywords := make([]string, 0) - originDoc = baseDataCleaning(originDoc) - - delete(originDoc, mongoOptionId) - delete(originDoc, common.BKInstIDField) - - jsonDoc, err := ccjson.MarshalToString(originDoc) - if err != nil { - return nil, keywords, err - } - - keywords = append(keywords, analysisJSONKeywords(gjson.Parse(jsonDoc))...) 
- document[meta.TablePropertyName] = map[string]interface{}{ - propertyId: map[string]interface{}{ - tableId: keywords, - }, - } - return document, keywords, nil -} - -// validateAttribute validate the attribute. -func validateAttribute(attr map[string]interface{}) error { - id, err := getMetaIdToStr(attr[mongoOptionId]) - if err != nil { - return err - } - if id == "" || id == "0" { - return fmt.Errorf("invalid attribute id: %s", id) - } - return nil -} - -// IsTableInstCollection check if the collection is table inst collection. -func IsTableInstCollection(c string) bool { - regex := regexp.MustCompile(tableInstRegex) - return regex.MatchString(c) -} - -// IsTableObjId check if the objId is table objId. -func IsTableObjId(objId string) bool { - regex := regexp.MustCompile(tableObjIdRegex) - return regex.MatchString(objId) -} - -// updateModelTableProperties update model table property. -func updateModelTableProperties(document map[string]interface{}, attrs []map[string]interface{}) error { - if len(attrs) == 0 { - return nil - } - tables := make(map[string]interface{}) - for _, attribute := range attrs { - keywords := make([]string, 0) - propertyID, err := getMetaIdToStr(attribute[common.BKPropertyIDField]) - if err != nil { - log.Printf("get property id failed, err: %v", err) - continue - } - option, err := meta.ParseTableAttrOption(attribute[common.BKOptionField]) - if err != nil { - log.Printf("parse option failed, %+v, %v", attribute, err) - continue - } - if len(option.Header) == 0 { - log.Printf("table header is empty, attribute: %+v", attribute) - continue - } - for _, header := range option.Header { - jsonDoc, err := ccjson.MarshalToString(map[string]interface{}{ - common.BKPropertyIDField: header.PropertyID, - common.BKPropertyNameField: header.PropertyName, - }) - if err != nil { - log.Printf("marshal table header failed, attribute: %+v, err: %v", attribute, err) - continue - } - keywords = append(keywords, analysisJSONKeywords(gjson.Parse(jsonDoc))...) 
- } - // 0 为占位符,保持搜索时模型和实例的统一 - // todo 临时方案,后续优化 - tables[propertyID] = map[string]interface{}{nullMetaId: keywords} - } - document[meta.TablePropertyName] = tables - return nil -} - -// getDocumentIdType get the document id type. -func getDocumentIdType(collection string) string { - var idType string - // analysis collection document id. - switch collection { - case common.BKTableNameBaseBizSet: - idType = common.BKBizSetIDField - case common.BKTableNameBaseApp: - idType = common.BKAppIDField - case common.BKTableNameBaseSet: - idType = common.BKSetIDField - case common.BKTableNameBaseModule: - idType = common.BKModuleIDField - case common.BKTableNameBaseHost: - idType = common.BKHostIDField - default: - idType = common.BKInstIDField - } - return idType -} - -// getEsIndexByObjId get the es index by object id. -func getEsIndexByObjId(objId string) string { - var index string - switch objId { - case common.BKInnerObjIDBizSet: - index = fmt.Sprintf("%s_%s", meta.IndexNameBizSet, indexVersionBizSet) - case common.BKInnerObjIDApp: - index = fmt.Sprintf("%s_%s", meta.IndexNameBiz, indexVersionBiz) - case common.BKInnerObjIDSet: - index = fmt.Sprintf("%s_%s", meta.IndexNameSet, indexVersionSet) - case common.BKInnerObjIDModule: - index = fmt.Sprintf("%s_%s", meta.IndexNameModule, indexVersionModule) - case common.BKInnerObjIDHost: - index = fmt.Sprintf("%s_%s", meta.IndexNameHost, indexVersionHost) - default: - index = fmt.Sprintf("%s_%s", meta.IndexNameObjectInstance, indexVersionObjectInstance) - } - return index -} - -// getEsIdFromDoc get the es id from mongo document. 
-func getEsIdFromDoc(doc map[string]interface{}, collection string) (string, string, error) { - documentID, ok := doc[mongoMetaId].(primitive.ObjectID) - if !ok { - return "", "", errors.New("missing document metadata id") - } - var objectID, index string - objectID = documentID.Hex() - switch collection { - case common.BKTableNameBaseBizSet: - objectID = fmt.Sprintf("%s:%s", objectID, common.BKInnerObjIDBizSet) - index = fmt.Sprintf("%s_%s", meta.IndexNameBizSet, indexVersionBizSet) - case common.BKTableNameBaseApp: - objectID = fmt.Sprintf("%s:%s", objectID, common.BKInnerObjIDApp) - index = fmt.Sprintf("%s_%s", meta.IndexNameBiz, indexVersionBiz) - case common.BKTableNameBaseSet: - objectID = fmt.Sprintf("%s:%s", objectID, common.BKInnerObjIDSet) - index = fmt.Sprintf("%s_%s", meta.IndexNameSet, indexVersionSet) - case common.BKTableNameBaseModule: - objectID = fmt.Sprintf("%s:%s", objectID, common.BKInnerObjIDModule) - index = fmt.Sprintf("%s_%s", meta.IndexNameModule, indexVersionModule) - case common.BKTableNameBaseHost: - objectID = fmt.Sprintf("%s:%s", objectID, common.BKInnerObjIDHost) - index = fmt.Sprintf("%s_%s", meta.IndexNameHost, indexVersionHost) - case common.BKTableNameObjDes, common.BKTableNameObjAttDes: - objectID = fmt.Sprintf("%s:%s", objectID, common.BKInnerObjIDObject) - index = fmt.Sprintf("%s_%s", meta.IndexNameModel, indexVersionModel) - default: - objectID = fmt.Sprintf("%s:%s", objectID, commonObject) - index = fmt.Sprintf("%s_%s", meta.IndexNameObjectInstance, indexVersionObjectInstance) - } - return objectID, index, nil -}