From f1343bfa006357970696e81acd02f627397c6c62 Mon Sep 17 00:00:00 2001 From: Xiaodan Zhang Date: Thu, 15 Sep 2022 22:25:37 -0400 Subject: [PATCH] release120 --- .gitignore | 27 + .golangci.yaml | 57 + README.md | 1432 ++++- api/account.go | 30 + api/account_test.go | 143 + api/client.go | 457 ++ api/client_test.go | 130 + api/database.go | 100 + api/database_test.go | 38 + api/patitioned_table_appender.go | 351 ++ api/patitioned_table_appender_test.go | 61 + api/pool.go | 194 + api/pool_test.go | 65 + api/request.go | 551 ++ api/request_test.go | 40 + api/table.go | 134 + api/table_appender.go | 122 + api/table_appender_test.go | 43 + api/table_test.go | 18 + api/task.go | 29 + api/task_test.go | 24 + api/utils.go | 85 + api/utils_test.go | 24 + dialer/dialer.go | 368 ++ dialer/dialer_test.go | 149 + dialer/protocol/buffer.go | 102 + dialer/protocol/buffer_test.go | 18 + dialer/protocol/byte_order.go | 41 + dialer/protocol/byte_order_test.go | 15 + dialer/protocol/constant.go | 17 + dialer/protocol/constant_test.go | 1 + dialer/protocol/reader.go | 85 + dialer/protocol/reader_test.go | 29 + dialer/protocol/unsafeslice.go | 185 + dialer/protocol/unsafeslice_test.go | 63 + dialer/protocol/writer.go | 44 + dialer/protocol/writer_test.go | 25 + dialer/request.go | 77 + dialer/request_test.go | 1 + dialer/response.go | 75 + dialer/response_test.go | 1 + dialer/util.go | 58 + dialer/util_test.go | 1 + domain/domain.go | 101 + domain/domain_test.go | 71 + domain/hash_domain.go | 43 + domain/hash_domain_test.go | 33 + domain/list_domain.go | 81 + domain/list_domain_test.go | 33 + domain/range_domain.go | 49 + domain/range_domain_test.go | 48 + domain/value_domain.go | 44 + domain/value_domain_test.go | 25 + errors/protocol_errors.go | 32 + errors/protocol_errors_test.go | 21 + example/apis/account.go | 24 + example/apis/const.go | 19 + example/apis/database.go | 64 + example/apis/pool.go | 242 + example/apis/table.go | 172 + example/example.go | 118 + example/mtw/mtw.go | 184 + example/script/function.go | 35 + example/script/script.go | 580 ++ example/streaming_test/streaming.go | 210 + example/util/util.go | 30 + go.mod | 16 + go.sum | 45 + images/ddb.svg | 1 - model/chart.go | 218 + model/chart_test.go | 68 + model/const.go | 192 + model/const_test.go | 1 + model/dataform.go | 79 + model/dataform_test.go | 75 + model/datatype.go | 581 ++ model/datatype_list.go | 1741 ++++++ model/datatype_list_test.go | 676 +++ model/datatype_test.go | 511 ++ model/dictionary.go | 160 + model/dictionary_test.go | 56 + model/io.go | 626 ++ model/io_test.go | 317 ++ model/matrix.go | 170 + model/matrix_test.go | 51 + model/pair.go | 76 + model/pair_test.go | 30 + model/parse_dataform.go | 297 + model/parse_dataform_test.go | 75 + model/parse_datatype.go | 158 + model/parse_datatype_test.go | 85 + model/render_datatype.go | 400 ++ model/render_datatype_test.go | 19 + model/scalar.go | 71 + model/scalar_test.go | 34 + model/set.go | 68 + model/set_test.go | 30 + model/table.go | 177 + model/table_test.go | 41 + model/util.go | 271 + model/util_test.go | 175 + model/vector.go | 545 ++ model/vector_test.go | 108 + multigoroutinetable/multi_goroutine_table.go | 489 ++ .../multi_goroutine_table_test.go | 203 + multigoroutinetable/status.go | 56 + multigoroutinetable/status_test.go | 7 + multigoroutinetable/writer_goroutine.go | 344 ++ multigoroutinetable/writer_goroutine_test.go | 7 + streaming/abstract_client.go | 15 + streaming/abstract_client_test.go | 1 + streaming/basic_message.go | 58 + 
streaming/basic_message_test.go | 39 + streaming/connection_detector.go | 49 + streaming/connection_detector_test.go | 1 + streaming/goroutine_client.go | 228 + streaming/goroutine_client_test.go | 54 + streaming/goroutine_pooled_client.go | 252 + streaming/goroutine_pooled_client_test.go | 50 + streaming/handler_looper.go | 103 + streaming/handler_looper_test.go | 1 + streaming/listening.go | 85 + streaming/listening_test.go | 1 + streaming/message_handler.go | 41 + streaming/message_handler_test.go | 1 + streaming/message_parser.go | 167 + streaming/message_parser_test.go | 1 + streaming/polling_client.go | 149 + streaming/polling_client_test.go | 241 + streaming/reconnect_detector.go | 70 + streaming/reconnect_detector_test.go | 1 + streaming/reconnect_item.go | 33 + streaming/reconnect_item_test.go | 1 + streaming/subscriber.go | 311 + streaming/subscriber_test.go | 1 + streaming/topic_poller.go | 59 + streaming/topic_poller_test.go | 49 + streaming/util.go | 379 ++ streaming/util_test.go | 67 + streaming/var.go | 19 + streaming/var_test.go | 1 + test/basicTypeTest/basicChart_test.go | 106 + test/basicTypeTest/basicDictionary_test.go | 1149 ++++ test/basicTypeTest/basicMatrix_test.go | 1485 +++++ test/basicTypeTest/basicPair_test.go | 1580 +++++ test/basicTypeTest/basicScalar_test.go | 2385 ++++++++ test/basicTypeTest/basicSet_test.go | 1841 ++++++ test/basicTypeTest/basicTable_test.go | 2727 +++++++++ test/basicTypeTest/basicVector_test.go | 2345 ++++++++ test/connectionPool_test.go | 1643 ++++++ test/createDatabase_test.go | 624 ++ test/dbConnection_test.go | 367 ++ test/dfsTable_test.go | 404 ++ test/dropPartition_tabletMultiple_test.go | 805 +++ test/dropPartition_tabletSingle_test.go | 858 +++ test/existsDatabase_test.go | 251 + test/existsTable_test.go | 328 ++ test/loadTableBySQL_test.go | 189 + test/loadTable_test.go | 430 ++ test/loadText_test.go | 44 + test/multigoroutinetable_test.go | 5062 +++++++++++++++++ test/ploadText_test.go | 39 + test/run_function_test.go | 106 + test/saveTable_test.go | 217 + test/saveText_test.go | 97 + test/setup/settings.go | 25 + test/streaming/goroutineClient_test.go | 701 +++ test/streaming/goroutinePooledClient_test.go | 439 ++ test/streaming/pollingClient_test.go | 319 ++ test/streaming/util.go | 703 +++ test/table_test.go | 604 ++ test/undef_test.go | 155 + test/util.go | 703 +++ 173 files changed, 48370 insertions(+), 8 deletions(-) create mode 100644 .gitignore create mode 100644 .golangci.yaml create mode 100644 api/account.go create mode 100644 api/account_test.go create mode 100644 api/client.go create mode 100644 api/client_test.go create mode 100644 api/database.go create mode 100644 api/database_test.go create mode 100644 api/patitioned_table_appender.go create mode 100644 api/patitioned_table_appender_test.go create mode 100644 api/pool.go create mode 100644 api/pool_test.go create mode 100644 api/request.go create mode 100644 api/request_test.go create mode 100644 api/table.go create mode 100644 api/table_appender.go create mode 100644 api/table_appender_test.go create mode 100644 api/table_test.go create mode 100644 api/task.go create mode 100644 api/task_test.go create mode 100644 api/utils.go create mode 100644 api/utils_test.go create mode 100644 dialer/dialer.go create mode 100644 dialer/dialer_test.go create mode 100644 dialer/protocol/buffer.go create mode 100644 dialer/protocol/buffer_test.go create mode 100644 dialer/protocol/byte_order.go create mode 100644 dialer/protocol/byte_order_test.go create mode 100644 
dialer/protocol/constant.go create mode 100644 dialer/protocol/constant_test.go create mode 100644 dialer/protocol/reader.go create mode 100644 dialer/protocol/reader_test.go create mode 100644 dialer/protocol/unsafeslice.go create mode 100644 dialer/protocol/unsafeslice_test.go create mode 100644 dialer/protocol/writer.go create mode 100644 dialer/protocol/writer_test.go create mode 100644 dialer/request.go create mode 100644 dialer/request_test.go create mode 100644 dialer/response.go create mode 100644 dialer/response_test.go create mode 100644 dialer/util.go create mode 100644 dialer/util_test.go create mode 100644 domain/domain.go create mode 100644 domain/domain_test.go create mode 100644 domain/hash_domain.go create mode 100644 domain/hash_domain_test.go create mode 100644 domain/list_domain.go create mode 100644 domain/list_domain_test.go create mode 100644 domain/range_domain.go create mode 100644 domain/range_domain_test.go create mode 100644 domain/value_domain.go create mode 100644 domain/value_domain_test.go create mode 100644 errors/protocol_errors.go create mode 100644 errors/protocol_errors_test.go create mode 100644 example/apis/account.go create mode 100644 example/apis/const.go create mode 100644 example/apis/database.go create mode 100644 example/apis/pool.go create mode 100644 example/apis/table.go create mode 100644 example/example.go create mode 100644 example/mtw/mtw.go create mode 100644 example/script/function.go create mode 100644 example/script/script.go create mode 100644 example/streaming_test/streaming.go create mode 100644 example/util/util.go create mode 100644 go.mod create mode 100644 go.sum delete mode 100644 images/ddb.svg create mode 100644 model/chart.go create mode 100644 model/chart_test.go create mode 100644 model/const.go create mode 100644 model/const_test.go create mode 100644 model/dataform.go create mode 100644 model/dataform_test.go create mode 100644 model/datatype.go create mode 100644 model/datatype_list.go create mode 100644 model/datatype_list_test.go create mode 100644 model/datatype_test.go create mode 100644 model/dictionary.go create mode 100644 model/dictionary_test.go create mode 100644 model/io.go create mode 100644 model/io_test.go create mode 100644 model/matrix.go create mode 100644 model/matrix_test.go create mode 100644 model/pair.go create mode 100644 model/pair_test.go create mode 100644 model/parse_dataform.go create mode 100644 model/parse_dataform_test.go create mode 100644 model/parse_datatype.go create mode 100644 model/parse_datatype_test.go create mode 100644 model/render_datatype.go create mode 100644 model/render_datatype_test.go create mode 100644 model/scalar.go create mode 100644 model/scalar_test.go create mode 100644 model/set.go create mode 100644 model/set_test.go create mode 100644 model/table.go create mode 100644 model/table_test.go create mode 100644 model/util.go create mode 100644 model/util_test.go create mode 100644 model/vector.go create mode 100644 model/vector_test.go create mode 100644 multigoroutinetable/multi_goroutine_table.go create mode 100644 multigoroutinetable/multi_goroutine_table_test.go create mode 100644 multigoroutinetable/status.go create mode 100644 multigoroutinetable/status_test.go create mode 100644 multigoroutinetable/writer_goroutine.go create mode 100644 multigoroutinetable/writer_goroutine_test.go create mode 100644 streaming/abstract_client.go create mode 100644 streaming/abstract_client_test.go create mode 100644 streaming/basic_message.go create mode 100644 
streaming/basic_message_test.go create mode 100644 streaming/connection_detector.go create mode 100644 streaming/connection_detector_test.go create mode 100644 streaming/goroutine_client.go create mode 100644 streaming/goroutine_client_test.go create mode 100644 streaming/goroutine_pooled_client.go create mode 100644 streaming/goroutine_pooled_client_test.go create mode 100644 streaming/handler_looper.go create mode 100644 streaming/handler_looper_test.go create mode 100644 streaming/listening.go create mode 100644 streaming/listening_test.go create mode 100644 streaming/message_handler.go create mode 100644 streaming/message_handler_test.go create mode 100644 streaming/message_parser.go create mode 100644 streaming/message_parser_test.go create mode 100644 streaming/polling_client.go create mode 100644 streaming/polling_client_test.go create mode 100644 streaming/reconnect_detector.go create mode 100644 streaming/reconnect_detector_test.go create mode 100644 streaming/reconnect_item.go create mode 100644 streaming/reconnect_item_test.go create mode 100644 streaming/subscriber.go create mode 100644 streaming/subscriber_test.go create mode 100644 streaming/topic_poller.go create mode 100644 streaming/topic_poller_test.go create mode 100644 streaming/util.go create mode 100644 streaming/util_test.go create mode 100644 streaming/var.go create mode 100644 streaming/var_test.go create mode 100644 test/basicTypeTest/basicChart_test.go create mode 100644 test/basicTypeTest/basicDictionary_test.go create mode 100644 test/basicTypeTest/basicMatrix_test.go create mode 100644 test/basicTypeTest/basicPair_test.go create mode 100644 test/basicTypeTest/basicScalar_test.go create mode 100644 test/basicTypeTest/basicSet_test.go create mode 100644 test/basicTypeTest/basicTable_test.go create mode 100644 test/basicTypeTest/basicVector_test.go create mode 100644 test/connectionPool_test.go create mode 100644 test/createDatabase_test.go create mode 100644 test/dbConnection_test.go create mode 100644 test/dfsTable_test.go create mode 100644 test/dropPartition_tabletMultiple_test.go create mode 100644 test/dropPartition_tabletSingle_test.go create mode 100644 test/existsDatabase_test.go create mode 100644 test/existsTable_test.go create mode 100644 test/loadTableBySQL_test.go create mode 100644 test/loadTable_test.go create mode 100644 test/loadText_test.go create mode 100644 test/multigoroutinetable_test.go create mode 100644 test/ploadText_test.go create mode 100644 test/run_function_test.go create mode 100644 test/saveTable_test.go create mode 100644 test/saveText_test.go create mode 100644 test/setup/settings.go create mode 100644 test/streaming/goroutineClient_test.go create mode 100644 test/streaming/goroutinePooledClient_test.go create mode 100644 test/streaming/pollingClient_test.go create mode 100644 test/streaming/util.go create mode 100644 test/table_test.go create mode 100644 test/undef_test.go create mode 100644 test/util.go diff --git a/.gitignore b/.gitignore new file mode 100644 index 0000000..29d754d --- /dev/null +++ b/.gitignore @@ -0,0 +1,27 @@ +# Binaries for programs and plugins +*.exe +*.exe~ +*.dll +*.so +*.dylib +bin + +# Test binary, build with `go test -c` +*.test + +# Output of the go coverage tool, specifically when used with LiteIDE +*.out + +# Kubernetes Generated files - skip generated files, except for vendored files + +!vendor/**/zz_generated.* + +# editor and IDE paraphernalia +.idea +*.swp +*.swo +*~ +.vscode/ +*.dos +coverage.txt +performance/ diff --git a/.golangci.yaml 
b/.golangci.yaml new file mode 100644 index 0000000..f599e09 --- /dev/null +++ b/.golangci.yaml @@ -0,0 +1,57 @@ +run: + skip-dirs: + - test + - performance + - example +linters-settings: + stylecheck: + # Select the Go version to target. + # Default: 1.13 + go: "1.15" + # https://staticcheck.io/docs/options#checks + checks: ["all", "-ST1000", "-ST1003", "-ST1016", "-ST1020", "-ST1021", "-ST1022"] + # https://staticcheck.io/docs/options#dot_import_whitelist + dot-import-whitelist: + - fmt + # https://staticcheck.io/docs/options#initialisms + initialisms: ["ACL", "API", "ASCII", "CPU", "CSS", "DNS", "EOF", "GUID", "HTML", "HTTP", "HTTPS", "ID", "IP", "JSON", "QPS", "RAM", "RPC", "SLA", "SMTP", "SQL", "SSH", "TCP", "TLS", "TTL", "UDP", "UI", "GID", "UID", "UUID", "URI", "URL", "UTF8", "VM", "XML", "XMPP", "XSRF", "XSS"] + # https://staticcheck.io/docs/options#http_status_code_whitelist + http-status-code-whitelist: ["200", "400", "404", "500"] +linters: + enable-all: true + disable: + - wrapcheck + - ireturn + - dupl + - varnamelen + - gosec + - gocyclo + - stylecheck + - nlreturn + - gochecknoglobals + - goerr113 + - gomnd + - maintidx + - gofumpt + - gci + - gocritic + - forcetypeassert + - exhaustivestruct + - exhaustruct + - exhaustive + - cyclop + - containedctx + - wsl + - paralleltest + - testpackage + - maligned + - gocognit + - forbidigo + - nestif + - lll + - funlen + - godox + - scopelint + - golint + - interfacer + - ifshort \ No newline at end of file diff --git a/README.md b/README.md index 00ec644..7c71e3a 100644 --- a/README.md +++ b/README.md @@ -1,11 +1,1429 @@ -# DolphinDB Go API +

# DolphinDB Go API

[![GitHub release](https://img.shields.io/github/release/dolphindb/api-go.svg?style=flat-square)](https://github.com/dolphindb/api-go/releases/latest)
[![PkgGoDev](https://img.shields.io/badge/go.dev-docs-007d9c?style=flat-square&logo=go&logoColor=white)](https://pkg.go.dev/github.com/dolphindb/api-go)

-DolphinDB server ships as two release lines, release130 and release200, whose features differ only slightly: release200 supports everything in release130 plus the TSDB storage engine and related features such as array vectors. The Go API provides a README matching each server release line, each describing the features that API version supports, to help users pick the appropriate DolphinDB server version.

Welcome to the DolphinDB Go API. With the Go API you can connect to a DolphinDB database directly from a Go program to create databases and tables, and to read and write data.

-[readme for release200](https://github.com/dolphindb/api-go/blob/release200/README.md)

- [1. Overview](#1-overview)
- [2. Installation](#2-installation)
- [3. Usage Examples](#3-usage-examples)
  - [3.1. Initializing DolphinDB](#31-initializing-dolphindb)
  - [3.2. Creating Databases and Tables via the API](#32-creating-databases-and-tables-via-the-api)
  - [3.3. Using Basic Functions](#33-using-basic-functions)
    - [3.3.1. Constructing Data Types](#331-constructing-data-types)
      - [3.3.1.1. NewDataType Argument Reference](#3311-newdatatype-argument-reference)
      - [3.3.1.2. NewDataTypeListWithRaw Argument Reference](#3312-newdatatypelistwithraw-argument-reference)
      - [3.3.1.3. Null Value Reference](#3313-null-value-reference)
    - [3.3.2. Complete Example](#332-complete-example)
  - [3.4. Initializing DBConnectionPool](#34-initializing-dbconnectionpool)
- [4. Reading and Writing DolphinDB Tables](#4-reading-and-writing-dolphindb-tables)
  - [4.1. Reading and Using Tables](#41-reading-and-using-tables)
    - [4.1.1. Reading a DFS Table](#411-reading-a-dfs-table)
    - [4.1.2. Using the Table Object](#412-using-the-table-object)
  - [4.2. Saving Data to an In-Memory Table](#42-saving-data-to-an-in-memory-table)
    - [4.2.1. Appending a Single Row with `insert into`](#421-appending-a-single-row-with-insert-into)
    - [4.2.2. Appending Arrays in Batch with `tableInsert`](#422-appending-arrays-in-batch-with-tableinsert)
    - [4.2.3. Appending a `Table` Object with `tableInsert`](#423-appending-a-table-object-with-tableinsert)
  - [4.3. Saving Data to a DFS Table](#43-saving-data-to-a-dfs-table)
    - [4.3.1. Synchronous Appends](#431-synchronous-appends)
      - [4.3.1.1. Inserting a `Table` Object into a DFS Table with `tableInsert`](#4311-inserting-a-table-object-into-a-dfs-table-with-tableinsert)
    - [4.3.2. Asynchronous Appends](#432-asynchronous-appends)
      - [4.3.2.1. Concurrent Writes to a DFS Table](#4321-concurrent-writes-to-a-dfs-table)
  - [4.4. Batched Asynchronous Appends](#44-batched-asynchronous-appends)
    - [4.4.1. MultiGoroutineTable](#441-multigoroutinetable)
    - [4.4.2. Common MultiGoroutineTable Errors](#442-common-multigoroutinetable-errors)
- [5. Streaming API](#5-streaming-api)
  - [5.1. Code Examples](#51-code-examples)
  - [5.2. Reconnection](#52-reconnection)
  - [5.3. Enabling Filters](#53-enabling-filters)
  - [5.4. Unsubscribing](#54-unsubscribing)

-[readme for release130](https://github.com/dolphindb/api-go/blob/release130/README.md)

## 1. Overview

The Go API requires Go 1.15 or later.

The Go API stores data returned by the server behind the `DataForm` interface. Its `GetDataForm` method reports which of the eight supported data forms a value is, so that the value can be converted to the corresponding concrete DolphinDB type. The mapping is:

| `GetDataForm` return value | Concrete type |
| -------------------------- | ------------- |
| DfScalar(0)                | Scalar        |
| DfVector(1)                | Vector        |
| DfPair(2)                  | Pair          |
| DfMatrix(3)                | Matrix        |
| DfSet(4)                   | Set           |
| DfDictionary(5)            | Dictionary    |
| DfTable(6)                 | Table         |
| DfChart(7)                 | Chart         |

The core interface of the Go API is `DolphinDB`. Through it the API executes scripts and functions on the DolphinDB server and moves data in both directions. Initialize a `DolphinDB` instance with `NewDolphinDBClient` or `NewSimpleDolphinDBClient`. The object provides the following main methods:

| Method                    | Description                                                   |
| ------------------------- | ------------------------------------------------------------- |
| Connect(host, port)       | Connect the session to the DolphinDB server                    |
| Login(LoginRequest)       | Log in to the server                                           |
| Logout(LogoutRequest)     | Log out of the server                                          |
| RunScript(script)         | Run a script on the DolphinDB server                           |
| RunFile(fileName)         | Read a script from a file and run it on the DolphinDB server   |
| RunFunc(funcName,args)    | Call a function on the DolphinDB server                        |
| Upload(variableObjectMap) | Upload local data objects to the DolphinDB server              |
| Close()                   | Close the current session                                      |
| IsClosed()                | Check whether the session is closed                            |
| GetSession()              | Get the SessionID of the current session                       |

It also provides the following methods for database operations:

| Method                                      | Description                                       |
| ------------------------------------------- | ------------------------------------------------- |
| ExistsDatabase(ExistsDatabaseRequest)       | Check whether a database exists                   |
| Database(DatabaseRequest)                   | Create a database                                 |
| DropDatabase(DropDatabaseRequest)           | Drop a database                                   |
| ExistsTable(ExistsTableRequest)             | Check whether a table exists                      |
| Table(TableRequest)                         | Create an in-memory table                         |
| TableWithCapacity(TableWithCapacityRequest) | Create an in-memory table with a given capacity   |
| SaveTable(SaveTableRequest)                 | Save a table                                      |
| LoadTable(LoadTableRequest)                 | Load a table                                      |
| LoadText(LoadTextRequest)                   | Load a data file into a DolphinDB in-memory table |
| SaveText(SaveTextRequest)                   | Save text to a file                               |
| PloadText(PloadTextRequest)                 | Load a data file into memory in parallel          |
| LoadTableBySQL(LoadTableBySQLRequest)       | Load a table via a SQL query                      |
| DropPartition(DropPartitionRequest)         | Drop the specified partitions of a database       |
| DropTable(DropTableRequest)                 | Drop a table                                      |
| Undef(UndefRequest)                         | Release the specified objects                     |
| UndefAll()                                  | Release all objects                               |
| ClearAllCache(ClearAllCacheRequest)         | Clear all caches                                  |

Creating a database with the `Database` method returns a `Database` object, which provides:

| Method                                                | Description                |
| ----------------------------------------------------- | -------------------------- |
| CreateTable(CreateTableRequest)                       | Create a dimension table   |
| CreatePartitionedTable(CreatePartitionedTableRequest) | Create a partitioned table |

For complete, runnable use cases of the Go API, see the [example directory](https://github.com/dolphindb/api-go/tree/master/example).
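As a quick illustration of the mapping above, a result returned by `RunScript` can be narrowed to its concrete type by switching on `GetDataForm`. The sketch below is illustrative only: it assumes a connected `DolphinDB` instance `db` (created as in section 3.1 below) and that the `DfXxx` constants from the table are exported by the `model` package:

```go
raw, err := db.RunScript("1..10")
if err != nil {
    panic(err)
}

// narrow the DataForm to its concrete type
switch raw.GetDataForm() {
case model.DfVector:
    vct := raw.(*model.Vector)
    fmt.Println(vct.String())
case model.DfTable:
    tbl := raw.(*model.Table)
    fmt.Println(tbl.GetColumnNames())
default:
    fmt.Println(raw)
}
```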
## 2. Installation

Install the Go API with `go get`:

```sh
$ go get -u github.com/dolphindb/api-go
```

## 3. Usage Examples

### 3.1. Initializing DolphinDB

The Go API offers two ways to initialize a `DolphinDB` instance, `NewDolphinDBClient` and `NewSimpleDolphinDBClient`:

1. `NewDolphinDBClient` only initializes the client; connect and log in separately with `Connect` and `Login`. This method supports configuring behavior flags.

```go
package main

import (
    "context"

    "github.com/dolphindb/api-go/api"
)

func main() {
    host := ""
    // init client
    db, err := api.NewDolphinDBClient(context.TODO(), host, nil)
    if err != nil {
        // Handle exception
        panic(err)
    }

    // connect to server
    err = db.Connect()
    if err != nil {
        // Handle exception
        panic(err)
    }

    // init login request
    loginReq := &api.LoginRequest{
        UserID:   "userID",
        Password: "password",
    }

    // login dolphindb
    err = db.Login(loginReq)
    if err != nil {
        // Handle exception
        panic(err)
    }
}
```

2. `NewSimpleDolphinDBClient` initializes the client and also connects to and logs in to the server in one call. This method does not support configuring behavior flags.

```go
package main

import (
    "context"

    "github.com/dolphindb/api-go/api"
)

func main() {
    host := ""

    // new a client which has logged in the server
    db, err := api.NewSimpleDolphinDBClient(context.TODO(), host, "userID", "passWord")
    if err != nil {
        // Handle exception
        panic(err)
    }

    // db is now ready for use, e.g. db.RunScript(...)
    _ = db
}
```

### 3.2. Creating Databases and Tables via the API

```go
package main

import (
    "context"

    "github.com/dolphindb/api-go/api"
)

func main() {
    host := ""
    db, err := api.NewSimpleDolphinDBClient(context.TODO(), host, "userID", "passWord")
    if err != nil {
        // Handle exception
        panic(err)
    }

    // init create database request
    dbReq := &api.DatabaseRequest{
        Directory:       "dfs://db1",
        PartitionType:   "VALUE",
        PartitionScheme: "1..10",
        DBHandle:        "example",
    }

    // create database
    dt, err := db.Database(dbReq)
    if err != nil {
        // Handle exception
        panic(err)
    }

    // init create partitioned table request
    createReq := &api.CreatePartitionedTableRequest{
        SrcTable:             "sourceTable",
        PartitionedTableName: "tableName",
        PartitionColumns:     []string{"id"},
    }

    // create partitioned table with database handler
    _, err = dt.CreatePartitionedTable(createReq)
    if err != nil {
        // Handle exception
        panic(err)
    }
}
```
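Note that the request above names `SrcTable: "sourceTable"`, an in-memory table that must already exist in the session to serve as the schema template. A minimal, hypothetical sketch that creates it with `RunScript` before the calls above (the column layout here is made up for illustration; `id` matches the partitioning column):

```go
// create the template table referenced by SrcTable (illustrative schema only)
_, err = db.RunScript("sourceTable = table(100:0, `id`val, [INT, DOUBLE])")
if err != nil {
    // Handle exception
    panic(err)
}
```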
### 3.3. Using Basic Functions

#### 3.3.1. Constructing Data Types

The Go API provides `NewDataType` to construct a data type object, and `NewDataTypeList` and `NewDataTypeListWithRaw` to construct lists of data types. This section shows by example how to construct the common data types and lists. When you already hold data type objects, build a list with `NewDataTypeList`; otherwise use `NewDataTypeListWithRaw`, whose accepted arguments are listed in the [argument reference](#3312-newdatatypelistwithraw-argument-reference).

```go
package main

import (
    "fmt"

    "github.com/dolphindb/api-go/model"
)

func main() {
    // new a string datatype variable
    dt, err := model.NewDataType(model.DtString, "sample")
    if err != nil {
        fmt.Println(err)
        return
    }

    // print value of variable with string format
    fmt.Println(dt.String())

    // print variable datatype
    fmt.Println(dt.DataType())

    // new datatypelist with datatype variable
    dl := model.NewDataTypeList(model.DtString, []model.DataType{dt})

    // print value of variable with string format
    fmt.Println(dl.StringList())

    // print number of elements
    fmt.Println(dl.Len())

    // print variable datatype
    fmt.Println(dt.DataType())

    // new datatypelist with basic type
    dl, err = model.NewDataTypeListWithRaw(model.DtString, []string{"sample", "test"})
    if err != nil {
        fmt.Println(err)
        return
    }

    // new a scalar object
    s := model.NewScalar(dt)

    fmt.Println(s)

    // new a vector object
    vct := model.NewVector(dl)

    fmt.Println(vct)

    // new a pair object
    p := model.NewPair(vct)

    fmt.Println(p)

    // new a matrix object
    data, err := model.NewDataTypeListWithRaw(model.DtInt, []int32{2, 3, 4, 5, 6, 3, 4, 5, 6, 7, 4, 5, 6, 7, 8, 5, 6, 7, 8, 9, 6, 7, 8, 9, 10})
    if err != nil {
        fmt.Println(err)
        return
    }

    rowlabel, err := model.NewDataTypeListWithRaw(model.DtInt, []int32{1, 2, 3, 4, 5})
    if err != nil {
        fmt.Println(err)
        return
    }

    colLabel, err := model.NewDataTypeListWithRaw(model.DtInt, []int32{1, 2, 3, 4, 5})
    if err != nil {
        fmt.Println(err)
        return
    }

    m := model.NewMatrix(model.NewVector(data), model.NewVector(rowlabel), model.NewVector(colLabel))

    fmt.Println(m)

    // new a set object
    set := model.NewSet(vct)

    fmt.Println(set)

    // new a dictionary object
    keys, err := model.NewDataTypeListWithRaw(model.DtString, []string{"key1", "key2"})
    if err != nil {
        fmt.Println(err)
        return
    }

    values, err := model.NewDataTypeListWithRaw(model.DtString, []string{"value1", "value2"})
    if err != nil {
        fmt.Println(err)
        return
    }

    dict := model.NewDictionary(model.NewVector(keys), model.NewVector(values))

    fmt.Println(dict)

    // new a table object
    tb := model.NewTable([]string{"key"}, []*model.Vector{vct})

    fmt.Println(tb)
}
```

##### 3.3.1.1. NewDataType Argument Reference

| datatype | arg |
| ------------------------------------------------------------ | ---------- |
| DtBool, DtChar | byte |
| DtBlob | []byte |
| DtComplex, DtPoint | [2]float64 |
| DtDouble | float64 |
| DtFloat | float32 |
| DtInt | int32 |
| DtLong | int64 |
| DtShort | int16 |
| DtTimestamp, DtMonth, DtSecond, DtNanoTimestamp, DtNanoTime, DtMinute, DtDatetime, DtDateHour, DtDate | time.Time |
| DtAny | Dataform |
| DtString, DtSymbol, DtUuid, DtIP, DtInt128, DtDuration | string |

* Note: when the datatype is DtBool, passing 0 means false, passing NullBool means Null, and any other value means true.

##### 3.3.1.2. NewDataTypeListWithRaw Argument Reference

Because Go does not allow nil elements in a slice of a concrete type, when passing a slice containing null values through the Go API, write the nulls using the designated sentinel values; see the [null value reference](#3313-null-value-reference).

| datatype | args |
| ------------------------------------------------------------ | ------------ |
| DtBool, DtChar | []byte |
| DtBlob | [][]byte |
| DtComplex, DtPoint | [][2]float64 |
| DtDouble | []float64 |
| DtFloat | []float32 |
| DtInt | []int32 |
| DtLong | []int64 |
| DtShort | []int16 |
| DtTimestamp, DtMonth, DtSecond, DtNanoTimestamp, DtNanoTime, DtMinute, DtDatetime, DtDateHour, DtDate | []time.Time |
| DtAny | []Dataform |
| DtString, DtSymbol, DtUuid, DtIP, DtInt128, DtDuration | []string |

* Note: when the datatype is DtBool, passing 0 means false, passing NullBool means Null, and any other value means true.

##### 3.3.1.3. Null Value Reference

| datatype | Null value |
| ------------------------------------------------------------ | ------------ |
| DtBool | NullBool |
| DtBlob | NullBlob |
| DtChar | NullChar |
| DtComplex | NullComplex |
| DtDate, DtDateHour, DtDatetime, DtMinute, DtNanoTime, DtNanoTimestamp, DtSecond, DtMonth, DtTimestamp | NullTime |
| DtDouble | NullDouble |
| DtFloat | NullFloat |
| DtDuration | NullDuration |
| DtInt | NullInt |
| DtInt128 | NullInt128 |
| DtIP | NullIP |
| DtLong | NullLong |
| DtPoint | NullPoint |
| DtShort | NullShort |
| DtUuid | NullUUID |
| DtAny | NullAny |
| DtString, DtSymbol | NullString |

Example:

```go
_, err := model.NewDataTypeListWithRaw(model.DtBool, []byte{1, 0, model.NullBool})
if err != nil {
    fmt.Println(err)
    return
}
```

#### 3.3.2. Complete Example

```go
package main

import (
    "context"
    "fmt"

    "github.com/dolphindb/api-go/api"
    "github.com/dolphindb/api-go/model"
)

func main() {
    host := ""
    db, err := api.NewSimpleDolphinDBClient(context.TODO(), host, "userID", "passWord")
    if err != nil {
        // Handle exceptions
        panic(err)
    }

    // run script on dolphindb server
    raw, err := db.RunScript("schema(tablename)")
    if err != nil {
        // Handle err
        panic(err)
    }

    // print the real dataform
    fmt.Println(raw.GetDataForm())

    // get the variable with real type
    dict := raw.(*model.Dictionary)
    fmt.Println(dict)

    // declare the specified variable on the server
    _, err = db.Upload(map[string]model.DataForm{"dict": dict})
    if err != nil {
        // Handle exception
        panic(err)
    }

    // run function on dolphindb server
    _, err = db.RunFunc("typestr", []model.DataForm{dict})
    if err != nil {
        // Handle exception
        panic(err)
    }
}
```
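The same `Upload` round trip works for locally constructed data as well: build a `Table` with the constructors from section 3.3.1, upload it, and query it back with a script. A minimal sketch under the same assumptions (a connected `db`, imports as in the example above); the server variable name `t` is arbitrary:

```go
// build a one-column table locally
ids, err := model.NewDataTypeListWithRaw(model.DtInt, []int32{1, 2, 3})
if err != nil {
    panic(err)
}
tb := model.NewTable([]string{"id"}, []*model.Vector{model.NewVector(ids)})

// upload it as server variable `t`, then query it back
_, err = db.Upload(map[string]model.DataForm{"t": tb})
if err != nil {
    panic(err)
}

res, err := db.RunScript("select * from t")
if err != nil {
    panic(err)
}
fmt.Println(res)
```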
### 3.4. Initializing DBConnectionPool

`DBConnectionPool` reuses multiple connections. Run tasks directly with its `Execute` method, then fetch each task's result with the `Task` method `GetResult`.

| Method                               | Description                       |
| :----------------------------------- | :-------------------------------- |
| NewDBConnectionPool(opt *PoolOption) | Initialize a connection pool      |
| Execute(tasks []*Task)               | Execute a batch of tasks          |
| GetPoolSize()                        | Get the number of connections     |
| Close()                              | Close the pool                    |
| IsClosed()                           | Check whether the pool is closed  |

`PoolOption` fields:

* Address: string, the address of the server to connect to.
* UserID / Password: strings, the username and password for login.
* PoolSize: int, the capacity of the pool.
* LoadBalance: bool, whether to enable load balancing; when enabled, connections are created against the addresses of the individual data nodes.
* LoadBalanceAddresses: []string, used to specify the data nodes.

`Task` wraps the methods for inspecting a task's result.

| Method      | Description                              |
| :---------- | :--------------------------------------- |
| IsSuccess() | Whether the task executed successfully   |
| GetResult() | Get the result of the script             |
| GetError()  | Get the error raised while the task ran  |

Create a `DBConnectionPool` with 10 connections:

```go
poolOpt := &api.PoolOption{
    Address:  "ServerIP:Port",
    UserID:   "UserID",
    Password: "Password",
    PoolSize: 10,
}

pool, err := api.NewDBConnectionPool(poolOpt)
if err != nil {
    fmt.Println(err)
    return
}
```

Create a task:

```go
task := &api.Task{Script: "1..10"}
err = pool.Execute([]*api.Task{task})
if err != nil {
    fmt.Println(err)
    return
}
```

Check whether the task succeeded; on success fetch the result, otherwise fetch the error:

```go
var data *model.Vector
if task.IsSuccess() {
    data = task.GetResult().(*model.Vector)
    fmt.Println(data)
} else {
    fmt.Println(task.GetError())
}
```

Output:

```
vector([1, 2, 3, 4, 5, 6, 7, 8, 9, 10])
```

Create several tasks and run them in parallel on the `DBConnectionPool`:

```go
tasks := make([]*api.Task, 10)
for i := 0; i < 10; i++ {
    tasks[i] = &api.Task{
        Script: "log",
        Args:   []model.DataForm{model.NewScalar(data.Get(i))},
    }
}

err = pool.Execute(tasks)
if err != nil {
    fmt.Println(err)
    return
}
```

Check whether all tasks succeeded; on success fetch the results, otherwise fetch the errors:

```go
for _, v := range tasks {
    if v.IsSuccess() {
        fmt.Println(v.GetResult().String())
    } else {
        fmt.Println(v.GetError())
    }
}
```

Output:

```go
double(0)
double(0.6931471805599453)
double(1.0986122886681096)
double(1.3862943611198906)
double(1.6094379124341003)
double(1.791759469228055)
double(1.9459101490553132)
double(2.0794415416798357)
double(2.1972245773362196)
double(2.302585092994046)
```

## 4. Reading and Writing DolphinDB Tables

DolphinDB tables fall into two storage categories:

- In-memory tables: data lives only in memory, so access is fastest, but the data is lost when the node shuts down.
- DFS (distributed) tables: data is distributed across nodes, yet through DolphinDB's distributed computing engine it can still be queried logically as if it were a single local table.

### 4.1. Reading and Using Tables

#### 4.1.1. Reading a DFS Table

The following Go API code reads data from a DFS table:

```go
dbPath := "dfs://testDatabase"
tbName := "tb1"
conn, err := api.NewSimpleDolphinDBClient(context.TODO(), "ServerIP:Port", "admin", "123456")
if err != nil {
    fmt.Println(err)
    return
}

tb, err := conn.RunScript(fmt.Sprintf("select * from loadTable('%s','%s') where cdate = 2017.05.03", dbPath, tbName))
if err != nil {
    fmt.Println(err)
    return
}

fmt.Println(tb)
```

#### 4.1.2. Using the Table Object

The Go API stores result tables in `Table` objects. A `Table` is column-oriented: rows cannot be read directly, so you fetch a column first and then the values within it. Take a table object `t` with four columns named cstring, cint, ctimestamp and cdouble, of types STRING, INT, TIMESTAMP and DOUBLE. The following prints each column's name and values:

```go
for _, v := range t.GetColumnNames() {
    fmt.Println("ColumnName: ", v)
    col := t.GetColumnByName(v)
    fmt.Println("ColumnValue: ", col.String())
}
```
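To read a single cell, fetch the column and index into it with `Get`, the same accessor used on vectors in the `DBConnectionPool` example of section 3.4; `String()` renders the value. A small sketch, assuming the table `t` described above:

```go
// read row 0 of column "cint" as a single DataType value
col := t.GetColumnByName("cint")
cell := col.Get(0)
fmt.Println("first cint value: ", cell.String())
```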
### 4.2. Saving Data to an In-Memory Table

DolphinDB provides several ways to save data to an in-memory table:

- save a single record with `insert into`
- save multiple records in batch with the `tableInsert` function
- save a table object with the `tableInsert` function
- append data with `append!`

The Go API sends the insert scripts to the server for execution through the `RunScript` interface. Saving data with `append!` is not recommended, because `append!` returns the table schema, which generates unnecessary network traffic.

The examples below walk through the first three approaches. First, create an in-memory table on the DolphinDB server (for example, through the GUI) and share it:

```sql
t = table(10000:0,`cstring`cint`ctimestamp`cdouble,[STRING,INT,TIMESTAMP,DOUBLE])
share t as sharedTable
```

Because variables in DolphinDB are session-isolated, the in-memory table is visible only to the session that created it; sharing it with `share` makes it accessible from other sessions.

#### 4.2.1. Appending a Single Row with `insert into`

To save a single record to a DolphinDB in-memory table, use a SQL-like `insert into` statement:

```go
func testSaveInsert(str string, i int, ts int64, dbl float64, db api.DolphinDB) {
    df, err := db.RunScript(fmt.Sprintf("insert into sharedTable values('%s',%d,%d,%f)", str, i, ts, dbl))
    if err != nil {
        fmt.Println(err)
        return
    }

    fmt.Println(df)
}
```

#### 4.2.2. Appending Arrays in Batch with `tableInsert`

`tableInsert` appends multiple arrays to a DolphinDB in-memory table and is well suited to saving data in batch. If the data on the Go side can be organized as `[]model.DataForm`, save it with `tableInsert`:

```go
func testTableInsert(strVector, intVector, timestampVector, doubleVector *model.Vector, db api.DolphinDB) {
    args := make([]model.DataForm, 4)
    args[0] = strVector
    args[1] = intVector
    args[2] = timestampVector
    args[3] = doubleVector
    df, err := db.RunFunc("tableInsert{sharedTable}", args)
    if err != nil {
        fmt.Println(err)
        return
    }

    fmt.Println(df)
}
```

This example uses DolphinDB's [partial application](https://www.dolphindb.cn/cn/help/200/Functionalprogramming/PartialApplication.html): the server-side table name is bound into `tableInsert` as `tableInsert{sharedTable}`, which is then used as a standalone function.

#### 4.2.3. Appending a `Table` Object with `tableInsert`

`tableInsert` also accepts a table object as its argument for batch inserts. Organize the data into a `Table` object and insert it into the DolphinDB table with `tableInsert`:

```go
func testTableInsert(tableObj *model.Table, db api.DolphinDB) {
    args := make([]model.DataForm, 1)
    args[0] = tableObj
    df, err := db.RunFunc("tableInsert{sharedTable}", args)
    if err != nil {
        fmt.Println(err)
        return
    }

    fmt.Println(df)
}
```

### 4.3. Saving Data to a DFS Table

DFS tables are the storage DolphinDB recommends for production. They support snapshot-level transaction isolation to guarantee data consistency, and a multi-replica mechanism that provides both fault tolerance and load balancing of data access. The Go API can insert into DFS tables either synchronously or asynchronously.

#### 4.3.1. Synchronous Appends

##### 4.3.1.1. Inserting a `Table` Object into a DFS Table with `tableInsert`

Create a DFS table on the DolphinDB server (for example, through the GUI):

```sql
dbPath = 'dfs://testDatabase'
tbName = 'tb1'

if(existsDatabase(dbPath)){dropDatabase(dbPath)}
db = database(dbPath,RANGE,2018.01.01..2018.12.31)
db.createPartitionedTable(t,tbName,'ctimestamp')
```

DolphinDB's `loadTable` loads the DFS table, and `tableInsert` appends data to it:

```go
func testTableInsert(dbPath string, tableObj *model.Table, db api.DolphinDB) {
    args := make([]model.DataForm, 1)
    args[0] = tableObj
    df, err := db.RunFunc(fmt.Sprintf("tableInsert{loadTable('%s','tb1')}", dbPath), args)
    if err != nil {
        fmt.Println(err)
        return
    }

    fmt.Println(df)
}
```
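After a synchronous append you can confirm that the rows landed by querying the count back. A minimal sketch reusing `db` and `dbPath` from the snippets above:

```go
// count the rows now stored in the DFS table
raw, err := db.RunScript(fmt.Sprintf("exec count(*) from loadTable('%s','tb1')", dbPath))
if err != nil {
    fmt.Println(err)
    return
}
fmt.Println(raw)
```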
#### 4.3.2. Asynchronous Appends

##### 4.3.2.1. Concurrent Writes to a DFS Table

DolphinDB DFS tables support concurrent reads and writes. The Go API provides `PartitionedTableAppender` for concurrent writes to a DFS table; it writes at table granularity only.

With server version 1.30 or higher, you can write to DFS tables through the Go API's `PartitionedTableAppender`. Internally it maintains a connection pool for multi-goroutine writes: incoming data is split by the specified partitioning column and dispatched to different connections, which write in parallel.

The following shows how to write data concurrently to a DolphinDB DFS table from a Go client.

First, run the following script on the DolphinDB server to create the distributed database `"dfs://demohash"` and the DFS table `"pt"`. The database uses a composite (COMPO) partition of two HASH levels:

```sql
t = table(timestamp(1..10) as date,string(1..10) as sym)
db1=database("",HASH,[DATETIME,10])
db2=database("",HASH,[STRING,5])
if(existsDatabase("dfs://demohash")){
    dropDatabase("dfs://demohash")
}
db =database("dfs://demohash",COMPO,[db2,db1])
pt = db.createPartitionedTable(t,`pt,`sym`date)
```

Next, initialize a `PartitionedTableAppender` with the Go API:

```go
poolOpt := &api.PoolOption{
    Address:     "ServerIP:Port",
    UserID:      "admin",
    Password:    "123456",
    PoolSize:    3,
    LoadBalance: true,
}

pool, err := api.NewDBConnectionPool(poolOpt)
if err != nil {
    fmt.Println(err)
    return
}

appenderOpt := &api.PartitionedTableAppenderOption{
    Pool:         pool,
    DBPath:       "dfs://demohash",
    TableName:    "pt",
    PartitionCol: "sym",
}

appender, err := api.NewPartitionedTableAppender(appenderOpt)
if err != nil {
    fmt.Println(err)
    return
}
```

`PartitionedTableAppenderOption` fields:
* Pool: the connection pool.
* DBPath: string, the path of the distributed database.
* TableName: string, the name of the DFS table.
* PartitionCol: string, the name of the DFS table's partitioning column.
* AppendFunction: optional; the name of a custom write function. If omitted, the built-in `tableInsert` function is used.

Finally, insert the data into the partitioned table:

```go
colNames := []string{"sym", "date"}

sym, err := model.NewDataTypeListWithRaw(model.DtString, []string{"sample", "test"})
if err != nil {
    fmt.Println(err)
    return
}

date, err := model.NewDataTypeListWithRaw(model.DtDatetime, []time.Time{time.Date(1969, 12, 31, 0, 0, 0, 0, time.UTC), time.Date(1970, 1, 1, 0, 0, 0, 0, time.UTC)})
if err != nil {
    fmt.Println(err)
    return
}

col1 := model.NewVector(sym)
col2 := model.NewVector(date)

m, err := appender.Append(model.NewTable(colNames, []*model.Vector{col1, col2}))
if err != nil {
    fmt.Println(err)
    return
}

fmt.Println(m)
```
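When the appender is no longer needed, release its connections by closing the underlying pool with the `DBConnectionPool` methods from section 3.4; a minimal sketch:

```go
// release the pool once no more appends are coming
if !pool.IsClosed() {
    pool.Close()
}
```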
### 4.4. Batched Asynchronous Appends

The DolphinDB Go API provides the `MultiGoroutineTable` object for batched asynchronous appends, backed by a buffer queue maintained on the client side. While the server is busy with network I/O, client writer goroutines keep pushing data into the client-side buffer and return as soon as the data is enqueued, avoiding busy-waiting. `MultiGoroutineTable` currently supports batch writes to in-memory tables, partitioned tables, and dimension tables.

Notes on asynchronous writes:

* Data is appended to the table row by row.
* The API client submits a task to the buffer queue; once the queue accepts it, the client considers the task complete.
* Use the `GetStatus` method to inspect the status.

#### 4.4.1. MultiGoroutineTable

`MultiGoroutineTable` supports concurrent writes from multiple goroutines.

Initialize a `MultiGoroutineTable` object as follows:

```go
opt := &multigoroutinetable.Option{
    Database:       "dbName",
    Address:        "ServerIP:Port",
    UserID:         "admin",
    Password:       "123456",
    TableName:      "tbName",
    GoroutineCount: 2,
    PartitionCol:   "colName",
    BatchSize:      1000,
    Throttle:       1,
}

writer, err := multigoroutinetable.NewMultiGoroutineTable(opt)
if err != nil {
    fmt.Println(err)
    return
}
```

`Option` fields:

* Address: string, the address of the server to connect to.
* UserID / Password: strings, the username and password for login.
* Database: string, the path or handle of the database. Leave it unset for an in-memory table.
* TableName: string, the name of the table.
* BatchSize: int, the number of messages per batch. With a value of 1, the client sends data to the server as soon as it is written; with a value greater than 1, the client sends data only once BatchSize rows have accumulated.
* Throttle: an integer greater than 0, in milliseconds. If data has been written but fewer than BatchSize rows are pending, the client waits Throttle before sending.
* GoroutineCount: int, the number of worker goroutines to create; 1 means single-goroutine. For dimension tables it must be 1.
* PartitionCol: string, empty by default; only takes effect when GoroutineCount is greater than 1. For a partitioned table it must be the partitioning column; for a stream table it must be a column name; for a dimension table it is ignored.

The methods of `MultiGoroutineTable` are described below.

```go
Insert(args ...interface{}) error
```

Description:

Inserts a single row and returns an error. The type of each argument must match the storage type of the corresponding column, or be the underlying Go type for that storage type; see the [NewDataType argument reference](#3311-newdatatype-argument-reference).
Because writes are asynchronous, a nil error does not mean the write succeeded; to check whether it did, print the object returned by `GetStatus`.

Parameters:

* args: variadic arguments representing one row of data.

Example:

```go
err = writer.Insert("2", time.Date(2022, time.Month(1), 1, 1, 1, 0, 0, time.UTC))
```

```go
GetUnwrittenData() [][]model.DataType
```

Description:

Returns a nested list of the data that has not been written to the server.

Note: once this method hands the data out, `MultiGoroutineTable` releases its own references to it.

Example:

```go
unwrittenData := writer.GetUnwrittenData()
```

```go
InsertUnwrittenData(records [][]model.DataType) error
```

Description:

Inserts data into the table; the return value is the same as for Insert. The difference is that Insert writes a single row at a time, while InsertUnwrittenData can write multiple rows at once.

Parameters:

* **records**: the data to write again; obtain it with GetUnwrittenData.

Example:

```go
err = writer.InsertUnwrittenData(unwrittenData)
```

```go
GetStatus() *Status
```

Description:

Gets the current running state of the `MultiGoroutineTable`.

Return value:

* **Status**: the execution state of the MultiGoroutineTable, with the attributes listed below.

Example:

```go
writeStatus := writer.GetStatus()
```

Status attributes:

* isExit: whether the writer goroutines are exiting.
* errMsg: the error message.
* sentRows: the total number of rows sent successfully.
* unSentRows: the total number of rows pending.
* sendFailedRows: the total number of rows that failed to send.
* goroutineStatus: the per-goroutine status list.
  - goroutineId: goroutine ID.
  - sentRows: rows this goroutine sent successfully.
  - unSentRows: rows this goroutine still has pending.
  - sendFailedRows: rows this goroutine failed to send.

```go
WaitForGoroutineCompletion()
```

Description:

Puts the `MultiGoroutineTable` into a waiting state and returns once all background worker goroutines have finished.

Example:

```go
writer.WaitForGoroutineCompletion()
```

A typical `MultiGoroutineTable` session looks like this:

```go
conn, err := api.NewSimpleDolphinDBClient(context.TODO(), "ServerIP:Port", "admin", "123456")
if err != nil {
    return
}

buf := bytes.NewBufferString("dbName = 'dfs://valuedb3'\n")
buf.WriteString("if (exists(dbName)){dropDatabase(dbName);}\n")
buf.WriteString("datetest = table(1000:0,`date`symbol`id,[DATE, SYMBOL, LONG]);\n")
buf.WriteString("db = database(directory= dbName, partitionType= HASH, partitionScheme=[INT, 10]);")
buf.WriteString("pt = db.createPartitionedTable(datetest,'pdatetest','id');")
_, err = conn.RunScript(buf.String())
if err != nil {
    return
}

opt := &multigoroutinetable.Option{
    Database:       "dfs://valuedb3",
    Address:        "ServerIP:Port",
    UserID:         "admin",
    Password:       "123456",
    TableName:      "pdatetest",
    GoroutineCount: 5,
    PartitionCol:   "id",
    BatchSize:      10000,
    Throttle:       1,
}

writer, err := multigoroutinetable.NewMultiGoroutineTable(opt)
if err != nil {
    return
}

// insert 100 rows of data
for ind := 0; ind < 100; ind++ {
    err = writer.Insert(time.Date(2022, time.Month(1), 1, 1, 1, 0, 0, time.UTC), "AAAAAAAAAB", rand.Int63()%10000)
    if err != nil {
        return
    }
}

// wait for insertion to complete
writer.WaitForGoroutineCompletion()

status := writer.GetStatus()
fmt.Println("writeStatus: \n", status)

raw, err := conn.RunScript("exec count(*) from pt")
if err != nil {
    return
}

fmt.Println(raw)
```

The output:

```sh
"""
writeStatus:
errMsg         :
isExit         : false
sentRows       : 100
unSentRows     : 0
sendFailedRows : 0
goroutineStatus :
  goroutineIndex: 0, sentRows: 18, unSentRows: 0, sendFailedRows: 0
  goroutineIndex: 1, sentRows: 23, unSentRows: 0, sendFailedRows: 0
  goroutineIndex: 2, sentRows: 19, unSentRows: 0, sendFailedRows: 0
  goroutineIndex: 3, sentRows: 20, unSentRows: 0, sendFailedRows: 0
  goroutineIndex: 4, sentRows: 20, unSentRows: 0, sendFailedRows: 0

long(100)
"""
```

Write data with writer.Insert() and inspect the writer's state with writer.GetStatus(). Note that waiting for completion with writer.WaitForGoroutineCompletion() terminates all of the `MultiGoroutineTable`'s worker goroutines, keeping only the information from the last write. If you need to write more data afterwards, you must obtain a new `MultiGoroutineTable` object.

As the example shows, `MultiGoroutineTable` uses multiple goroutines internally to convert and write data. Externally, client code may likewise write into a `MultiGoroutineTable` from multiple goroutines, and doing so is goroutine-safe.
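To illustrate that goroutine-safety, the sketch below drives a freshly created `writer` (set up as in the example above) from several application goroutines, using `sync.WaitGroup` from the standard library; the row values are arbitrary:

```go
var wg sync.WaitGroup
for g := 0; g < 4; g++ {
    wg.Add(1)
    go func() {
        defer wg.Done()
        // each goroutine appends 25 rows concurrently
        for i := 0; i < 25; i++ {
            err := writer.Insert(time.Date(2022, time.January, 1, 1, 1, 0, 0, time.UTC),
                "AAAAAAAAAB", rand.Int63()%10000)
            if err != nil {
                fmt.Println(err)
                return
            }
        }
    }()
}
wg.Wait()

// flush pending rows and stop the worker goroutines
writer.WaitForGoroutineCompletion()
```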
#### 4.4.2. Common MultiGoroutineTable Errors

Errors returned when inserting data with `Insert`:

If the type of a value passed to `Insert` does not match the type of the corresponding column, `MultiGoroutineTable` returns an error immediately.

Example:

```go
conn, err := api.NewSimpleDolphinDBClient(context.TODO(), "ServerIP:Port", "admin", "123456")
if err != nil {
    return
}

buf := bytes.NewBufferString("dbName = 'dfs://valuedb3'\n")
buf.WriteString("if (exists(dbName)){dropDatabase(dbName);}\n")
buf.WriteString("datetest = table(1000:0,`date`symbol`id,[DATE, SYMBOL, LONG]);\n")
buf.WriteString("db = database(directory= dbName, partitionType= HASH, partitionScheme=[INT, 10]);")
buf.WriteString("pt = db.createPartitionedTable(datetest,'pdatetest','id');")
_, err = conn.RunScript(buf.String())
if err != nil {
    return
}

opt := &multigoroutinetable.Option{
    Database:       "dfs://valuedb3",
    Address:        "ServerIP:Port",
    UserID:         "admin",
    Password:       "123456",
    TableName:      "pdatetest",
    GoroutineCount: 5,
    PartitionCol:   "id",
    BatchSize:      10000,
    Throttle:       1,
}

writer, err := multigoroutinetable.NewMultiGoroutineTable(opt)
if err != nil {
    return
}

// insert data with wrong type
err = writer.Insert(time.Date(2022, time.Month(1), 1, 1, 1, 0, 0, time.UTC), 222, rand.Int63()%10000)
if err != nil {
    fmt.Println(err)
    return
}
```

Output:

```go
"""
the type of in must be string when datatype is DtString, DtCode, DtFunction, DtHandle or DtSymbol
"""
```

If the number of values passed to `Insert` does not match the number of columns in the table, `MultiGoroutineTable` also returns an error immediately.

Example:

```go
conn, err := api.NewSimpleDolphinDBClient(context.TODO(), "ServerIP:Port", "admin", "123456")
if err != nil {
    return
}

buf := bytes.NewBufferString("dbName = 'dfs://valuedb3'\n")
buf.WriteString("if (exists(dbName)){dropDatabase(dbName);}\n")
buf.WriteString("datetest = table(1000:0,`date`symbol`id,[DATE, SYMBOL, LONG]);\n")
buf.WriteString("db = database(directory= dbName, partitionType= HASH, partitionScheme=[INT, 10]);")
buf.WriteString("pt = db.createPartitionedTable(datetest,'pdatetest','id');")
_, err = conn.RunScript(buf.String())
if err != nil {
    return
}

opt := &multigoroutinetable.Option{
    Database:       "dfs://valuedb3",
    Address:        "ServerIP:Port",
    UserID:         "admin",
    Password:       "123456",
    TableName:      "pdatetest",
    GoroutineCount: 5,
    PartitionCol:   "id",
    BatchSize:      10000,
    Throttle:       1,
}

writer, err := multigoroutinetable.NewMultiGoroutineTable(opt)
if err != nil {
    return
}

// insert a row with a missing column
err = writer.Insert(time.Date(2022, time.Month(1), 1, 1, 1, 0, 0, time.UTC), rand.Int63()%10000)
if err != nil {
    fmt.Println(err)
    return
}
```

Output:

```sh
"""
Column counts don't match
"""
```
If the connection drops while a `MultiGoroutineTable` is running, all of its worker goroutines are terminated. Further writes through it then fail because the workers have exited, and the data is not written. In that case, call the `MultiGoroutineTable`'s `GetUnwrittenData` method to retrieve the rows that were never inserted, and insert them again.

Example:

```go
conn, err := api.NewSimpleDolphinDBClient(context.TODO(), "ServerIP:Port", "admin", "123456")
if err != nil {
    return
}

buf := bytes.NewBufferString("dbName = 'dfs://valuedb3'\n")
buf.WriteString("if (exists(dbName)){dropDatabase(dbName);}\n")
buf.WriteString("datetest = table(1000:0,`date`symbol`id,[DATE, SYMBOL, LONG]);\n")
buf.WriteString("db = database(directory= dbName, partitionType= HASH, partitionScheme=[INT, 10]);")
buf.WriteString("pt = db.createPartitionedTable(datetest,'pdatetest','id');")
_, err = conn.RunScript(buf.String())
if err != nil {
    return
}

opt := &multigoroutinetable.Option{
    Database:       "dfs://valuedb3",
    Address:        "ServerIP:Port",
    UserID:         "admin",
    Password:       "123456",
    TableName:      "pdatetest",
    GoroutineCount: 5,
    PartitionCol:   "id",
    BatchSize:      10000,
    Throttle:       1,
}

writer, err := multigoroutinetable.NewMultiGoroutineTable(opt)
if err != nil {
    return
}

// insert data
err = writer.Insert(time.Date(2022, time.Month(1), 1, 1, 1, 0, 0, time.UTC), rand.Int63()%10000)
if err != nil {
    fmt.Println(err)
    return
}

unwriterdata := writer.GetUnwrittenData()
fmt.Println("unWriterdata: ", len(unwriterdata))

// renew MultiGoroutineTable object
writer, err = multigoroutinetable.NewMultiGoroutineTable(opt)
if err != nil {
    return
}

err = writer.InsertUnwrittenData(unwriterdata)
if err != nil {
    return
}

// wait for insertion to complete
writer.WaitForGoroutineCompletion()

status := writer.GetStatus()
fmt.Println("writeStatus: \n", status)
```

Output:

```go
"""
unWriterdata: 10
writeStatus:
errMsg         :
isExit         : true
sentRows       : 10
unSentRows     : 0
sendFailedRows : 0
goroutineStatus :
  goroutineIndex: 0, sentRows: 3, unSentRows: 0, sendFailedRows: 0
  goroutineIndex: 1, sentRows: 2, unSentRows: 0, sendFailedRows: 0
  goroutineIndex: 2, sentRows: 1, unSentRows: 0, sendFailedRows: 0
  goroutineIndex: 3, sentRows: 3, unSentRows: 0, sendFailedRows: 0
  goroutineIndex: 4, sentRows: 1, unSentRows: 0, sendFailedRows: 0
"""
```

## 5. Streaming API

The Go API can subscribe to streaming data. There are three ways to create a subscription client: a single-goroutine callback client (GoroutineClient), a multi-goroutine callback client (GoroutinePooledClient), and a polling client (PollingClient) whose returned object exposes a message queue.
### 5.1. Code Examples

The following shows how to subscribe to streaming data with each of the three methods.

- Polling: the client application periodically checks the stream table for new data; PollingClient is recommended for this style.

```go
client := streaming.NewPollingClient("localhost", 8101)
req := &streaming.SubscribeRequest{
    Address:    "ServerIP:Port",
    TableName:  "pub1",
    ActionName: "action1",
    Offset:     0,
    Reconnect:  true,
}

poller, err := client.Subscribe(req)
if err != nil {
    return
}

msgs := poller.Poll(1000, 1000)
fmt.Println(msgs)
```

`SubscribeRequest` fields:

* Address: the address of the publisher node.
* TableName: the name of the published stream table.
* ActionName: the name of the subscription task.
* BatchSize: int, the number of messages per batch. If positive, the Handler processes incoming messages only once their number reaches BatchSize; if unspecified or non-positive, the Handler processes each message as soon as it arrives.
* Offset: int, the position of the first message of the subscription, where a message is a row of the stream table. If Offset is unspecified, negative, or beyond the number of rows in the stream table, the subscription starts from the current row. Offset is relative to the first row when the stream table was created; if some rows have been dropped due to memory limits, they still count when determining the starting position.
* AllowExists: when true, re-subscribing to an existing subscription does not raise an exception. Defaults to false.
* Throttle: float, the time in seconds the Handler waits before processing arriving messages. Defaults to 1. Has no effect unless BatchSize is specified.
* Reconnect: bool, whether the subscription automatically resubscribes after an interruption.
* Filter: a vector of filter values. Only rows whose value in the stream table's filter column appears in the filter vector are published to the subscriber.
* Handler: a user-defined callback invoked on each batch of incoming data; only available on the subscription clients that support callbacks.

Once the poller detects new data in the stream table, it pulls it down. When nothing new is published, the program blocks in `poller.Poll` until the timeout expires.

- Callbacks: to receive new data through a MessageHandler callback, first define a handler that implements the `streaming.MessageHandler` interface:

```go
type sampleHandler struct{}

func (s *sampleHandler) DoEvent(msg streaming.IMessage) {
    // do something
}
```

When starting the subscription, pass the handler instance in the request. Two callback clients are available:

 1. Single-goroutine callback (GoroutineClient)

```go
client := streaming.NewGoroutineClient("localhost", 8100)
req := &streaming.SubscribeRequest{
    Address:    "ServerIP:Port",
    TableName:  "pub",
    ActionName: "action1",
    Handler:    new(sampleHandler),
    Offset:     0,
    Reconnect:  true,
}

err := client.Subscribe(req)
if err != nil {
    return
}
```

When new rows arrive in the stream table, the system notifies the Go API to invoke sampleHandler's DoEvent method, passing the new data through the msg parameter.

 2. Multi-goroutine callback (GoroutinePooledClient)

```go
client := streaming.NewGoroutinePooledClient("localhost", 8100)
req := &streaming.SubscribeRequest{
    Address:    "ServerIP:Port",
    TableName:  "pub",
    ActionName: "action1",
    Handler:    new(sampleHandler),
    Offset:     0,
    Reconnect:  true,
}

err := client.Subscribe(req)
if err != nil {
    return
}
```

### 5.2. Reconnection

`Reconnect` is a boolean indicating whether the subscription automatically resubscribes after an unexpected interruption; it defaults to false.

When `Reconnect` is true, whether and how the system resubscribes depends on the cause of the interruption:

- If the publisher and subscriber are both healthy but the network drops, the subscriber automatically resubscribes from the interruption point once the network recovers.
- If the publisher crashes, the subscriber keeps retrying after the publisher restarts.
  - If the publisher persisted the stream table, it first reloads the data from disk after restarting; the subscriber can resubscribe successfully only once the publisher has read past the interruption point.
  - If the stream table was not persisted, automatic resubscription fails.
- If the subscriber crashes, it does not resubscribe automatically after restarting; call `Subscribe` again.

The following subscribes with `Reconnect` set to true:

```go
client := streaming.NewPollingClient("localhost", 8101)
req := &streaming.SubscribeRequest{
    Address:    "ServerIP:Port",
    TableName:  "pub1",
    ActionName: "action1",
    Offset:     0,
    Reconnect:  true,
}

_, err := client.Subscribe(req)
if err != nil {
    return
}
```
### 5.3. Enabling Filters

The `Filter` parameter is a vector, used together with the publisher-side `setStreamTableFilterColumn` function. `setStreamTableFilterColumn` designates the stream table's filter column; only rows whose filter-column value appears in the filter vector are published to the subscriber, and rows whose value does not are not published.

The following passes an integer vector containing the elements 1 and 2 as the `Filter` parameter of `Subscribe`:

```go
dtl, err := model.NewDataTypeListWithRaw(model.DtInt, []int32{1, 2})
if err != nil {
    return
}

client := streaming.NewPollingClient("localhost", 8101)
req := &streaming.SubscribeRequest{
    Address:    "ServerIP:Port",
    TableName:  "pub1",
    ActionName: "action1",
    Offset:     0,
    Reconnect:  true,
    Filter:     model.NewVector(dtl),
}

_, err = client.Subscribe(req)
if err != nil {
    return
}
```

### 5.4. Unsubscribing

Every subscription is uniquely identified by a subscription topic. Subscribing fails if the topic already exists; in that case, cancel the existing subscription with `UnSubscribe` before subscribing again:

```go
err = client.UnSubscribe(req)
if err != nil {
    return
}
```
\ No newline at end of file diff --git a/api/account.go b/api/account.go new file mode 100644 index 0000000..4d5400b --- /dev/null +++ b/api/account.go @@ -0,0 +1,30 @@ +package api + +import "fmt" + +// AccountAPI interface declares apis about account. +type AccountAPI interface { + // Login dolphindb. + // See DolphinDB function `login`: https://www.dolphindb.cn/cn/help/130/FunctionsandCommands/CommandsReferences/l/login.html?highlight=login + Login(l *LoginRequest) error + + // Logout dolphindb. + // See DolphinDB function `logout`: https://www.dolphindb.cn/cn/help/130/FunctionsandCommands/CommandsReferences/l/logout.html?highlight=logout + Logout() error +} + +// Login dolphindb. +// See DolphinDB function `login`: https://www.dolphindb.cn/cn/help/130/FunctionsandCommands/CommandsReferences/l/login.html?highlight=login +func (c *dolphindb) Login(l *LoginRequest) error { + _, err := c.RunScript(fmt.Sprintf("login('%s','%s')", l.UserID, l.Password)) + + return err +} + +// Logout dolphindb. +// See DolphinDB function `logout`: https://www.dolphindb.cn/cn/help/130/FunctionsandCommands/CommandsReferences/l/logout.html?highlight=logout +func (c *dolphindb) Logout() error { + _, err := c.RunScript("logout()") + + return err +} diff --git a/api/account_test.go b/api/account_test.go new file mode 100644 index 0000000..c66cd8a --- /dev/null +++ b/api/account_test.go @@ -0,0 +1,143 @@ +package api + +import ( + "context" + "net" + "os" + "strings" + "testing" + + "github.com/stretchr/testify/assert" +) + +const testAddress = "127.0.0.1:3003" + +func TestAccount(t *testing.T) { + db, err := NewDolphinDBClient(context.TODO(), testAddress, nil) + assert.Nil(t, err) + + loginReq := new(LoginRequest). + SetPassword("password"). + SetUserID("user") + err = db.Login(loginReq) + assert.Nil(t, err) + + err = db.Logout() + assert.Nil(t, err) +} + +func TestMain(m *testing.M) { + exit := make(chan bool) + + ln, err := net.Listen("tcp", testAddress) + if err != nil { + return + } + go func() { + for !isExit(exit) { + conn, err := ln.Accept() + if err != nil { + return + } + + go handleData(conn) + } + + ln.Close() + }() + + exitCode := m.Run() + + close(exit) + + os.Exit(exitCode) +} + +func handleData(conn net.Conn) { + defer conn.Close() + + res := make([]byte, 0) + for { + buf := make([]byte, 512) + l, err := conn.Read(buf) + if err != nil { + continue + } + + res = append(res, buf[0:l]...)
+ script := string(res) + length := len(res) + var resp []byte + switch { + case (length == 62 || length == 57) && strings.Index(script, "schema") > 0: + resp = []byte{0x32, 0x37, 0x38, 0x30, 0x30, 0x39, 0x30, 0x36, 0x35, 0x30, 0x20, 0x31, 0x20, 0x31, 0x0a, 0x4f, 0x4b, 0x0a, 0x19, 0x05, 0x12, 0x01, 0x0c, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x70, 0x61, 0x72, 0x74, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x53, 0x69, 0x74, 0x65, 0x73, 0x00, 0x70, 0x61, 0x72, 0x74, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x00, 0x65, 0x6e, 0x67, 0x69, 0x6e, 0x65, 0x54, 0x79, 0x70, 0x65, + 0x00, 0x6b, 0x65, 0x65, 0x70, 0x44, 0x75, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x65, 0x73, 0x00, 0x63, 0x68, 0x75, 0x6e, 0x6b, 0x47, 0x72, 0x61, 0x6e, 0x75, 0x6c, 0x61, 0x72, 0x69, 0x74, 0x79, 0x00, 0x63, 0x6f, 0x6c, 0x44, 0x65, 0x66, 0x73, 0x00, 0x63, 0x68, 0x75, 0x6e, 0x6b, 0x50, 0x61, 0x74, 0x68, 0x00, 0x70, 0x61, 0x72, 0x74, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x43, 0x6f, 0x6c, 0x75, 0x6d, 0x6e, 0x49, 0x6e, 0x64, 0x65, 0x78, 0x00, 0x70, 0x61, 0x72, 0x74, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x43, 0x6f, 0x6c, 0x75, 0x6d, 0x6e, 0x54, 0x79, 0x70, 0x65, 0x00, 0x70, 0x61, 0x72, 0x74, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x43, 0x6f, 0x6c, 0x75, 0x6d, + 0x6e, 0x4e, 0x61, 0x6d, 0x65, 0x00, 0x70, 0x61, 0x72, 0x74, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x54, 0x79, 0x70, 0x65, 0x4e, 0x61, 0x6d, 0x65, 0x00, 0x70, 0x61, 0x72, 0x74, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x54, 0x79, 0x70, 0x65, 0x00, 0x19, 0x01, 0x0c, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x19, 0x01, 0x02, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x06, 0x01, 0x65, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x4a, 0x4b, 0x00, 0x00, 0x49, 0x4b, 0x00, 0x00, 0x48, 0x4b, 0x00, 0x00, 0x47, 0x4b, 0x00, 0x00, 0x46, 0x4b, 0x00, 0x00, 0x45, 0x4b, 0x00, 0x00, 0x44, 0x4b, 0x00, 0x00, 0x43, 0x4b, 0x00, 0x00, 0x42, 0x4b, 0x00, + 0x00, 0x41, 0x4b, 0x00, 0x00, 0x40, 0x4b, 0x00, 0x00, 0x3f, 0x4b, 0x00, 0x00, 0x3e, 0x4b, 0x00, 0x00, 0x3d, 0x4b, 0x00, 0x00, 0x3c, 0x4b, 0x00, 0x00, 0x3b, 0x4b, 0x00, 0x00, 0x3a, 0x4b, 0x00, 0x00, 0x39, 0x4b, 0x00, 0x00, 0x38, 0x4b, 0x00, 0x00, 0x37, 0x4b, 0x00, 0x00, 0x36, 0x4b, 0x00, 0x00, 0x35, 0x4b, 0x00, 0x00, 0x34, 0x4b, 0x00, 0x00, 0x33, 0x4b, 0x00, 0x00, 0x32, 0x4b, 0x00, 0x00, 0x31, 0x4b, 0x00, 0x00, 0x30, 0x4b, 0x00, 0x00, 0x2f, 0x4b, 0x00, 0x00, 0x2e, 0x4b, 0x00, 0x00, 0x2d, 0x4b, 0x00, 0x00, 0x2c, 0x4b, 0x00, 0x00, 0x2b, 0x4b, 0x00, 0x00, 0x2a, 0x4b, 0x00, 0x00, 0x29, 0x4b, 0x00, 0x00, 0x28, 0x4b, 0x00, 0x00, 0x27, + 0x4b, 0x00, 0x00, 0x26, 0x4b, 0x00, 0x00, 0x25, 0x4b, 0x00, 0x00, 0x24, 0x4b, 0x00, 0x00, 0x23, 0x4b, 0x00, 0x00, 0x22, 0x4b, 0x00, 0x00, 0x21, 0x4b, 0x00, 0x00, 0x20, 0x4b, 0x00, 0x00, 0x1f, 0x4b, 0x00, 0x00, 0x1e, 0x4b, 0x00, 0x00, 0x1d, 0x4b, 0x00, 0x00, 0x1c, 0x4b, 0x00, 0x00, 0x1b, 0x4b, 0x00, 0x00, 0x1a, 0x4b, 0x00, 0x00, 0x19, 0x4b, 0x00, 0x00, 0x18, 0x4b, 0x00, 0x00, 0x17, 0x4b, 0x00, 0x00, 0x16, 0x4b, 0x00, 0x00, 0x15, 0x4b, 0x00, 0x00, 0x14, 0x4b, 0x00, 0x00, 0x13, 0x4b, 0x00, 0x00, 0x12, 0x4b, 0x00, 0x00, 0x11, 0x4b, 0x00, 0x00, 0x10, 0x4b, 0x00, 0x00, 0x0f, 0x4b, 0x00, 0x00, 0x0e, 0x4b, 0x00, 0x00, 0x0d, 0x4b, 0x00, + 0x00, 0x0c, 0x4b, 0x00, 0x00, 0x0b, 0x4b, 0x00, 0x00, 0x0a, 0x4b, 0x00, 0x00, 0x09, 0x4b, 0x00, 0x00, 0x08, 0x4b, 0x00, 0x00, 0x07, 0x4b, 0x00, 0x00, 0x06, 0x4b, 0x00, 0x00, 0x05, 0x4b, 0x00, 0x00, 0x04, 0x4b, 0x00, 0x00, 0x03, 0x4b, 0x00, 0x00, 0x02, 0x4b, 0x00, 0x00, 0x01, 0x4b, 0x00, 0x00, 0x00, 0x4b, 0x00, 0x00, 0xff, 0x4a, 0x00, 0x00, 0xfe, 0x4a, 0x00, 0x00, 0xfd, 0x4a, 0x00, 0x00, 0xfc, 0x4a, 
0x00, 0x00, 0xfb, 0x4a, 0x00, 0x00, 0xfa, 0x4a, 0x00, 0x00, 0xf9, 0x4a, 0x00, 0x00, 0xf8, 0x4a, 0x00, 0x00, 0xf7, 0x4a, 0x00, 0x00, 0xf6, 0x4a, 0x00, 0x00, 0xf5, 0x4a, 0x00, 0x00, 0xf4, 0x4a, 0x00, 0x00, 0xf3, 0x4a, 0x00, 0x00, 0xf2, + 0x4a, 0x00, 0x00, 0xf1, 0x4a, 0x00, 0x00, 0xf0, 0x4a, 0x00, 0x00, 0xef, 0x4a, 0x00, 0x00, 0xee, 0x4a, 0x00, 0x00, 0xed, 0x4a, 0x00, 0x00, 0xec, 0x4a, 0x00, 0x00, 0xeb, 0x4a, 0x00, 0x00, 0xea, 0x4a, 0x00, 0x00, 0xe9, 0x4a, 0x00, 0x00, 0xe8, 0x4a, 0x00, 0x00, 0xe7, 0x4a, 0x00, 0x00, 0xe6, 0x4a, 0x00, 0x00, 0x04, 0x00, 0x05, 0x00, 0x00, 0x00, 0x12, 0x00, 0x4f, 0x4c, 0x41, 0x50, 0x00, 0x12, 0x00, 0x41, 0x4c, 0x4c, 0x00, 0x12, 0x00, 0x54, 0x41, 0x42, 0x4c, 0x45, 0x00, 0x00, 0x06, 0x02, 0x00, 0x00, 0x00, 0x04, 0x00, 0x00, 0x00, 0x00, 0x6e, 0x61, 0x6d, 0x65, 0x00, 0x74, 0x79, 0x70, 0x65, 0x53, 0x74, 0x72, 0x69, 0x6e, 0x67, 0x00, 0x74, + 0x79, 0x70, 0x65, 0x49, 0x6e, 0x74, 0x00, 0x63, 0x6f, 0x6d, 0x6d, 0x65, 0x6e, 0x74, 0x00, 0x12, 0x01, 0x02, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x64, 0x61, 0x74, 0x65, 0x00, 0x73, 0x79, 0x6d, 0x00, 0x12, 0x01, 0x02, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x44, 0x41, 0x54, 0x45, 0x00, 0x53, 0x54, 0x52, 0x49, 0x4e, 0x47, 0x00, 0x04, 0x01, 0x02, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x06, 0x00, 0x00, 0x00, 0x12, 0x00, 0x00, 0x00, 0x12, 0x01, 0x02, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x12, 0x00, 0x00, 0x04, 0x01, 0x02, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, + 0x00, 0x04, 0x01, 0x02, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x06, 0x00, 0x00, 0x00, 0x12, 0x00, 0x00, 0x00, 0x12, 0x01, 0x02, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x64, 0x61, 0x74, 0x65, 0x00, 0x73, 0x79, 0x6d, 0x00, 0x12, 0x01, 0x02, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x56, 0x41, 0x4c, 0x55, 0x45, 0x00, 0x48, 0x41, 0x53, 0x48, 0x00, 0x04, 0x01, 0x02, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x05, 0x00, 0x00, 0x00} + case isSuccessRequest(length): + resp = []byte{0x32, 0x30, 0x32, 0x36, 0x37, 0x33, 0x35, 0x39, 0x20, 0x30, 0x20, 0x31, 0x0a, 0x4f, 0x4b, 0x0a} + case length == 149 || length == 141: + resp = []byte{0x32, 0x30, 0x32, 0x36, 0x37, 0x33, 0x35, 0x39, 0x20, 0x31, 0x20, 0x31, 0x0a, 0x4f, 0x4b, 0x0a, 0x04, 0x00, 0x01, 0x00, 0x00, 0x00} + case length == 43 || length == 50: + resp = []byte{0x31, 0x38, 0x32, 0x33, 0x32, 0x38, 0x39, 0x31, 0x37, 0x36, 0x20, 0x31, 0x20, 0x31, 0x0a, 0x4f, + 0x4b, 0x0a, 0x00, 0x06, 0x03, 0x00, 0x00, 0x00, 0x02, 0x00, 0x00, 0x00, 0x00, 0x69, 0x64, 0x00, 0x78, 0x00, 0x12, 0x01, 0x03, + 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x58, 0x4f, 0x4d, 0x00, 0x47, 0x53, 0x00, 0x41, 0x41, 0x50, 0x4c, 0x00, 0x10, 0x01, + 0x03, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x66, 0x66, 0x66, 0x66, 0x66, 0x86, 0x59, 0x40, 0x33, 0x33, 0x33, 0x33, 0x33, + 0xb3, 0x40, 0x40, 0x66, 0x66, 0x66, 0x66, 0x66, 0x66, 0x52, 0x40} + case length == 82 && strings.Index(script, "getClusterLiveDataNodes") > 0: + resp = []byte{0x32, 0x38, 0x35, 0x36, 0x30, 0x35, 0x34, 0x32, 0x36, 0x33, 0x20, 0x31, 0x20, 0x31, 0x0a, 0x4f, 0x4b, 0x0a, 0x12, + 0x01, 0x01, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x31, 0x32, 0x37, 0x2e, 0x30, 0x2e, 0x30, 0x2e, 0x31, 0x3a, 0x33, + 0x30, 0x30, 0x33, 0x3a, 0x6c, 0x6f, 0x63, 0x61, 0x6c, 0x33, 0x30, 0x30, 0x33, 0x00} + case length == 35 && strings.Index(script, "schema") > 0: + resp = []byte{0x32, 0x37, 0x38, 0x30, 0x30, 0x39, 0x30, 0x36, 0x35, 0x30, 0x20, 0x31, 0x20, 0x31, 0x0a, 0x4f, 0x4b, + 0x0a, 0x19, 0x05, 0x12, 0x01, 0x06, 0x00, 0x00, 
0x00, 0x01, 0x00, 0x00, 0x00, 0x70, 0x61, 0x72, 0x74, 0x69, 0x74,
+ 0x69, 0x6f, 0x6e, 0x43, 0x6f, 0x6c, 0x75, 0x6d, 0x6e, 0x4e, 0x61, 0x6d, 0x65, 0x00, 0x70, 0x61, 0x72, 0x74, 0x69,
+ 0x74, 0x69, 0x6f, 0x6e, 0x43, 0x6f, 0x6c, 0x75, 0x6d, 0x6e, 0x49, 0x6e, 0x64, 0x65, 0x78, 0x00, 0x70, 0x61, 0x72,
+ 0x74, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x00, 0x70, 0x61, 0x72, 0x74, 0x69, 0x74,
+ 0x69, 0x6f, 0x6e, 0x54, 0x79, 0x70, 0x65, 0x00, 0x70, 0x61, 0x72, 0x74, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x43, 0x6f,
+ 0x6c, 0x75, 0x6d, 0x6e, 0x54, 0x79, 0x70, 0x65, 0x00, 0x63, 0x6f, 0x6c, 0x44, 0x65, 0x66, 0x73, 0x00, 0x19, 0x01,
+ 0x06, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x12, 0x00, 0x73, 0x79, 0x6d, 0x00, 0x04, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x12, 0x01, 0x01, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x74, 0x00, 0x04, 0x00, 0x01, 0x00, 0x00, 0x00,
+ 0x04, 0x00, 0x01, 0x00, 0x00, 0x00, 0x00, 0x06, 0x02, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x00, 0x74, 0x79,
+ 0x70, 0x65, 0x49, 0x6e, 0x74, 0x00, 0x04, 0x01, 0x02, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x06, 0x00, 0x00,
+ 0x00, 0x12, 0x00, 0x00, 0x00}
+ case length == 51 || length == 55:
+ resp = []byte{0x32, 0x30, 0x32, 0x36, 0x37, 0x33, 0x35, 0x39, 0x20, 0x31, 0x20, 0x31, 0x0a, 0x4f, 0x4b, 0x0a, 0x01, 0x00, 0x00}
+ }
+
+ if len(resp) > 0 {
+ _, err = conn.Write(resp)
+ if err != nil {
+ return
+ }
+
+ res = make([]byte, 0)
+ }
+ }
+}
+
+var successLength = []int{33, 48, 95, 67, 52, 57, 54, 63, 15, 40, 42, 45, 44, 78, 89, 90, 79, 87, 64, 53, 34, 41, 49, 60, 150, 30, 59}
+
+func isSuccessRequest(l int) bool {
+ for _, v := range successLength {
+ if v == l {
+ return true
+ }
+ }
+
+ return false
+}
+
+func isExit(exit <-chan bool) bool {
+ select {
+ case <-exit:
+ return true
+ default:
+ return false
+ }
+}
diff --git a/api/client.go b/api/client.go
new file mode 100644
index 0000000..f903aca
--- /dev/null
+++ b/api/client.go
@@ -0,0 +1,457 @@
+package api
+
+import (
+ "context"
+ "errors"
+ "fmt"
+ "strings"
+
+ "github.com/dolphindb/api-go/dialer"
+ "github.com/dolphindb/api-go/model"
+)
+
+// DolphinDB interface declares functions to communicate with the DolphinDB server.
+type DolphinDB interface {
+ dialer.Conn
+
+ AccountAPI
+ DatabaseAPI
+ TableAPI
+}
+
+type dolphindb struct {
+ dialer.Conn
+
+ addr string
+
+ ctx context.Context
+}
+
+// TableAPI declares the table-related APIs.
+type TableAPI interface {
+ // ExistsTable checks whether the table exists.
+ // See DolphinDB function `existsTable`:https://www.dolphindb.cn/cn/help/130/FunctionsandCommands/FunctionReferences/e/existsTable.html?highlight=existstable
+ ExistsTable(e *ExistsTableRequest) (bool, error)
+ // Table creates an in-memory table with columns.
+ // See DolphinDB function `table`:https://www.dolphindb.cn/cn/help/130/FunctionsandCommands/FunctionReferences/t/table.html?highlight=table
+ Table(t *TableRequest) (*Table, error)
+ // TableWithCapacity creates an in-memory table with a specific capacity.
+ // See DolphinDB function `table`:https://www.dolphindb.cn/cn/help/130/FunctionsandCommands/FunctionReferences/t/table.html?highlight=table
+ TableWithCapacity(t *TableWithCapacityRequest) (*Table, error)
+ // SaveTable saves a table.
+ // See DolphinDB function `saveTable`:https://www.dolphindb.cn/cn/help/130/FunctionsandCommands/CommandsReferences/s/saveTable.html?highlight=savetable
+ SaveTable(s *SaveTableRequest) error
+ // LoadTable loads a table into memory.
+ // See DolphinDB function `loadTable`:https://www.dolphindb.cn/cn/help/130/FunctionsandCommands/FunctionReferences/l/loadTable.html?highlight=loadtable
+ LoadTable(l *LoadTableRequest) (*Table, error)
+ // LoadText loads text from a file.
+ // See DolphinDB function `loadText`:https://www.dolphindb.cn/cn/help/130/FunctionsandCommands/FunctionReferences/l/loadText.html?highlight=loadtext
+ LoadText(l *LoadTextRequest) (*Table, error)
+ // SaveText saves text into a file.
+ // See DolphinDB function `saveText`:https://www.dolphindb.cn/cn/help/130/FunctionsandCommands/CommandsReferences/s/saveText.html?highlight=savetext
+ SaveText(l *SaveTextRequest) error
+ // PloadText loads text from a file in parallel.
+ // See DolphinDB function `pLoadText`:https://www.dolphindb.cn/cn/help/130/FunctionsandCommands/FunctionReferences/p/ploadText.html?highlight=ploadtext
+ PloadText(l *PloadTextRequest) (*Table, error)
+ // LoadTableBySQL loads a table using a SQL query.
+ // See DolphinDB function `loadTableBySQL`:https://www.dolphindb.cn/cn/help/130/FunctionsandCommands/FunctionReferences/l/loadTableBySQL.html?highlight=loadtablebysql
+ LoadTableBySQL(l *LoadTableBySQLRequest) (*Table, error)
+ // DropPartition drops the specified partition from a database.
+ // See DolphinDB function `dropPartition`:https://www.dolphindb.cn/cn/help/130/FunctionsandCommands/CommandsReferences/d/dropPartition.html?highlight=droppartition
+ DropPartition(l *DropPartitionRequest) error
+ // DropTable drops a table.
+ // See DolphinDB function `dropTable`:https://www.dolphindb.cn/cn/help/130/FunctionsandCommands/CommandsReferences/d/dropTable.html?highlight=droptable
+ DropTable(d *DropTableRequest) error
+ // Undef releases the specified objects.
+ // See DolphinDB function `undef`:https://www.dolphindb.cn/cn/help/130/FunctionsandCommands/CommandsReferences/u/undef.html?highlight=unde
+ Undef(u *UndefRequest) error
+ // UndefAll releases all objects.
+ // See DolphinDB function `undefAll`:https://www.dolphindb.cn/cn/help/130/FunctionsandCommands/CommandsReferences/u/undef.html?highlight=unde
+ UndefAll() error
+ // ClearAllCache clears all cached data.
+ // See DolphinDB function `clearAllCache`:https://www.dolphindb.cn/cn/help/130/FunctionsandCommands/CommandsReferences/c/clearAllCache.html?highlight=clearallcache
+ ClearAllCache(r *ClearAllCacheRequest) error
+}
+
+// DatabaseAPI declares the database-related APIs.
+type DatabaseAPI interface {
+ // ExistsDatabase checks whether the database already exists.
+ // See DolphinDB function `existsDatabase`:https://www.dolphindb.cn/cn/help/130/FunctionsandCommands/FunctionReferences/e/existsDatabase.html?highlight=existsdatabase
+ ExistsDatabase(e *ExistsDatabaseRequest) (bool, error)
+
+ // Database creates a database.
+ // See DolphinDB function `database`:https://www.dolphindb.cn/cn/help/130/FunctionsandCommands/FunctionReferences/d/database.html?highlight=database
+ Database(d *DatabaseRequest) (*Database, error)
+
+ // DropDatabase drops a database.
+ // See DolphinDB function `dropDatabase`: https://www.dolphindb.cn/cn/help/130/FunctionsandCommands/CommandsReferences/d/dropDatabase.html?highlight=dropdatabase
+ DropDatabase(d *DropDatabaseRequest) error
+}
+
+// NewDolphinDBClient returns an instance of DolphinDB configured with the addr and
+// the flags that affect the subsequent API calls.
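+//
+// A minimal usage sketch (illustrative only: the address is a placeholder and
+// a reachable server is assumed):
+//
+//	db, err := NewDolphinDBClient(context.TODO(), "127.0.0.1:8848", nil)
+//	if err != nil {
+//		panic(err)
+//	}
+//	defer db.Close()
+//	if err = db.Connect(); err != nil {
+//		panic(err)
+//	}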
+func NewDolphinDBClient(ctx context.Context, addr string, flags *dialer.BehaviorOptions) (DolphinDB, error) { + var err error + + c := &dolphindb{ + ctx: ctx, + addr: addr, + } + + c.Conn, err = dialer.NewConn(ctx, c.addr, flags) + if err != nil { + return nil, err + } + + return c, nil +} + +// NewSimpleDolphinDBClient returns an instance of DolphinDB which has logged in. +func NewSimpleDolphinDBClient(ctx context.Context, addr, userID, pwd string) (DolphinDB, error) { + var err error + + c := &dolphindb{ + ctx: ctx, + addr: addr, + } + + c.Conn, err = dialer.NewSimpleConn(ctx, addr, userID, pwd) + if err != nil { + return nil, err + } + + return c, nil +} + +func (c *dolphindb) ExistsDatabase(e *ExistsDatabaseRequest) (bool, error) { + res, err := c.RunScript(fmt.Sprintf("existsDatabase('%s')", e.Path)) + if err != nil { + return false, err + } + + b, ok := res.(*model.Scalar) + if !ok { + return false, errors.New("invalid response content") + } + + return b.Bool() +} + +func (c *dolphindb) Database(d *DatabaseRequest) (*Database, error) { + cmd := generateCreateDatabaseParam(d) + if d.DBHandle == "" { + d.DBHandle = generateDBName() + } + + _, err := c.RunScript(fmt.Sprintf("%s=database(%s)", d.DBHandle, cmd)) + if err != nil { + return nil, err + } + + return &Database{ + db: c, + Name: d.DBHandle, + }, nil +} + +func (c *dolphindb) DropDatabase(d *DropDatabaseRequest) error { + _, err := c.RunScript(fmt.Sprintf("dropDatabase('%s')", d.Directory)) + if err != nil { + return err + } + + return nil +} + +func (c *dolphindb) ExistsTable(t *ExistsTableRequest) (bool, error) { + res, err := c.RunScript(fmt.Sprintf("existsTable('%s','%s')", t.DBPath, t.TableName)) + if err != nil { + return false, err + } + + b, ok := res.(*model.Scalar) + if !ok { + return false, errors.New("invalid response content") + } + + return b.Bool() +} + +func (c *dolphindb) SaveTable(t *SaveTableRequest) error { + if t.DBHandle == "" { + t.DBHandle = generateDBName() + _, err := c.RunScript(fmt.Sprintf("%s=database('%s')", t.DBHandle, t.DBPath)) + if err != nil { + return err + } + } + _, err := c.RunScript(fmt.Sprintf("saveTable(%s)", generateSaveTableParam(t))) + if err != nil { + return err + } + + return nil +} + +func (c *dolphindb) LoadText(l *LoadTextRequest) (*Table, error) { + if l.Delimiter == "" { + l.Delimiter = "," + } + handle := generateTableName() + _, err := c.RunScript(fmt.Sprintf(`%s=loadText("%s","%s")`, handle, l.FileName, l.Delimiter)) + if err != nil { + return nil, err + } + + df, err := c.RunScript(fmt.Sprintf(`select * from %s`, handle)) + if err != nil { + return nil, err + } + + return &Table{ + db: c, + Data: df.(*model.Table), + Handle: handle, + }, nil +} + +// func (c *dolphindb) LoadTextEx(l *LoadTextExRequest) (*Table, error) { +// if l.Delimiter == "" { +// l.Delimiter = "," +// } + +// handle := generateTableName() + +// by := new(bytes.Buffer) +// by.WriteString(fmt.Sprintf(`%s=loadTextEx(%s,"%s"`, handle, l.DBName, handle)) +// if len(l.PartitionColumns) != 0 { +// by.WriteString(fmt.Sprintf(",`%s,\"%s\",%s", strings.Join(l.PartitionColumns, "`"), l.RemoteFilePath, l.Delimiter)) +// } else { +// by.WriteString(fmt.Sprintf(`, ,"%s", %s)`, l.RemoteFilePath, l.Delimiter)) +// } +// _, err := c.RunScript(by.String()) +// if err != nil { +// return nil, err +// } + +// df, err := c.RunScript(fmt.Sprintf(`select * from %s`, handle)) +// if err != nil { +// return nil, err +// } + +// return new(Table). +// setDB(c). +// SetHandle(handle). 
+// setData(df.(*model.Table)), nil +// } + +func (c *dolphindb) SaveText(l *SaveTextRequest) error { + _, err := c.RunScript(fmt.Sprintf(`saveText(%s,"%s")`, l.Obj, l.FileName)) + if err != nil { + return err + } + + return nil +} + +func (c *dolphindb) PloadText(p *PloadTextRequest) (*Table, error) { + if p.Delimiter == "" { + p.Delimiter = "," + } + handle := generateTableName() + + _, err := c.RunScript(fmt.Sprintf(`%s=ploadText("%s","%s")`, handle, p.FileName, p.Delimiter)) + if err != nil { + return nil, err + } + + df, err := c.RunScript(fmt.Sprintf(`select * from %s`, handle)) + if err != nil { + return nil, err + } + + return &Table{ + db: c, + Data: df.(*model.Table), + Handle: handle, + }, nil +} + +func (c *dolphindb) LoadTable(l *LoadTableRequest) (*Table, error) { + handle := generateTableName() + _, err := c.RunScript(fmt.Sprintf(`%s=loadTable("%s","%s",%s,%t)`, handle, l.Database, l.TableName, l.Partitions, l.MemoryMode)) + if err != nil { + return nil, err + } + + df, err := c.RunScript(fmt.Sprintf(`select * from %s`, handle)) + if err != nil { + return nil, err + } + + return &Table{ + db: c, + Data: df.(*model.Table), + Handle: handle, + }, nil +} + +func (c *dolphindb) LoadTableBySQL(l *LoadTableBySQLRequest) (*Table, error) { + if l.DBHandle == "" { + l.DBHandle = generateDBName() + _, err := c.RunScript(fmt.Sprintf(`%s=database("%s")`, l.DBHandle, l.DBPath)) + if err != nil { + return nil, err + } + } + + _, err := c.RunScript(fmt.Sprintf(`%s=%s.loadTable("%s")`, l.TableName, l.DBHandle, l.TableName)) + if err != nil { + return nil, err + } + + var sql string + if strings.HasPrefix(l.SQL, "sql(") { + _, err := c.RunScript(fmt.Sprintf(`st=%s`, l.SQL)) + if err != nil { + return nil, err + } + + sql = "st" + } else { + sql = "< " + l.SQL + " >" + } + + handle := generateTableName() + _, err = c.RunScript(fmt.Sprintf(`%s=loadTableBySQL(%s)`, handle, sql)) + if err != nil { + return nil, err + } + + df, err := c.RunScript(fmt.Sprintf(`select * from %s`, handle)) + if err != nil { + return nil, err + } + + return &Table{ + db: c, + Data: df.(*model.Table), + Handle: handle, + }, nil +} + +func (c *dolphindb) TableWithCapacity(t *TableWithCapacityRequest) (*Table, error) { + _, err := c.RunScript(fmt.Sprintf("%s=table(%d:%d, `%s, [%s])", t.TableName, t.Capacity, + t.Size, strings.Join(t.ColNames, "`"), strings.Join(t.ColTypes, ","))) + if err != nil { + return nil, err + } + + df, err := c.RunScript(fmt.Sprintf(`select * from %s`, t.TableName)) + if err != nil { + return nil, err + } + + return &Table{ + db: c, + Data: df.(*model.Table), + Handle: t.TableName, + }, nil +} + +func (c *dolphindb) Table(t *TableRequest) (*Table, error) { + names := make([]string, len(t.TableParams)) + for k, v := range t.TableParams { + names[k] = v.Key + _, err := c.RunScript(fmt.Sprintf("%s=%s", v.Key, v.Value)) + if err != nil { + return nil, err + } + } + _, err := c.RunScript(fmt.Sprintf("%s=table(%v)", t.TableName, strings.Join(names, ", "))) + if err != nil { + return nil, err + } + + df, err := c.RunScript(fmt.Sprintf(`select * from %s`, t.TableName)) + if err != nil { + return nil, err + } + + return &Table{ + db: c, + Data: df.(*model.Table), + Handle: t.TableName, + }, nil +} + +func (c *dolphindb) DropTable(d *DropTableRequest) error { + if d.DBHandle == "" { + d.DBHandle = generateDBName() + _, err := c.RunScript(fmt.Sprintf(`%s=database("%s")`, d.DBHandle, d.DBPath)) + if err != nil { + return err + } + } + + _, err := c.RunScript(fmt.Sprintf("dropTable(%s,'%s')", d.DBHandle, 
d.TableName)) + if err != nil { + return err + } + + return nil +} + +func (c *dolphindb) DropPartition(d *DropPartitionRequest) error { + if d.DBHandle == "" { + d.DBHandle = generateDBName() + _, err := c.RunScript(fmt.Sprintf(`%s=database("%s")`, d.DBHandle, d.DBPath)) + if err != nil { + return err + } + } + + _, err := c.RunScript(fmt.Sprintf("dropPartition(%s, %s, tableName=`%s)", d.DBHandle, d.PartitionPaths, d.TableName)) + if err != nil { + return err + } + + return nil +} + +func (c *dolphindb) Undef(u *UndefRequest) error { + s := u.Obj + if u.ObjType != "" { + s += "," + u.ObjType + } + + _, err := c.RunScript(fmt.Sprintf("undef(%s)", s)) + if err != nil { + return err + } + + return nil +} + +func (c *dolphindb) UndefAll() error { + _, err := c.RunScript("undef all") + if err != nil { + return err + } + + return nil +} + +func (c *dolphindb) ClearAllCache(r *ClearAllCacheRequest) error { + var err error + if r.IsDFS { + _, err = c.RunScript("pnodeRun(clearAllCache)") + } else { + _, err = c.RunScript("clearAllCache()") + } + if err != nil { + return err + } + + return nil +} diff --git a/api/client_test.go b/api/client_test.go new file mode 100644 index 0000000..77ea36d --- /dev/null +++ b/api/client_test.go @@ -0,0 +1,130 @@ +package api + +import ( + "context" + "testing" + + "github.com/stretchr/testify/assert" +) + +func TestClient(t *testing.T) { + _, err := NewDolphinDBClient(context.TODO(), testAddress, nil) + assert.Nil(t, err) + + db, err := NewSimpleDolphinDBClient(context.TODO(), testAddress, "user", "password") + assert.Nil(t, err) + + err = db.Connect() + assert.Nil(t, err) + + existsDatabaseReq := new(ExistsDatabaseRequest). + SetPath("dfs://db") + b, err := db.ExistsDatabase(existsDatabaseReq) + assert.Nil(t, err) + assert.Equal(t, b, false) + + createDBReq := new(DatabaseRequest). + SetDirectory("/db"). + SetPartitionType("VALUE"). + SetPartitionScheme("1..10"). + SetLocations(""). + SetEngine(""). + SetAtomic(""). + SetDBHandle("db") + d, err := db.Database(createDBReq) + assert.Nil(t, err) + assert.Equal(t, d.Name, "db") + + dropReq := new(DropDatabaseRequest). + SetDirectory("/db") + err = db.DropDatabase(dropReq) + assert.Nil(t, err) + + existTableReq := new(ExistsTableRequest). + SetDBPath("/db1"). + SetTableName("test") + b, err = db.ExistsTable(existTableReq) + assert.Nil(t, err) + assert.Equal(t, b, false) + + saveTableReq := new(SaveTableRequest). + SetTable("test") + err = db.SaveTable(saveTableReq) + assert.Nil(t, err) + + loadTextReq := new(LoadTextRequest). + SetFileName("/stock.csv") + tb, err := db.LoadText(loadTextReq) + assert.Nil(t, err) + assert.Equal(t, tb.GetSession(), "20267359") + + saveTextReq := new(SaveTextRequest). + SetFileName("/stock.csv"). + SetObj("test") + err = db.SaveText(saveTextReq) + assert.Nil(t, err) + + ploadReq := new(PloadTextRequest). + SetFileName("/stock.csv") + tb, err = db.PloadText(ploadReq) + assert.Nil(t, err) + assert.Equal(t, tb.GetSession(), "20267359") + + loadTableReq := new(LoadTableRequest). + SetTableName("test"). + SetDatabase("/db") + tb, err = db.LoadTable(loadTableReq) + assert.Nil(t, err) + assert.Equal(t, tb.GetSession(), "20267359") + + loadBySQLReq := new(LoadTableBySQLRequest). + SetSQL("sql()"). + SetDBPath("dfs://db"). + SetTableName("test") + tb, err = db.LoadTableBySQL(loadBySQLReq) + assert.Nil(t, err) + assert.Equal(t, tb.GetSession(), "20267359") + + tbCapReq := new(TableWithCapacityRequest). + SetTableName("test").SetCapacity(100).SetSize(3). 
+ SetColNames([]string{"name", "id", "value"}). + SetColTypes([]string{"string", "INT", "DOUBLE"}) + tb, err = db.TableWithCapacity(tbCapReq) + assert.Nil(t, err) + assert.Equal(t, tb.GetSession(), "20267359") + + tbReq := new(TableRequest). + SetTableName("test"). + AddTableParam("id", "`XOM`GS`AAPL"). + AddTableParam("x", "102.1 33.4 73.6") + tb, err = db.Table(tbReq) + assert.Nil(t, err) + assert.Equal(t, tb.GetSession(), "20267359") + + dropTableReq := new(DropTableRequest). + SetTableName("test"). + SetDBHandle("db") + err = db.DropTable(dropTableReq) + assert.Nil(t, err) + + dropPartitionReq := new(DropPartitionRequest). + SetPartitionPaths("GS"). + SetTableName("test"). + SetDBHandle("db") + err = db.DropPartition(dropPartitionReq) + assert.Nil(t, err) + + undefReq := new(UndefRequest). + SetObj("`valu"). + SetObjType("INT") + err = db.Undef(undefReq) + assert.Nil(t, err) + + c := new(ClearAllCacheRequest). + SetIsDFS(true) + err = db.ClearAllCache(c) + assert.Nil(t, err) + + err = db.UndefAll() + assert.Nil(t, err) +} diff --git a/api/database.go b/api/database.go new file mode 100644 index 0000000..6783da5 --- /dev/null +++ b/api/database.go @@ -0,0 +1,100 @@ +package api + +import ( + "bytes" + "fmt" + "strings" + + "github.com/dolphindb/api-go/model" +) + +// Database is used to call table api based on the name of db. +type Database struct { + db *dolphindb + + Name string +} + +// GetSession returns the sessionID of the session. +func (c *Database) GetSession() string { + return c.db.GetSession() +} + +// CreateTable creates an in-memory table in the database and returns the table instance. +func (c *Database) CreateTable(t *CreateTableRequest) (*Table, error) { + handle := generateTableName() + + by := strings.Builder{} + by.WriteString(handle) + by.WriteString("=") + by.WriteString(c.Name) + by.WriteString(".createTable(") + by.WriteString(t.SrcTable) + by.WriteString(",`") + by.WriteString(t.DimensionTableName) + + if len(t.SortColumns) > 0 { + by.WriteString(",sortColumns=`") + by.WriteString(strings.Join(t.SortColumns, "`")) + } + + by.WriteString(")") + + _, err := c.db.RunScript(by.String()) + if err != nil { + return nil, err + } + + df, err := c.db.RunScript(fmt.Sprintf(`select * from %s`, handle)) + if err != nil { + return nil, err + } + + return &Table{ + db: c.db, + Data: df.(*model.Table), + Handle: handle, + }, nil +} + +// CreatePartitionedTable creates a partitioned table in the database and returns the table instance. 
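+//
+// A sketch of a call (names are illustrative; "src" is assumed to be a table
+// handle already defined on the server):
+//
+//	tb, err := db.CreatePartitionedTable(new(CreatePartitionedTableRequest).
+//		SetSrcTable("src").
+//		SetPartitionedTableName("pt").
+//		SetPartitionColumns([]string{"sym"}))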
+func (c *Database) CreatePartitionedTable(p *CreatePartitionedTableRequest) (*Table, error) { + handle := generateTableName() + by := new(bytes.Buffer) + + by.WriteString(fmt.Sprintf("%s=%s.createPartitionedTable(%s, `%s, `%s", handle, c.Name, + p.SrcTable, p.PartitionedTableName, strings.Join(p.PartitionColumns, "`"))) + + if len(p.CompressMethods) > 0 { + by.WriteString(",compressMethods={") + for k, v := range p.CompressMethods { + by.WriteString(fmt.Sprintf(`%s:"%s",`, k, v)) + } + by.Truncate(by.Len() - 1) + } + + if len(p.SortColumns) > 0 { + by.WriteString(fmt.Sprintf(",sortColumns=`%s", strings.Join(p.SortColumns, "`"))) + } + + if len(p.KeepDuplicates) > 0 { + by.WriteString(",keepDuplicates=" + p.KeepDuplicates) + } + by.WriteString(")") + + _, err := c.db.RunScript(by.String()) + if err != nil { + return nil, err + } + + df, err := c.db.RunScript(fmt.Sprintf(`select * from %s`, handle)) + if err != nil { + return nil, err + } + + return &Table{ + db: c.db, + Data: df.(*model.Table), + Handle: handle, + }, nil +} diff --git a/api/database_test.go b/api/database_test.go new file mode 100644 index 0000000..5fd9b2f --- /dev/null +++ b/api/database_test.go @@ -0,0 +1,38 @@ +package api + +import ( + "context" + "testing" + + "github.com/stretchr/testify/assert" +) + +func TestDatabase(t *testing.T) { + db, err := NewDolphinDBClient(context.TODO(), testAddress, nil) + assert.Nil(t, err) + + err = db.Connect() + assert.Nil(t, err) + + d := &Database{ + db: db.(*dolphindb), + } + + cReq := new(CreateTableRequest). + SetSrcTable("test"). + SetDimensionTableName("dst") + tb, err := d.CreateTable(cReq) + assert.Nil(t, err) + assert.Equal(t, d.GetSession(), tb.GetSession()) + + createPartitionReq := new(CreatePartitionedTableRequest). + SetSrcTable("test"). + SetPartitionedTableName("partitioned"). + SetPartitionColumns([]string{"id"}). + SetCompressMethods(map[string]string{"id": "delta"}). + SetSortColumns([]string{"id"}). + SetKeepDuplicates("true") + tb, err = d.CreatePartitionedTable(createPartitionReq) + assert.Nil(t, err) + assert.Equal(t, d.GetSession(), tb.GetSession()) +} diff --git a/api/patitioned_table_appender.go b/api/patitioned_table_appender.go new file mode 100644 index 0000000..28c0735 --- /dev/null +++ b/api/patitioned_table_appender.go @@ -0,0 +1,351 @@ +package api + +import ( + "errors" + "fmt" + "strings" + + "github.com/dolphindb/api-go/domain" + "github.com/dolphindb/api-go/model" +) + +// PartitionedTableAppender is used to append tables into a partitioned table. +type PartitionedTableAppender struct { + partitionColumnIdx int32 + cols int + goroutineCount int + partitionType int32 + chunkIndices [][]int + appendScript string + + columnCategories []model.CategoryString + columnTypes []model.DataTypeByte + + pool *DBConnectionPool + partitionSchema model.DataForm + tableInfo *model.Dictionary + domain domain.Domain + partitionColType model.DataTypeByte +} + +// PartitionedTableAppenderOption is the options of PartitionedTableAppender. +type PartitionedTableAppenderOption struct { + // DBPath of partitioned table + DBPath string + // Name of partitioned table + TableName string + // the partitioning column name + PartitionCol string + // the method used to append the table + AppendFunction string + + // object of DBConnectionPool + Pool *DBConnectionPool +} + +// NewPartitionedTableAppender instantiates a new PartitionedTableAppender according to the option. 
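+//
+// Sketch (illustrative values; pool is an open *DBConnectionPool and the
+// partitioned table dfs://db/pt is assumed to exist):
+//
+//	appender, err := NewPartitionedTableAppender(&PartitionedTableAppenderOption{
+//		Pool:         pool,
+//		DBPath:       "dfs://db",
+//		TableName:    "pt",
+//		PartitionCol: "sym",
+//	})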
+func NewPartitionedTableAppender(opt *PartitionedTableAppenderOption) (*PartitionedTableAppender, error) { + res, task := initPartitionedTableAppender(opt) + err := res.pool.Execute([]*Task{task}) + if err != nil { + fmt.Printf("Failed to execute task: %s\n", err.Error()) + return nil, err + } + + if !task.IsSuccess() { + fmt.Printf("Failed to execute task: %s\n", task.err.Error()) + return nil, task.err + } + + err = res.handlePartitionColumnName(task, opt) + if err != nil { + fmt.Printf("Failed to handle PartitionColumnName: %s\n", err.Error()) + return nil, err + } + + dt, err := res.tableInfo.Get("colDefs") + if err != nil { + fmt.Printf("Failed to get colDefs from table: %s\n", err.Error()) + return nil, err + } + + tb := dt.Value().(*model.Table) + res.cols = tb.Rows() + res.columnCategories = make([]model.CategoryString, res.cols) + res.columnTypes = make([]model.DataTypeByte, res.cols) + + vct := tb.GetColumnByName("typeInt") + for i := 0; i < res.cols; i++ { + raw := vct.Data.ElementValue(i) + res.columnTypes[i] = model.DataTypeByte(raw.(int32)) + res.columnCategories[i] = model.GetCategory(res.columnTypes[i]) + } + + res.domain, err = domain.CreateDomain(domain.GetPartitionType(int(res.partitionType)), res.partitionColType, res.partitionSchema) + return res, err +} + +// Close closes the connection pool. +func (p *PartitionedTableAppender) Close() error { + if p.pool.isClosed { + return nil + } + + return p.pool.Close() +} + +// Append appends the table to the partitioned table which has been set when calling NewPartitionedTableAppender. +func (p *PartitionedTableAppender) Append(tb *model.Table) (int, error) { + if p.cols != tb.Columns() { + return 0, errors.New("the input table doesn't match the schema of the target table") + } + + for i := 0; i < p.cols; i++ { + curCol := tb.GetColumnByIndex(i) + colDateType := curCol.GetDataType() + err := p.checkColumnType(i, model.GetCategory(colDateType), colDateType) + if err != nil { + fmt.Printf("Failed to check column type: %s\n", err.Error()) + return 0, err + } + } + + for i := 0; i < p.goroutineCount; i++ { + p.chunkIndices[i] = make([]int, 0) + } + + keys, err := p.domain.GetPartitionKeys(tb.GetColumnByIndex(int(p.partitionColumnIdx))) + if err != nil { + fmt.Printf("Failed to call GetPartitionKeys: %s\n", err.Error()) + return 0, err + } + + for k, v := range keys { + if v >= 0 { + p.chunkIndices[v%p.goroutineCount] = append(p.chunkIndices[v%p.goroutineCount], k) + } + } + + tasks := p.packTasks(tb) + err = p.pool.Execute(tasks) + if err != nil { + fmt.Printf("Failed to execute tasks: %s\n", err.Error()) + return 0, err + } + + return p.calAffected(tasks) +} + +func (p *PartitionedTableAppender) calAffected(tasks []*Task) (int, error) { + affected := 0 + for _, v := range tasks { + if v == nil { + continue + } + + if !v.IsSuccess() { + return 0, v.err + } + + re := v.GetResult() + if re.GetDataType() == model.DtVoid { + affected += 0 + } else { + sca := re.(*model.Scalar) + val := sca.Value() + affected += int(val.(int32)) + } + } + + return affected, nil +} + +func (p *PartitionedTableAppender) packTasks(tb *model.Table) []*Task { + tasks := make([]*Task, p.goroutineCount) + for i := 0; i < p.goroutineCount; i++ { + chunk := p.chunkIndices[i] + if len(chunk) == 0 { + continue + } + + array := make([]int, len(chunk)) + copy(array, chunk) + tasks[i] = &Task{ + Script: p.appendScript, + Args: []model.DataForm{tb.GetSubtable(array)}, + } + } + + return tasks +} + +func initPartitionedTableAppender(opt 
*PartitionedTableAppenderOption) (*PartitionedTableAppender, *Task) { + res := &PartitionedTableAppender{ + pool: opt.Pool, + goroutineCount: opt.Pool.GetPoolSize(), + } + + res.chunkIndices = make([][]int, res.goroutineCount) + for k := range res.chunkIndices { + res.chunkIndices[k] = make([]int, 0) + } + + task := &Task{} + if opt.DBPath == "" { + task.Script = fmt.Sprintf("schema(%s)", opt.TableName) + res.appendScript = fmt.Sprintf("tableInsert{%s}", opt.TableName) + } else { + task.Script = fmt.Sprintf("schema(loadTable(\"%s\", \"%s\"))", opt.DBPath, opt.TableName) + res.appendScript = fmt.Sprintf("tableInsert{loadTable(\"%s\", \"%s\")}", opt.DBPath, opt.TableName) + } + + if opt.AppendFunction != "" { + res.appendScript = opt.AppendFunction + } + + return res, task +} + +func (p *PartitionedTableAppender) handlePartitionColumnName(task *Task, opt *PartitionedTableAppenderOption) error { + p.tableInfo = task.GetResult().(*model.Dictionary) + dt, err := p.tableInfo.Get("partitionColumnName") + if err != nil { + fmt.Printf("Failed to get partitionColumnName: %s\n", err.Error()) + return err + } + + partColNames := dt.Value().(model.DataForm) + if partColNames == nil { + return errors.New("can't find specified partition column name") + } + + if partColNames.GetDataForm() == model.DfScalar { + err = p.handleScalar(partColNames, opt) + } else { + err = p.handleVector(partColNames, opt) + } + + return err +} + +func (p *PartitionedTableAppender) handleScalar(partColNames model.DataForm, opt *PartitionedTableAppenderOption) error { + var err error + + sca := partColNames.(*model.Scalar) + if name := sca.DataType.String(); name != opt.PartitionCol { + return errors.New("can't find specified partition column name") + } + + p.partitionColumnIdx, err = getInt32ValueFromDictionary(p.tableInfo, "partitionColumnIndex") + if err != nil { + fmt.Printf("Failed to get partitionColumnIndex from dictionary: %s\n", err.Error()) + return err + } + + dt, err := p.tableInfo.Get("partitionSchema") + if err != nil { + return err + } + + p.partitionSchema = dt.Value().(model.DataForm) + + p.partitionType, err = getInt32ValueFromDictionary(p.tableInfo, "partitionType") + if err != nil { + fmt.Printf("Failed to get partitionType from dictionary: %s\n", err.Error()) + return err + } + + val, err := getInt32ValueFromDictionary(p.tableInfo, "partitionColumnType") + if err != nil { + fmt.Printf("Failed to get partitionColumnType from dictionary: %s\n", err.Error()) + return err + } + + p.partitionColType = model.DataTypeByte(val) + return nil +} + +func (p *PartitionedTableAppender) handleVector(partColNames model.DataForm, opt *PartitionedTableAppenderOption) error { + var err error + + vct := partColNames.(*model.Vector) + names := vct.Data.StringList() + ind := -1 + for k, v := range names { + if strings.EqualFold(v, opt.PartitionCol) { + ind = k + break + } + } + + if ind < 0 { + return errors.New("can't find specified partition column name") + } + + p.partitionColumnIdx, err = getInt32ValueFromTableWithInd(p.tableInfo, "partitionColumnIndex", ind) + if err != nil { + fmt.Printf("Failed to get partitionColumnIndex from dictionary with ind %d: %s\n", ind, err.Error()) + return err + } + + dt, err := p.tableInfo.Get("partitionSchema") + if err != nil { + fmt.Printf("Failed to get partitionSchema: %s\n", err.Error()) + return err + } + + vct = dt.Value().(*model.Vector) + p.partitionSchema = vct.Data.ElementValue(ind).(model.DataForm) + + p.partitionType, err = getInt32ValueFromTableWithInd(p.tableInfo, 
"partitionType", ind) + if err != nil { + fmt.Printf("Failed to get partitionType from dictionary with ind %d: %s\n", ind, err.Error()) + return err + } + + val, err := getInt32ValueFromTableWithInd(p.tableInfo, "partitionColumnType", ind) + if err != nil { + fmt.Printf("Failed to get partitionColumnType from dictionary with ind %d: %s\n", ind, err.Error()) + return err + } + + p.partitionColType = model.DataTypeByte(val) + return nil +} + +func (p *PartitionedTableAppender) checkColumnType(col int, cat model.CategoryString, dt model.DataTypeByte) error { + expectCategory := p.columnCategories[col] + expectType := p.columnTypes[col] + if cat != expectCategory { + return fmt.Errorf("column %d, expect category %s, got category %s", col, expectCategory, cat) + } else if cat == model.TEMPORAL && dt != expectType { + return fmt.Errorf("column %d, temporal column must have exactly the same type, expect %s, got %s", + col, model.GetDataTypeString(expectType), model.GetDataTypeString(dt)) + } + + return nil +} + +func getInt32ValueFromTableWithInd(dict *model.Dictionary, colName string, ind int) (int32, error) { + dt, err := dict.Get(colName) + if err != nil { + fmt.Printf("Failed to get %s from dictionary: %s\n", colName, err.Error()) + return 0, err + } + + vct := dt.Value().(*model.Vector) + val := vct.Data.ElementValue(ind) + return val.(int32), nil +} + +func getInt32ValueFromDictionary(dict *model.Dictionary, colName string) (int32, error) { + dt, err := dict.Get(colName) + if err != nil { + fmt.Printf("Failed to get %s from dictionary: %s\n", colName, err.Error()) + return 0, err + } + + s := dt.Value().(*model.Scalar) + val := s.DataType.Value() + return val.(int32), nil +} diff --git a/api/patitioned_table_appender_test.go b/api/patitioned_table_appender_test.go new file mode 100644 index 0000000..015e4e5 --- /dev/null +++ b/api/patitioned_table_appender_test.go @@ -0,0 +1,61 @@ +package api + +import ( + "testing" + "time" + + "github.com/dolphindb/api-go/model" + + "github.com/stretchr/testify/assert" +) + +func TestPartitionedTableAppender(t *testing.T) { + opt := &PoolOption{ + Address: testAddress, + UserID: "user", + Password: "password", + PoolSize: 2, + } + + pool, err := NewDBConnectionPool(opt) + assert.Nil(t, err) + assert.Equal(t, pool.GetPoolSize(), 2) + + appenderOpt := &PartitionedTableAppenderOption{ + Pool: pool, + DBPath: "dfs://test", + TableName: "pt", + PartitionCol: "sym", + } + + appender, err := NewPartitionedTableAppender(appenderOpt) + assert.Nil(t, err) + + col, err := model.NewDataTypeListWithRaw(model.DtDate, []time.Time{time.Date(2022, time.Month(1), 1, 1, 1, 0, 0, time.UTC), + time.Date(2022, time.Month(1), 1, 2, 1, 0, 0, time.UTC), time.Date(2022, time.Month(1), 1, 3, 1, 0, 0, time.UTC)}) + assert.Nil(t, err) + + col1, err := model.NewDataTypeListWithRaw(model.DtString, []string{"col1", "col1", "col1"}) + assert.Nil(t, err) + + tb := model.NewTable([]string{"sym"}, []*model.Vector{model.NewVector(col1)}) + n, err := appender.Append(tb) + assert.Equal(t, err.Error(), "the input table doesn't match the schema of the target table") + assert.Equal(t, n, 0) + + tb = model.NewTable([]string{"date", "sym"}, []*model.Vector{model.NewVector(col), model.NewVector(col1)}) + n, err = appender.Append(tb) + assert.Nil(t, err) + assert.Equal(t, n, 1) + + appenderOpt = &PartitionedTableAppenderOption{ + Pool: pool, + TableName: "pt", + PartitionCol: "sym", + } + + appender, err = NewPartitionedTableAppender(appenderOpt) + assert.Nil(t, err) + + assert.Nil(t, 
appender.Close())
+}
diff --git a/api/pool.go b/api/pool.go
new file mode 100644
index 0000000..93efbb7
--- /dev/null
+++ b/api/pool.go
@@ -0,0 +1,194 @@
+package api
+
+import (
+ "context"
+ "errors"
+ "fmt"
+ "strings"
+ "sync"
+
+ "github.com/dolphindb/api-go/dialer"
+ "github.com/dolphindb/api-go/model"
+)
+
+// DBConnectionPool is a client that runs tasks over a pool of connections.
+type DBConnectionPool struct {
+ isLoadBalance bool
+ isClosed bool
+
+ loadBalanceAddresses []string
+
+ connections chan dialer.Conn
+}
+
+// PoolOption configures a DBConnectionPool created by NewDBConnectionPool.
+type PoolOption struct {
+ // the server address
+ Address string
+ // the user ID
+ UserID string
+ // password of the user
+ Password string
+ // the size of the connection pool
+ // only takes effect when LoadBalance is false
+ PoolSize int
+ // Whether to enable load balancing.
+ // If true, getClusterLiveDataNodes will be called to get all available datanode addresses
+ // and a connection to every address will be created.
+ // If the addresses are not available, you can set LoadBalanceAddresses instead.
+ LoadBalance bool
+
+ // addresses used for load balancing
+ LoadBalanceAddresses []string
+}
+
+// NewDBConnectionPool creates a DBConnectionPool, configures it with opt, and returns it.
+func NewDBConnectionPool(opt *PoolOption) (*DBConnectionPool, error) {
+ p := &DBConnectionPool{
+ isLoadBalance: opt.LoadBalance,
+ loadBalanceAddresses: opt.LoadBalanceAddresses,
+ }
+
+ if opt.PoolSize < 1 {
+ return nil, errors.New("PoolSize must be greater than 0")
+ }
+
+ if !opt.LoadBalance {
+ p.connections = make(chan dialer.Conn, opt.PoolSize)
+ for i := 0; i < opt.PoolSize; i++ {
+ db, err := dialer.NewSimpleConn(context.TODO(), opt.Address, opt.UserID, opt.Password)
+ if err != nil {
+ fmt.Printf("Failed to instantiate a simple connection: %s\n", err.Error())
+ return nil, err
+ }
+
+ p.connections <- db
+ }
+ } else {
+ err := p.initLoadBalanceConnections(opt)
+ if err != nil {
+ fmt.Printf("Failed to instantiate loadBalance connections: %s\n", err.Error())
+ return nil, err
+ }
+ }
+
+ return p, nil
+}
+
+// Execute executes all tasks using the connections in the DBConnectionPool.
+func (d *DBConnectionPool) Execute(tasks []*Task) error {
+ wg := sync.WaitGroup{}
+ for _, v := range tasks {
+ if v == nil {
+ continue
+ }
+
+ wg.Add(1)
+ if v.Args != nil {
+ go func(task *Task) {
+ conn := <-d.connections
+ task.result, task.err = conn.RunFunc(task.Script, task.Args)
+ d.connections <- conn
+ wg.Done()
+ }(v)
+ } else {
+ go func(task *Task) {
+ conn := <-d.connections
+ task.result, task.err = conn.RunScript(task.Script)
+ d.connections <- conn
+ wg.Done()
+ }(v)
+ }
+ }
+
+ wg.Wait()
+
+ return nil
+}
+
+// GetPoolSize returns the size of DBConnectionPool.
+func (d *DBConnectionPool) GetPoolSize() int {
+ return len(d.connections)
+}
+
+// Close closes all connections in DBConnectionPool.
+func (d *DBConnectionPool) Close() error {
+ if d.isClosed {
+ return nil
+ }
+
+ close(d.connections)
+
+ for v := range d.connections {
+ err := v.Close()
+ if err != nil {
+ return err
+ }
+ }
+
+ d.isClosed = true
+
+ return nil
+}
+
+// IsClosed checks whether the DBConnectionPool is closed.
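+// A typical shutdown path, sketched:
+//
+//	if !pool.IsClosed() {
+//		_ = pool.Close()
+//	}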
+func (d *DBConnectionPool) IsClosed() bool { + return d.isClosed +} + +func (d *DBConnectionPool) initLoadBalanceConnections(opt *PoolOption) error { + var address []string + var err error + + d.connections = make(chan dialer.Conn, opt.PoolSize) + if len(d.loadBalanceAddresses) > 0 { + address = d.loadBalanceAddresses + } else { + address, err = d.getLoadBalanceAddress(opt) + if err != nil { + return err + } + } + + for i := 0; i < opt.PoolSize; i++ { + conn, err := dialer.NewSimpleConn(context.TODO(), address[i%len(address)], opt.UserID, opt.Password) + if err != nil { + fmt.Printf("Failed to instantiate a simple connection: %s\n", err.Error()) + return err + } + + d.connections <- conn + } + + return nil +} + +func (d *DBConnectionPool) getLoadBalanceAddress(opt *PoolOption) ([]string, error) { + db, err := dialer.NewSimpleConn(context.TODO(), opt.Address, opt.UserID, opt.Password) + if err != nil { + fmt.Printf("Failed to instantiate a simple connection: %s\n", err.Error()) + return nil, err + } + + defer db.Close() + + df, err := db.RunScript("rpc(getControllerAlias(), getClusterLiveDataNodes{false})") + if err != nil { + fmt.Printf("Failed to get nodes: %s\n", err.Error()) + return nil, err + } + + vct := df.(*model.Vector) + nodes := vct.Data.StringList() + address := make([]string, len(nodes)) + for k, v := range nodes { + fields := strings.Split(v, ":") + if len(fields) < 2 { + return nil, errors.New("invalid data node address: " + v) + } + + address[k] = fmt.Sprintf("%s:%s", fields[0], fields[1]) + } + + return address, nil +} diff --git a/api/pool_test.go b/api/pool_test.go new file mode 100644 index 0000000..621cade --- /dev/null +++ b/api/pool_test.go @@ -0,0 +1,65 @@ +package api + +import ( + "testing" + + "github.com/dolphindb/api-go/model" + + "github.com/stretchr/testify/assert" +) + +func TestPool(t *testing.T) { + opt := &PoolOption{ + Address: testAddress, + UserID: "user", + Password: "password", + PoolSize: 2, + LoadBalance: false, + } + + pool, err := NewDBConnectionPool(opt) + assert.Nil(t, err) + assert.Equal(t, pool.GetPoolSize(), 2) + + dt, err := model.NewDataType(model.DtString, "test") + assert.Nil(t, err) + + s := model.NewScalar(dt) + task := &Task{ + Script: "typestr", + Args: []model.DataForm{s}, + } + + err = pool.Execute([]*Task{task, task, task}) + assert.Nil(t, err) + + err = pool.Execute([]*Task{task}) + assert.Nil(t, err) + + assert.Nil(t, task.GetError()) + assert.Equal(t, task.IsSuccess(), true) + + err = pool.Close() + assert.Nil(t, err) + + opt.LoadBalance = true + + pool, err = NewDBConnectionPool(opt) + assert.Nil(t, err) + assert.Equal(t, pool.GetPoolSize(), 2) + + task1 := &Task{ + Script: "login", + } + + err = pool.Execute([]*Task{task, task1}) + assert.Nil(t, err) + + assert.Nil(t, task.GetError()) + assert.Equal(t, task.IsSuccess(), true) + + assert.False(t, pool.IsClosed()) + err = pool.Close() + assert.Nil(t, err) + assert.True(t, pool.IsClosed()) +} diff --git a/api/request.go b/api/request.go new file mode 100644 index 0000000..92228c1 --- /dev/null +++ b/api/request.go @@ -0,0 +1,551 @@ +package api + +// ExistsDatabaseRequest is the request struct of ExistsDatabase api. +type ExistsDatabaseRequest struct { + Path string +} + +// SetPath sets the Path value of ExistsDatabaseRequest. +func (e *ExistsDatabaseRequest) SetPath(path string) *ExistsDatabaseRequest { + e.Path = path + return e +} + +// DatabaseRequest is the request struct of Database api. 
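+//
+// The setters chain, so a VALUE-partitioned database request can be built in
+// one expression (values illustrative):
+//
+//	req := new(DatabaseRequest).
+//		SetDBHandle("db").
+//		SetDirectory("/db").
+//		SetPartitionType("VALUE").
+//		SetPartitionScheme("1..10")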
+type DatabaseRequest struct { + DBHandle string + Directory string + PartitionType string + PartitionScheme string + Locations string + Engine string + Atomic string +} + +// SetDBHandle sets the DBHandle value of DatabaseRequest. +func (d *DatabaseRequest) SetDBHandle(dbHandle string) *DatabaseRequest { + d.DBHandle = dbHandle + return d +} + +// SetDirectory sets the Directory value of DatabaseRequest. +func (d *DatabaseRequest) SetDirectory(directory string) *DatabaseRequest { + d.Directory = directory + return d +} + +// SetPartitionType sets the PartitionType value of DatabaseRequest. +func (d *DatabaseRequest) SetPartitionType(partitionType string) *DatabaseRequest { + d.PartitionType = partitionType + return d +} + +// SetPartitionScheme sets the PartitionScheme value of DatabaseRequest. +func (d *DatabaseRequest) SetPartitionScheme(partitionScheme string) *DatabaseRequest { + d.PartitionScheme = partitionScheme + return d +} + +// SetLocations sets the Locations value of DatabaseRequest. +func (d *DatabaseRequest) SetLocations(locations string) *DatabaseRequest { + d.Locations = locations + return d +} + +// SetAtomic sets the Atomic value of DatabaseRequest. +func (d *DatabaseRequest) SetAtomic(atomic string) *DatabaseRequest { + d.Atomic = atomic + return d +} + +// SetEngine sets the Engine value of DatabaseRequest. +func (d *DatabaseRequest) SetEngine(engine string) *DatabaseRequest { + d.Engine = engine + return d +} + +// DropDatabaseRequest is the request struct of DropDatabase api. +type DropDatabaseRequest struct { + Directory string +} + +// SetDirectory sets the Directory value of DropDatabaseRequest. +func (d *DropDatabaseRequest) SetDirectory(directory string) *DropDatabaseRequest { + d.Directory = directory + return d +} + +// ExistsTableRequest is the request struct of ExistsTable api. +type ExistsTableRequest struct { + TableName string + DBPath string +} + +// SetTableName sets the TableName value of ExistsTableRequest. +func (e *ExistsTableRequest) SetTableName(name string) *ExistsTableRequest { + e.TableName = name + return e +} + +// SetDBPath sets the DBPath value of ExistsTableRequest. +func (e *ExistsTableRequest) SetDBPath(path string) *ExistsTableRequest { + e.DBPath = path + return e +} + +// SaveTableRequest is the request struct of SaveTable api. +// If you have declared a DBHandle before, you can set it, +// or you should set the DBPath. +type SaveTableRequest struct { + DBHandle string + DBPath string + TableName string + Table string + Appending bool + Compression bool +} + +// SetDBHandle sets the DBHandle value of SaveTableRequest. +func (s *SaveTableRequest) SetDBHandle(name string) *SaveTableRequest { + s.DBHandle = name + return s +} + +// SetDBPath sets the DBPath value of SaveTableRequest. +func (s *SaveTableRequest) SetDBPath(path string) *SaveTableRequest { + s.DBPath = path + return s +} + +// SetTableName sets the TableName value of SaveTableRequest. +func (s *SaveTableRequest) SetTableName(name string) *SaveTableRequest { + s.TableName = name + return s +} + +// SetTable sets the Table value of SaveTableRequest. +func (s *SaveTableRequest) SetTable(path string) *SaveTableRequest { + s.Table = path + return s +} + +// SetAppending sets the Appending value of SaveTableRequest. +func (s *SaveTableRequest) SetAppending(appending bool) *SaveTableRequest { + s.Appending = appending + return s +} + +// SetCompression sets the Compression value of SaveTableRequest. 
+func (s *SaveTableRequest) SetCompression(compression bool) *SaveTableRequest { + s.Compression = compression + return s +} + +// LoadTextRequest is the request struct of LoadText api. +type LoadTextRequest struct { + FileName string + Delimiter string +} + +// SetFileName sets the FileName value of LoadTextRequest. +func (l *LoadTextRequest) SetFileName(filename string) *LoadTextRequest { + l.FileName = filename + return l +} + +// SetDelimiter sets the Delimiter value of LoadTextRequest. +func (l *LoadTextRequest) SetDelimiter(delimiter string) *LoadTextRequest { + l.Delimiter = delimiter + return l +} + +// type LoadTextExRequest struct { +// DBName string +// TableName string +// RemoteFilePath string +// Delimiter string + +// PartitionColumns []string +// } + +// func (l *LoadTextExRequest) SetDBName(name string) *LoadTextExRequest { +// l.DBName = name +// return l +// } + +// func (l *LoadTextExRequest) SetTableName(name string) *LoadTextExRequest { +// l.TableName = name +// return l +// } + +// func (l *LoadTextExRequest) SetPartitionColumns(cols []string) *LoadTextExRequest { +// l.PartitionColumns = cols +// return l +// } + +// func (l *LoadTextExRequest) SetRemoteFilePath(remoteFilePath string) *LoadTextExRequest { +// l.RemoteFilePath = remoteFilePath +// return l +// } + +// func (l *LoadTextExRequest) SetDelimiter(delimiter string) *LoadTextExRequest { +// l.Delimiter = delimiter +// return l +// } + +// SaveTextRequest is the request struct of SaveText api. +type SaveTextRequest struct { + Obj string + FileName string + // Delimiter string +} + +// SetFileName sets the FileName value of SaveTextRequest. +func (l *SaveTextRequest) SetFileName(filename string) *SaveTextRequest { + l.FileName = filename + return l +} + +// SetObj sets the Obj value of SaveTextRequest. +func (l *SaveTextRequest) SetObj(objName string) *SaveTextRequest { + l.Obj = objName + return l +} + +// PloadTextRequest is the request struct of PloadText api. +type PloadTextRequest struct { + FileName string + Delimiter string +} + +// SetFileName sets the FileName value of PloadTextRequest. +func (p *PloadTextRequest) SetFileName(filename string) *PloadTextRequest { + p.FileName = filename + return p +} + +// SetDelimiter sets the Delimiter value of PloadTextRequest. +func (p *PloadTextRequest) SetDelimiter(delimiter string) *PloadTextRequest { + p.Delimiter = delimiter + return p +} + +// LoadTableRequest is the request struct of LoadTable api. +type LoadTableRequest struct { + Database string + TableName string + MemoryMode bool + + Partitions string +} + +// SetDatabase sets the Database value of LoadTableRequest. +func (l *LoadTableRequest) SetDatabase(database string) *LoadTableRequest { + l.Database = database + return l +} + +// SetMemoryMode sets the MemoryMode value of LoadTableRequest. +func (l *LoadTableRequest) SetMemoryMode(memoryMode bool) *LoadTableRequest { + l.MemoryMode = memoryMode + return l +} + +// SetTableName sets the TableName value of LoadTableRequest. +func (l *LoadTableRequest) SetTableName(name string) *LoadTableRequest { + l.TableName = name + return l +} + +// SetPartitions sets the Partitions value of LoadTableRequest. +func (l *LoadTableRequest) SetPartitions(data string) *LoadTableRequest { + l.Partitions = data + return l +} + +// LoadTableBySQLRequest is the request struct of LoadTableBySQL api. +// If you have declared a DBHandle before, you can set it, +// or you should set the DBPath. 
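+//
+// For example (illustrative values):
+//
+//	req := new(LoadTableBySQLRequest).
+//		SetDBPath("dfs://db").
+//		SetTableName("pt").
+//		SetSQL("select * from pt")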
+type LoadTableBySQLRequest struct {
+ DBPath string
+ TableName string
+ SQL string
+ DBHandle string
+}
+
+// SetDBHandle sets the DBHandle value of LoadTableBySQLRequest.
+func (l *LoadTableBySQLRequest) SetDBHandle(dbHandle string) *LoadTableBySQLRequest {
+ l.DBHandle = dbHandle
+ return l
+}
+
+// SetDBPath sets the DBPath value of LoadTableBySQLRequest.
+func (l *LoadTableBySQLRequest) SetDBPath(path string) *LoadTableBySQLRequest {
+ l.DBPath = path
+ return l
+}
+
+// SetTableName sets the TableName value of LoadTableBySQLRequest.
+func (l *LoadTableBySQLRequest) SetTableName(tableName string) *LoadTableBySQLRequest {
+ l.TableName = tableName
+ return l
+}
+
+// SetSQL sets the SQL value of LoadTableBySQLRequest.
+func (l *LoadTableBySQLRequest) SetSQL(sql string) *LoadTableBySQLRequest {
+ l.SQL = sql
+ return l
+}
+
+// CreatePartitionedTableRequest is the request struct of CreatePartitionedTable api.
+type CreatePartitionedTableRequest struct {
+ SrcTable string
+ PartitionedTableName string
+ PartitionColumns []string
+ CompressMethods map[string]string
+ SortColumns []string
+ KeepDuplicates string
+}
+
+// SetCompressMethods sets the CompressMethods value of CreatePartitionedTableRequest.
+func (c *CreatePartitionedTableRequest) SetCompressMethods(compressMethods map[string]string) *CreatePartitionedTableRequest {
+ c.CompressMethods = compressMethods
+ return c
+}
+
+// SetSortColumns sets the SortColumns value of CreatePartitionedTableRequest.
+func (c *CreatePartitionedTableRequest) SetSortColumns(sortColumns []string) *CreatePartitionedTableRequest {
+ c.SortColumns = sortColumns
+ return c
+}
+
+// SetKeepDuplicates sets the KeepDuplicates value of CreatePartitionedTableRequest.
+func (c *CreatePartitionedTableRequest) SetKeepDuplicates(keepDuplicates string) *CreatePartitionedTableRequest {
+ c.KeepDuplicates = keepDuplicates
+ return c
+}
+
+// SetSrcTable sets the SrcTable value of CreatePartitionedTableRequest.
+func (c *CreatePartitionedTableRequest) SetSrcTable(name string) *CreatePartitionedTableRequest {
+ c.SrcTable = name
+ return c
+}
+
+// SetPartitionedTableName sets the PartitionedTableName value of CreatePartitionedTableRequest.
+func (c *CreatePartitionedTableRequest) SetPartitionedTableName(name string) *CreatePartitionedTableRequest {
+ c.PartitionedTableName = name
+ return c
+}
+
+// SetPartitionColumns sets the PartitionColumns value of CreatePartitionedTableRequest.
+func (c *CreatePartitionedTableRequest) SetPartitionColumns(partitionColumns []string) *CreatePartitionedTableRequest {
+ c.PartitionColumns = partitionColumns
+ return c
+}
+
+// CreateTableRequest is the request struct of CreateTable api.
+type CreateTableRequest struct {
+ SrcTable string
+ DimensionTableName string
+ SortColumns []string
+}
+
+// SetSortColumns sets the SortColumns value of CreateTableRequest.
+func (c *CreateTableRequest) SetSortColumns(cols []string) *CreateTableRequest {
+ c.SortColumns = cols
+ return c
+}
+
+// SetSrcTable sets the SrcTable value of CreateTableRequest.
+func (c *CreateTableRequest) SetSrcTable(name string) *CreateTableRequest {
+ c.SrcTable = name
+ return c
+}
+
+// SetDimensionTableName sets the DimensionTableName value of CreateTableRequest.
+func (c *CreateTableRequest) SetDimensionTableName(name string) *CreateTableRequest {
+ c.DimensionTableName = name
+ return c
+}
+
+// TableRequest is the request struct of Table api.
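+//
+// Each TableParam is first assigned to a server-side variable, and the
+// variables are then passed to table(), e.g. (values illustrative):
+//
+//	req := new(TableRequest).
+//		SetTableName("t").
+//		AddTableParam("id", "`XOM`GS`AAPL").
+//		AddTableParam("x", "102.1 33.4 73.6")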
+type TableRequest struct { + TableName string + + TableParams []TableParam +} + +// TableParam stores the params for Table api. +type TableParam struct { + Key string + Value string +} + +// SetTableName sets the TableName value of TableRequest. +func (t *TableRequest) SetTableName(name string) *TableRequest { + t.TableName = name + return t +} + +// SetTableParams sets the TableParams value of TableRequest. +func (t *TableRequest) SetTableParams(params []TableParam) *TableRequest { + t.TableParams = params + return t +} + +// AddTableParam adds an element to the TableParams value of TableRequest. +func (t *TableRequest) AddTableParam(key, value string) *TableRequest { + if t.TableParams == nil { + t.TableParams = make([]TableParam, 0) + } + + t.TableParams = append(t.TableParams, TableParam{key, value}) + return t +} + +// TableWithCapacityRequest is the request struct of TableWithCapacity api. +type TableWithCapacityRequest struct { + TableName string + Capacity int32 + Size int32 + ColNames []string + ColTypes []string +} + +// SetTableName sets the TableName value of TableWithCapacityRequest. +func (t *TableWithCapacityRequest) SetTableName(name string) *TableWithCapacityRequest { + t.TableName = name + return t +} + +// SetSize sets the Size value of TableWithCapacityRequest. +func (t *TableWithCapacityRequest) SetSize(size int32) *TableWithCapacityRequest { + t.Size = size + return t +} + +// SetColNames sets the ColNames value of TableWithCapacityRequest. +func (t *TableWithCapacityRequest) SetColNames(colNames []string) *TableWithCapacityRequest { + t.ColNames = colNames + return t +} + +// SetColTypes sets the ColTypes value of TableWithCapacityRequest. +func (t *TableWithCapacityRequest) SetColTypes(colTypes []string) *TableWithCapacityRequest { + t.ColTypes = colTypes + return t +} + +// SetCapacity sets the Capacity value of TableWithCapacityRequest. +func (t *TableWithCapacityRequest) SetCapacity(capacity int32) *TableWithCapacityRequest { + t.Capacity = capacity + return t +} + +// DropTableRequest is the request struct of DropTable api. +// If you have declared a DBHandle before, you can set it, +// or you should set the DBPath. +type DropTableRequest struct { + TableName string + DBHandle string + DBPath string +} + +// SetTableName sets the TableName value of DropTableRequest. +func (d *DropTableRequest) SetTableName(name string) *DropTableRequest { + d.TableName = name + return d +} + +// SetDBPath sets the DBPath value of DropTableRequest. +func (d *DropTableRequest) SetDBPath(path string) *DropTableRequest { + d.DBPath = path + return d +} + +// SetDBHandle sets the DBHandle value of DropTableRequest. +func (d *DropTableRequest) SetDBHandle(dbHandle string) *DropTableRequest { + d.DBHandle = dbHandle + return d +} + +// DropPartitionRequest is the request struct of DropPartition api. +// If you have declared a DBHandle before, you can set it, +// or you should set the DBPath. +type DropPartitionRequest struct { + DBHandle string + DBPath string + TableName string + PartitionPaths string +} + +// SetTableName sets the TableName value of DropPartitionRequest. +func (d *DropPartitionRequest) SetTableName(name string) *DropPartitionRequest { + d.TableName = name + return d +} + +// SetDBPath sets the DBPath value of DropPartitionRequest. +func (d *DropPartitionRequest) SetDBPath(path string) *DropPartitionRequest { + d.DBPath = path + return d +} + +// SetDBHandle sets the DBHandle value of DropPartitionRequest. 
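+// The setters chain, e.g. (values illustrative):
+//
+//	req := new(DropPartitionRequest).
+//		SetDBHandle("db").
+//		SetTableName("pt").
+//		SetPartitionPaths("GS")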
+func (d *DropPartitionRequest) SetDBHandle(dbHandle string) *DropPartitionRequest { + d.DBHandle = dbHandle + return d +} + +// SetPartitionPaths sets the PartitionPaths value of DropPartitionRequest. +func (d *DropPartitionRequest) SetPartitionPaths(partitionPaths string) *DropPartitionRequest { + d.PartitionPaths = partitionPaths + return d +} + +// LoginRequest is the request struct of Login api. +type LoginRequest struct { + UserID string + Password string +} + +// SetUserID sets the UserID value of LoginRequest. +func (l *LoginRequest) SetUserID(name string) *LoginRequest { + l.UserID = name + return l +} + +// SetPassword sets the Password value of LoginRequest. +func (l *LoginRequest) SetPassword(password string) *LoginRequest { + l.Password = password + return l +} + +// UndefRequest is the request struct of Undef api. +type UndefRequest struct { + Obj string + ObjType string +} + +// SetObj sets the Obj value of UndefRequest. +func (l *UndefRequest) SetObj(obj string) *UndefRequest { + l.Obj = obj + return l +} + +// SetObjType sets the ObjType value of UndefRequest. +func (l *UndefRequest) SetObjType(objType string) *UndefRequest { + l.ObjType = objType + return l +} + +// ClearAllCacheRequest is the request struct of ClearAllCache api. +type ClearAllCacheRequest struct { + IsDFS bool +} + +// SetIsDFS sets the IsDFS value of ClearAllCacheRequest. +func (l *ClearAllCacheRequest) SetIsDFS(isDFS bool) *ClearAllCacheRequest { + l.IsDFS = isDFS + return l +} diff --git a/api/request_test.go b/api/request_test.go new file mode 100644 index 0000000..e2ae41d --- /dev/null +++ b/api/request_test.go @@ -0,0 +1,40 @@ +package api + +import ( + "testing" + + "github.com/stretchr/testify/assert" +) + +func TestRequest(t *testing.T) { + s := new(SaveTableRequest).SetDBPath("/db") + assert.Equal(t, s.DBPath, "/db") + + l := new(LoadTextRequest).SetDelimiter(",") + assert.Equal(t, l.Delimiter, ",") + + // st := new(SaveTextRequest).SetDelimiter(",") + // assert.Equal(t, st.Delimiter, ",") + + pt := new(PloadTextRequest).SetDelimiter(",") + assert.Equal(t, pt.Delimiter, ",") + + lt := new(LoadTableRequest).SetMemoryMode(true). + SetPartitions("[test]") + assert.Equal(t, lt.MemoryMode, true) + assert.Equal(t, lt.Partitions, "[test]") + + ct := new(CreateTableRequest).SetSortColumns([]string{"id"}) + assert.Equal(t, ct.SortColumns, []string{"id"}) + + tReq := new(TableRequest).SetTableParams([]TableParam{ + { + Key: "key", + Value: "value", + }, + }) + assert.Equal(t, tReq.TableParams[0], TableParam{ + Key: "key", + Value: "value", + }) +} diff --git a/api/table.go b/api/table.go new file mode 100644 index 0000000..cffb453 --- /dev/null +++ b/api/table.go @@ -0,0 +1,134 @@ +package api + +import ( + "github.com/dolphindb/api-go/model" +) + +// Table is the client of table script. +// TODO: supports table script. +type Table struct { + db *dolphindb + + // selectSQL []string + // whereSQL []string + // script string + // isExec bool + // schemaInit bool + + // Handle is the handle of Table which has been defined on server + Handle string + // Data is the real value of Table + Data *model.Table +} + +// func (t *Table) setDB(db *dolphindb) *Table { +// t.db = db +// return t +// } + +// func (t *Table) setData(data *model.Table) *Table { +// t.Data = data +// return t +// } + +// func (t *Table) setHandle(name string) *Table { +// t.Handle = name +// return t +// } + +// GetHandle returns the variable name of Table which has been defined on server. 
+func (t *Table) GetHandle() string { + return t.Handle +} + +// GetSession returns the session id of the connection to dolphindb. +func (t *Table) GetSession() string { + return t.db.GetSession() +} + +// String returns the string format of the data. +func (t *Table) String() string { + return t.Data.String() +} + +// func (t *Table) Select(s []string) *Table { +// t.selectSql = s +// t.schemaInit = true +// return t +// } + +// func (t *Table) Exec(s []string) *Table { +// t.selectSql = s +// t.isExec = true +// t.schemaInit = true +// return t +// } + +// func (t *Table) Where(w []string) *Table { +// if len(w) == 0 { +// return t +// } + +// if t.whereSql == nil { +// t.whereSql = make([]string, 0) +// } + +// t.whereSql = append(t.whereSql, w...) +// return t +// } + +// func (t *Table) initSchema() error { +// raw, err := t.db.RunScript(fmt.Sprintf("schema(%s)", t.Handle)) +// if err != nil { +// return err +// } + +// dict := raw.(*model.Dictionary) +// keys, err := dict.Keys.Data.StringList() +// if err != nil { +// return err +// } + +// for k, v := range keys { +// if v == "colDefs" { +// dt := dict.Values.Data.Get(k) +// ta := dt.DataForm().(*model.Table) +// for k, v := range ta.Columns { +// str, err := k.String() +// if err != nil { +// return err +// } + +// if str == "name" { +// names, err := v.Data.StringList() +// if err != nil { +// return err +// } +// t.Select(names) +// return nil +// } +// } +// } +// } + +// return errors.New("init schema failed: there is no column in table") +// } + +// func (t *Table) ToDF() (model.DataForm, error) { +// if !t.schemaInit { +// err := t.initSchema() +// if err != nil { +// return nil, err +// } +// } +// sql := t.Sql() +// df, err := t.db.RunScript(sql) +// if err != nil { +// return nil, err +// } +// return df, nil +// } + +// func (t *Table) Sql() string { +// return "" +// } diff --git a/api/table_appender.go b/api/table_appender.go new file mode 100644 index 0000000..9b431de --- /dev/null +++ b/api/table_appender.go @@ -0,0 +1,122 @@ +package api + +import ( + "fmt" + + "github.com/dolphindb/api-go/dialer" + "github.com/dolphindb/api-go/model" +) + +// TableAppender is used to append tables into another. +type TableAppender struct { + // DBPath of database + DBPath string + // Name of table + TableName string + + // conn which has connected to and logged in the dolphindb server + Conn dialer.Conn + + columnTypes []model.DataTypeByte + nameList []string +} + +// TableAppenderOption helps you to init TableAppender. +type TableAppenderOption struct { + // DBPath of table + DBPath string + // Name of table + TableName string + // Conn which has connected to and logged in the dolphindb server + Conn dialer.Conn +} + +// NewTableAppender instantiates a new TableAppender object according to the option. 
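+// A minimal sketch (the address, credentials, and names are illustrative
+// placeholders; the conn must already be connected and logged in):
+//
+//	conn, _ := dialer.NewSimpleConn(context.TODO(), "127.0.0.1:8848", "admin", "123456")
+//	ta := NewTableAppender(&TableAppenderOption{
+//		DBPath:    "dfs://demodb",
+//		TableName: "pt",
+//		Conn:      conn,
+//	})
+//	_, err := ta.Append(tb) // tb is an assumed *model.Table matching the schema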
+func NewTableAppender(opt *TableAppenderOption) *TableAppender { + ta := &TableAppender{ + Conn: opt.Conn, + DBPath: opt.DBPath, + TableName: opt.TableName, + } + + var script string + if opt.DBPath == "" { + script = fmt.Sprintf("schema(%s)", opt.TableName) + } else { + script = fmt.Sprintf("schema(loadTable(\"%s\", \"%s\"))", opt.DBPath, opt.TableName) + } + + ret, err := opt.Conn.RunScript(script) + if err != nil { + fmt.Printf("Failed to get table %s schema: %s\n", opt.TableName, err.Error()) + return nil + } + + tableInfo := ret.(*model.Dictionary) + dt, err := tableInfo.Get("colDefs") + if err != nil { + fmt.Printf("Failed to get colDefs from table: %s\n", err.Error()) + return nil + } + + schema := dt.Value().(*model.Table) + + typeList := schema.GetColumnByName("typeInt") + ta.columnTypes = make([]model.DataTypeByte, typeList.Data.Len()) + for i := 0; i < typeList.Data.Len(); i++ { + raw := typeList.Data.ElementValue(i) + ta.columnTypes[i] = model.DataTypeByte(raw.(int32)) + } + + ta.nameList = schema.GetColumnByName("name").Data.StringList() + return ta +} + +// Close closes the connection. +func (p *TableAppender) Close() error { + return p.Conn.Close() +} + +// IsClosed checks whether the TableAppender is closed. +func (p *TableAppender) IsClosed() bool { + return p.Conn.IsClosed() +} + +// Append appends a table to the table which has been set when calling NewTableAppender. +func (p *TableAppender) Append(tb *model.Table) (model.DataForm, error) { + paramTable, err := p.packageTable(tb) + if err != nil { + fmt.Printf("Failed to package table: %s\n", err.Error()) + return nil, err + } + + if p.DBPath == "" { + return p.Conn.RunFunc(fmt.Sprintf("append!{%s}", p.TableName), []model.DataForm{paramTable}) + } + + return p.Conn.RunFunc(fmt.Sprintf("append!{loadTable(\"%s\",\"%s\"), }", + p.DBPath, p.TableName), []model.DataForm{paramTable}) +} + +func (p *TableAppender) packageTable(tb *model.Table) (*model.Table, error) { + cols := make([]*model.Vector, len(p.nameList)) + for k := range p.nameList { + srcVct := tb.GetColumnByIndex(k) + srcDt := srcVct.GetDataType() + if (srcDt == model.DtDate || srcDt == model.DtMonth || srcDt == model.DtTime || srcDt == model.DtMinute || + srcDt == model.DtSecond || srcDt == model.DtDatetime || srcDt == model.DtTimestamp || srcDt == model.DtNanoTime || + srcDt == model.DtNanoTimestamp || srcDt == model.DtDateHour) && srcDt != p.columnTypes[k] { + raw, err := model.CastDateTime(tb.GetColumnByIndex(k), p.columnTypes[k]) + if err != nil { + fmt.Printf("Failed to cast DateTime before appending: %s\n", err.Error()) + return nil, err + } + + cols[k] = raw.(*model.Vector) + } else { + cols[k] = srcVct + } + } + + return model.NewTable(p.nameList, cols), nil +} diff --git a/api/table_appender_test.go b/api/table_appender_test.go new file mode 100644 index 0000000..c442739 --- /dev/null +++ b/api/table_appender_test.go @@ -0,0 +1,43 @@ +package api + +import ( + "context" + "testing" + "time" + + "github.com/dolphindb/api-go/dialer" + "github.com/dolphindb/api-go/model" + "github.com/stretchr/testify/assert" +) + +func TestTableAppender(t *testing.T) { + conn, err := dialer.NewSimpleConn(context.TODO(), testAddress, "user", "password") + assert.Nil(t, err) + + opt := &TableAppenderOption{ + DBPath: "db", + TableName: "table", + Conn: conn, + } + + ta := NewTableAppender(opt) + assert.NotNil(t, ta) + + col, err := model.NewDataTypeListWithRaw(model.DtTimestamp, []time.Time{time.Date(2022, time.Month(1), 1, 1, 1, 0, 0, time.UTC), + time.Date(2022, 
time.Month(1), 1, 2, 1, 0, 0, time.UTC), time.Date(2022, time.Month(1), 1, 3, 1, 0, 0, time.UTC)})
+	assert.Nil(t, err)
+
+	col1, err := model.NewDataTypeListWithRaw(model.DtString, []string{"col1", "col1", "col1"})
+	assert.Nil(t, err)
+
+	tb := model.NewTable([]string{"date", "sym"}, []*model.Vector{model.NewVector(col), model.NewVector(col1)})
+	res, err := ta.Append(tb)
+	assert.Nil(t, err)
+	assert.Equal(t, res.String(), "int(1)")
+
+	assert.Equal(t, ta.IsClosed(), false)
+
+	err = ta.Close()
+	assert.Nil(t, err)
+	assert.Equal(t, ta.IsClosed(), true)
+}
diff --git a/api/table_test.go b/api/table_test.go
new file mode 100644
index 0000000..6c02095
--- /dev/null
+++ b/api/table_test.go
@@ -0,0 +1,18 @@
+package api
+
+import (
+	"testing"
+)
+
+func TestTable(t *testing.T) {
+	// tb := new(Table)
+
+	// dfTb := new(model.Table)
+	// tb.setData(dfTb)
+	// assert.Equal(t, tb.GetHandle(), "")
+
+	// tb.setHandle("handler")
+	// assert.Equal(t, tb.GetHandle(), "handler")
+
+	// assert.Equal(t, tb.String(), "table[0r][0c]([\n\t])")
+}
diff --git a/api/task.go b/api/task.go
new file mode 100644
index 0000000..f89439c
--- /dev/null
+++ b/api/task.go
@@ -0,0 +1,29 @@
+package api
+
+import "github.com/dolphindb/api-go/model"
+
+// Task is the unit of work that is executed in the DBConnectionPool.
+type Task struct {
+	// Script is required
+	Script string
+	// Args is optional. If Args is set, the task is executed by RunFunc;
+	// otherwise it is executed by RunScript.
+	Args []model.DataForm
+
+	result model.DataForm
+	err    error
+}
+
+// GetResult returns the execution result of the task.
+func (t *Task) GetResult() model.DataForm {
+	return t.result
+}
+
+// IsSuccess checks whether the task is executed successfully.
+func (t *Task) IsSuccess() bool {
+	return t.err == nil
+}
+
+// GetError gets the execution error of the task.
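+// A task is typically handed to a DBConnectionPool (see api/pool.go) and
+// inspected once the pool has run it. A minimal sketch (the pool plumbing is
+// omitted here; assume the pool has already executed the task):
+//
+//	task := &Task{Script: "1+1"}
+//	// ... execute the task through the pool ...
+//	if task.IsSuccess() {
+//		fmt.Println(task.GetResult())
+//	} else {
+//		fmt.Println(task.GetError())
+//	}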
+func (t *Task) GetError() error { + return t.err +} diff --git a/api/task_test.go b/api/task_test.go new file mode 100644 index 0000000..8073796 --- /dev/null +++ b/api/task_test.go @@ -0,0 +1,24 @@ +package api + +import ( + "testing" + + "github.com/dolphindb/api-go/model" + + "github.com/stretchr/testify/assert" +) + +func TestTask(t *testing.T) { + dt, err := model.NewDataType(model.DtString, "task") + assert.Nil(t, err) + + s := model.NewScalar(dt) + task := &Task{ + result: s, + err: nil, + } + res := task.GetResult() + assert.Equal(t, res.String(), "string(task)") + assert.Nil(t, task.GetError()) + assert.Equal(t, task.IsSuccess(), true) +} diff --git a/api/utils.go b/api/utils.go new file mode 100644 index 0000000..621a8d5 --- /dev/null +++ b/api/utils.go @@ -0,0 +1,85 @@ +package api + +import ( + "fmt" + "strings" + + uuid "github.com/satori/go.uuid" +) + +func generateDBName() string { + return fmt.Sprintf("db_%s", uuid.NewV4().String()[:8]) +} + +func generateTableName() string { + return fmt.Sprintf("tb_%s", uuid.NewV4().String()[:8]) +} + +func generateCreateDatabaseParam(d *DatabaseRequest) string { + buf := strings.Builder{} + + if d.Directory != "" { + buf.WriteString("directory='") + buf.WriteString(d.Directory) + buf.WriteString("',") + } + + if d.PartitionType != "" { + buf.WriteString("partitionType=") + buf.WriteString(d.PartitionType) + buf.WriteString(",") + } + + if d.PartitionScheme != "" { + buf.WriteString("partitionScheme=") + buf.WriteString(d.PartitionScheme) + buf.WriteString(",") + } + + if d.Locations != "" { + buf.WriteString("locations=") + buf.WriteString(d.Locations) + buf.WriteString(",") + } + + if d.Engine != "" { + buf.WriteString("Engine='") + buf.WriteString(d.Engine) + buf.WriteString("',") + } + + if d.Atomic != "" { + buf.WriteString("Atomic='") + buf.WriteString(d.Atomic) + buf.WriteString("',") + } + + return strings.TrimSuffix(buf.String(), ",") +} + +func generateSaveTableParam(d *SaveTableRequest) string { + buf := strings.Builder{} + buf.WriteString(d.DBHandle) + buf.WriteString(", ") + buf.WriteString(d.Table) + if d.TableName != "" { + buf.WriteString(", `") + buf.WriteString(d.TableName) + + buf.WriteString(", ") + if d.Appending { + buf.WriteString("1") + } else { + buf.WriteString("0") + } + + buf.WriteString(", ") + if d.Compression { + buf.WriteString("1") + } else { + buf.WriteString("0") + } + } + + return buf.String() +} diff --git a/api/utils_test.go b/api/utils_test.go new file mode 100644 index 0000000..b3a2692 --- /dev/null +++ b/api/utils_test.go @@ -0,0 +1,24 @@ +package api + +import ( + "testing" + + "github.com/stretchr/testify/assert" +) + +func TestUtils(t *testing.T) { + assert.NotEqual(t, generateTableName(), generateTableName()) + assert.NotEqual(t, generateDBName(), generateDBName()) + + dReq := &DatabaseRequest{ + DBHandle: "db", + Directory: "dfs://db", + PartitionType: "t", + PartitionScheme: "s", + Locations: "l", + Engine: "e", + Atomic: "a", + } + + assert.Equal(t, generateCreateDatabaseParam(dReq), "directory='dfs://db',partitionType=t,partitionScheme=s,locations=l,Engine='e',Atomic='a'") +} diff --git a/dialer/dialer.go b/dialer/dialer.go new file mode 100644 index 0000000..5211218 --- /dev/null +++ b/dialer/dialer.go @@ -0,0 +1,368 @@ +package dialer + +import ( + "context" + "fmt" + "io/ioutil" + "net" + "os" + "path/filepath" + "strings" + "sync" + "time" + + "github.com/dolphindb/api-go/dialer/protocol" + "github.com/dolphindb/api-go/model" +) + +const ( + defaultByteOrder = protocol.LittleEndianByte 
+	defaultTimeout = time.Minute
+)
+
+const (
+	connectCmd  = "connect"
+	scriptCmd   = "script"
+	functionCmd = "function"
+	variableCmd = "variable"
+)
+
+// Conn is the interface of DolphinDB conn.
+type Conn interface {
+	net.Conn
+
+	// Connect connects to dolphindb server
+	Connect() error
+	// GetLocalAddress gets the local address with the connection
+	GetLocalAddress() string
+
+	// RefreshTimeout resets the timeout of the connection
+	RefreshTimeout(t time.Duration)
+	// GetSession gets the session id of the connection
+	GetSession() string
+	// Close closes the connection with server
+	Close() error
+	// IsClosed checks whether the connection is closed
+	IsClosed() bool
+	// AddInitScript(script string)
+	// SetInitScripts(scripts []string)
+	// GetInitScripts() []string
+
+	// RunScript sends script to dolphindb and returns the execution result
+	RunScript(s string) (model.DataForm, error)
+	// RunFile sends script from a specific file to dolphindb and returns the execution result
+	RunFile(path string) (model.DataForm, error)
+	// RunFunc sends function request to dolphindb and returns the execution result.
+	// See DolphinDB function and command references: https://www.dolphindb.cn/cn/help/130/FunctionsandCommands/FunctionReferences/index.html
+	RunFunc(s string, args []model.DataForm) (model.DataForm, error)
+	// Upload sends local objects to the dolphindb server and generates the specified variables on the server
+	Upload(vars map[string]model.DataForm) (model.DataForm, error)
+}
+
+type conn struct {
+	lock sync.Mutex
+
+	net.Conn
+	reader      protocol.Reader
+	behaviorOpt *BehaviorOptions
+	sessionID   []byte
+	connected   bool
+	// initScripts []string
+
+	timeout time.Duration
+}
+
+// BehaviorOptions helps you configure behavior identity.
+// Refer to https://github.com/dolphindb/Tutorials_CN/blob/master/api_protocol.md#254-%E8%A1%8C%E4%B8%BA%E6%A0%87%E8%AF%86 for more details.
+type BehaviorOptions struct {
+	// Priority specifies the priority of the task
+	Priority *int
+	// Parallelism specifies the parallelism of the task
+	Parallelism *int
+	// FetchSize specifies the fetchSize of the task
+	FetchSize *int
+}
+
+// SetPriority sets the priority of the task.
+func (f *BehaviorOptions) SetPriority(p int) *BehaviorOptions {
+	f.Priority = &p
+	return f
+}
+
+// SetParallelism sets the parallelism of the task.
+func (f *BehaviorOptions) SetParallelism(p int) *BehaviorOptions {
+	f.Parallelism = &p
+	return f
+}
+
+// SetFetchSize sets the fetchSize of the task.
+func (f *BehaviorOptions) SetFetchSize(fs int) *BehaviorOptions {
+	f.FetchSize = &fs
+	return f
+}
+
+// GetPriority gets the priority of the task.
+func (f *BehaviorOptions) GetPriority() int {
+	if f.Priority == nil {
+		return 4
+	}
+	return *f.Priority
+}
+
+// GetParallelism gets the parallelism of the task.
+func (f *BehaviorOptions) GetParallelism() int {
+	if f.Parallelism == nil {
+		return 2
+	}
+	return *f.Parallelism
+}
+
+// GetFetchSize gets the fetchSize of the task.
+func (f *BehaviorOptions) GetFetchSize() int {
+	if f.FetchSize == nil {
+		return 0
+	}
+	return *f.FetchSize
+}
+
+// NewConn instantiates a new connection with the addr.
+// The behaviorOpt will affect every request sent on this conn;
+// pass nil to use the default behavior.
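+// A minimal sketch (the address is an illustrative placeholder):
+//
+//	opt := new(BehaviorOptions).SetPriority(8).SetParallelism(8)
+//	conn, err := NewConn(context.TODO(), "127.0.0.1:8848", opt)
+//	if err == nil {
+//		err = conn.Connect()
+//	}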
+func NewConn(ctx context.Context, addr string, behaviorOpt *BehaviorOptions) (Conn, error) {
+	tcpAddr, err := net.ResolveTCPAddr("tcp", addr)
+	if err != nil {
+		return nil, err
+	}
+
+	dc, err := net.DialTCP("tcp", nil, tcpAddr)
+	if err != nil {
+		return nil, err
+	}
+
+	err = dc.SetKeepAlive(true)
+	if err != nil {
+		return nil, err
+	}
+
+	c := &conn{
+		behaviorOpt: behaviorOpt,
+		Conn:        dc,
+		reader:      protocol.NewReader(dc),
+		timeout:     defaultTimeout,
+	}
+
+	return c, nil
+}
+
+// NewSimpleConn instantiates a new connection with the addr,
+// which connects to the server and logs in with the userID and pwd.
+func NewSimpleConn(ctx context.Context, address, userID, pwd string) (Conn, error) {
+	conn, err := NewConn(ctx, address, nil)
+	if err != nil {
+		return nil, err
+	}
+
+	err = conn.Connect()
+	if err != nil {
+		return nil, err
+	}
+
+	_, err = conn.RunScript(fmt.Sprintf("login('%s','%s')", userID, pwd))
+	if err != nil {
+		return nil, err
+	}
+
+	return conn, err
+}
+
+// Add an init script which will be run after you call connect
+// func (c *conn) AddInitScript(script string) {
+// 	if c.initScripts == nil {
+// 		c.initScripts = make([]string, 0)
+// 	}
+// 	c.initScripts = append(c.initScripts, script)
+// }
+
+func (c *conn) GetLocalAddress() string {
+	addr := c.LocalAddr().String()
+	return strings.Split(addr, ":")[0]
+}
+
+// Get init scripts which will be run after you call connect
+// func (c *conn) GetInitScripts() []string {
+// 	return c.initScripts
+// }
+
+// Set init scripts which will be run after you call connect
+// func (c *conn) SetInitScripts(scripts []string) {
+// 	c.initScripts = scripts
+// }
+
+func (c *conn) RefreshTimeout(t time.Duration) {
+	c.timeout = t
+}
+
+func (c *conn) Connect() error {
+	h, _, err := c.run(&requestParams{
+		commandType: connectCmd,
+		Command:     generateConnectionCommand(),
+	})
+	if err != nil {
+		return err
+	}
+
+	// if len(c.initScripts) != 0 {
+	// 	for _, v := range c.initScripts {
+	// 		_, err := c.RunScript(v)
+	// 		if err != nil {
+	// 			return err
+	// 		}
+	// 	}
+	// }
+
+	c.connected = true
+	c.refreshHeaderForResponse(h)
+
+	return nil
+}
+
+func (c *conn) Close() error {
+	if err := c.Conn.Close(); err != nil {
+		return err
+	}
+
+	c.connected = false
+	c.sessionID = nil
+
+	return nil
+}
+
+func (c *conn) IsClosed() bool {
+	return !c.connected
+}
+
+// RunScript sends script to dolphindb and returns the execution result.
+func (c *conn) RunScript(s string) (model.DataForm, error) {
+	_, di, err := c.run(&requestParams{
+		commandType: scriptCmd,
+		Command:     generateScriptCommand(s),
+	})
+
+	return di, err
+}
+
+// RunFile sends script from a specific file to dolphindb and returns the execution result.
+func (c *conn) RunFile(path string) (model.DataForm, error) {
+	var err error
+	if !filepath.IsAbs(path) {
+		path, err = filepath.Abs(path)
+		if err != nil {
+			return nil, err
+		}
+	}
+
+	fl, err := os.Open(path)
+	if err != nil {
+		return nil, err
+	}
+
+	defer fl.Close()
+
+	byt, err := ioutil.ReadAll(fl)
+	if err != nil {
+		return nil, err
+	}
+
+	_, di, err := c.run(&requestParams{
+		commandType: scriptCmd,
+		Command:     generateScriptCommand(string(byt)),
+	})
+
+	return di, err
+}
+
+// GetSession returns session id.
+func (c *conn) GetSession() string {
+	return string(c.sessionID)
+}
+
+// RunFunc sends function request to dolphindb and returns the execution result.
+// Refer to https://www.dolphindb.cn/cn/help/130/FunctionsandCommands/FunctionReferences/index.html for more details.
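+// A minimal sketch, mirroring this package's tests (typestr is a server
+// built-in that takes one argument):
+//
+//	dt, _ := model.NewDataType(model.DtString, "test")
+//	arg := model.NewScalar(dt)
+//	df, err := c.RunFunc("typestr", []model.DataForm{arg})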
+func (c *conn) RunFunc(s string, args []model.DataForm) (model.DataForm, error) {
+	bo := defaultByteOrder
+
+	_, di, err := c.run(&requestParams{
+		commandType: functionCmd,
+		Command:     generateFunctionCommand(s, bo, args),
+		SessionID:   []byte(c.GetSession()),
+		Args:        args,
+		ByteOrder:   bo,
+	})
+
+	return di, err
+}
+
+// Upload sends local data to dolphindb and generates the specified variables on the server.
+func (c *conn) Upload(vars map[string]model.DataForm) (model.DataForm, error) {
+	bo := defaultByteOrder
+
+	names := make([]string, len(vars))
+	count := 0
+	args := make([]model.DataForm, len(vars))
+	for k, v := range vars {
+		names[count] = k
+		args[count] = v
+		count++
+	}
+	_, di, err := c.run(&requestParams{
+		commandType: variableCmd,
+		Command:     generateVariableCommand(strings.Join(names, ","), bo, count),
+		SessionID:   []byte(c.GetSession()),
+		Args:        args,
+		ByteOrder:   bo,
+	})
+
+	return di, err
+}
+
+func (c *conn) run(params *requestParams) (*responseHeader, model.DataForm, error) {
+	if params.commandType == scriptCmd || params.commandType == functionCmd {
+		if c.behaviorOpt == nil {
+			c.behaviorOpt = &BehaviorOptions{}
+		}
+
+		if c.behaviorOpt.GetFetchSize() > 0 && c.behaviorOpt.GetFetchSize() < 8192 {
+			return nil, nil, fmt.Errorf("fetchSize %d must be at least 8192", c.behaviorOpt.GetFetchSize())
+		}
+	}
+
+	c.lock.Lock()
+	defer c.lock.Unlock()
+
+	err := c.SetDeadline(time.Now().Add(c.timeout))
+	if err != nil {
+		return nil, nil, err
+	}
+
+	w := protocol.NewWriter(c.Conn)
+	err = writeRequest(w, params, c.behaviorOpt)
+	if err != nil {
+		return nil, nil, err
+	}
+
+	err = w.Flush()
+	if err != nil {
+		return nil, nil, err
+	}
+
+	h, di, err := c.parseResponse(c.reader)
+	if err != nil {
+		return nil, nil, err
+	}
+
+	return h, di, nil
+}
+
+func (c *conn) refreshHeaderForResponse(h *responseHeader) {
+	c.sessionID = h.sessionID
+}
diff --git a/dialer/dialer_test.go b/dialer/dialer_test.go
new file mode 100644
index 0000000..56ceefa
--- /dev/null
+++ b/dialer/dialer_test.go
@@ -0,0 +1,149 @@
+package dialer
+
+import (
+	"context"
+	"net"
+	"os"
+	"strings"
+	"testing"
+	"time"
+
+	"github.com/dolphindb/api-go/model"
+
+	"github.com/stretchr/testify/assert"
+)
+
+const testAddr = "127.0.0.1:3002"
+
+func TestDialer(t *testing.T) {
+	fOpt := new(BehaviorOptions)
+	assert.Equal(t, fOpt.GetParallelism(), 2)
+	assert.Equal(t, fOpt.GetPriority(), 4)
+	assert.Equal(t, fOpt.GetFetchSize(), 0)
+
+	fOpt.SetFetchSize(100).
+		SetPriority(2).
+ SetParallelism(4) + assert.Equal(t, fOpt.GetParallelism(), 4) + assert.Equal(t, fOpt.GetPriority(), 2) + assert.Equal(t, fOpt.GetFetchSize(), 100) + + _, err := NewConn(context.TODO(), testAddr, nil) + assert.Nil(t, err) + + c, err := NewSimpleConn(context.TODO(), testAddr, "user", "password") + assert.Nil(t, err) + + // c.AddInitScript("schema()") + // assert.Equal(t, c.GetInitScripts(), []string{"schema()"}) + + // c.SetInitScripts([]string{"init", "login"}) + // assert.Equal(t, c.GetInitScripts(), []string{"init", "login"}) + + c.RefreshTimeout(10 * time.Second) + + err = c.Connect() + assert.Nil(t, err) + assert.Equal(t, c.IsClosed(), false) + + f, err := os.Create("test.txt") + assert.Nil(t, err) + + _, err = f.Write([]byte("login")) + assert.Nil(t, err) + + err = f.Close() + assert.Nil(t, err) + + _, err = c.RunFile("./test.txt") + assert.Nil(t, err) + + err = os.Remove("./test.txt") + assert.Nil(t, err) + + dt, err := model.NewDataType(model.DtString, "test") + assert.Nil(t, err) + + s := model.NewScalar(dt) + _, err = c.RunFunc("typestr", []model.DataForm{s}) + assert.Nil(t, err) + + df, err := c.Upload(map[string]model.DataForm{"scalar": s}) + assert.Nil(t, err) + assert.Equal(t, c.GetSession(), "20267359") + assert.Equal(t, df.GetDataForm(), model.DfScalar) + assert.Equal(t, df.GetDataType(), model.DtString) + assert.Equal(t, df.String(), "string(OK)") + + address := c.GetLocalAddress() + assert.True(t, strings.HasPrefix(address, "127.0.0.1")) + + err = c.Close() + assert.Nil(t, err) + assert.True(t, c.IsClosed()) +} + +func TestMain(m *testing.M) { + exit := make(chan bool) + ln, err := net.Listen("tcp", testAddr) + if err != nil { + return + } + go func() { + for !isExit(exit) { + conn, err := ln.Accept() + if err != nil { + return + } + + go handleData(conn) + } + + ln.Close() + }() + + exitCode := m.Run() + + close(exit) + + os.Exit(exitCode) +} + +func handleData(conn net.Conn) { + res := make([]byte, 0) + for { + buf := make([]byte, 512) + l, err := conn.Read(buf) + if err != nil { + continue + } + + res = append(res, buf[0:l]...) + if len(res) == 15 || len(res) == 29 || len(res) == 30 || len(res) == 48 || + len(res) == 48 || len(res) == 54 || len(res) == 49 { + _, err = conn.Write([]byte{0x32, 0x30, 0x32, 0x36, 0x37, 0x33, 0x35, 0x39, 0x20, 0x30, 0x20, 0x31, 0x0a, 0x4f, 0x4b, 0x0a}) + if err != nil { + return + } + + res = make([]byte, 0) + } else if len(res) == 42 { + _, err = conn.Write([]byte{0x32, 0x30, 0x32, 0x36, 0x37, 0x33, 0x35, 0x39, 0x20, 0x31, 0x20, 0x31, 0x0a, 0x4f, 0x4b, 0x0a, + 0x12, 0x00, 0x4f, 0x4b, 0x00}) + if err != nil { + return + } + + res = make([]byte, 0) + } + } +} + +func isExit(exit <-chan bool) bool { + select { + case <-exit: + return true + default: + return false + } +} diff --git a/dialer/protocol/buffer.go b/dialer/protocol/buffer.go new file mode 100644 index 0000000..8f5d3cd --- /dev/null +++ b/dialer/protocol/buffer.go @@ -0,0 +1,102 @@ +package protocol + +import ( + "bytes" +) + +// Buffer helps to read blobs efficiently. +type Buffer struct { + buf *bytes.Buffer + ind int64 + l int64 + count int + + r Reader +} + +// NewBuffer inits a Buffer object. +func NewBuffer(count int, r Reader) *Buffer { + return &Buffer{ + count: count, + r: r, + } +} + +func (b *Buffer) isEmpty() bool { + return b.l <= b.ind +} + +// ReadBlobs helps to read blobs. 
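+// Each blob on the wire is a 4-byte length in the given byte order followed
+// by that many payload bytes; ReadBlobs reads count of them. A minimal sketch
+// (mirroring this package's tests):
+//
+//	r := bytes.NewReader([]byte{3, 0, 0, 0, 'a', 'b', 'c'})
+//	buf := NewBuffer(1, NewReader(r))
+//	blobs, err := buf.ReadBlobs(LittleEndian) // blobs[0] == []byte("abc")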
+func (b *Buffer) ReadBlobs(bo ByteOrder) ([][]byte, error) {
+	res := make([][]byte, b.count)
+	ind := 0
+	for b.count > 0 {
+		l, err := b.read(4)
+		if err != nil {
+			return nil, err
+		}
+
+		b.count--
+
+		length := int(bo.Uint32(l))
+		if length == 0 {
+			continue
+		}
+
+		res[ind], err = b.read(length)
+		if err != nil {
+			return nil, err
+		}
+
+		ind++
+	}
+
+	return res, nil
+}
+
+func (b *Buffer) fill(count int) error {
+	if count == 0 {
+		count = 1
+	}
+
+	tmp, err := b.r.ReadCertainBytes(count)
+	if err != nil {
+		return err
+	}
+
+	b.buf = bytes.NewBuffer(tmp)
+	b.ind = 0
+	b.l = int64(count)
+	return err
+}
+
+func (b *Buffer) read(length int) ([]byte, error) {
+	if b.isEmpty() {
+		err := b.fill(4 * b.count)
+		if err != nil {
+			return nil, err
+		}
+	}
+
+	return b.copy(length)
+}
+
+func (b *Buffer) copy(count int) ([]byte, error) {
+	res := b.buf.Next(count)
+	lt := len(res)
+	b.ind += int64(lt)
+	if lt < count {
+		err := b.fill(4 * b.count)
+		if err != nil {
+			return nil, err
+		}
+
+		tmp, err := b.copy(count - lt)
+		if err != nil {
+			return nil, err
+		}
+		res = append(res, tmp...)
+	}
+
+	return res, nil
+}
diff --git a/dialer/protocol/buffer_test.go b/dialer/protocol/buffer_test.go
new file mode 100644
index 0000000..bcf6e81
--- /dev/null
+++ b/dialer/protocol/buffer_test.go
@@ -0,0 +1,18 @@
+package protocol
+
+import (
+	"bytes"
+	"testing"
+
+	"github.com/stretchr/testify/assert"
+)
+
+func TestBuffer(t *testing.T) {
+	r := bytes.NewReader([]byte{1, 0, 0, 0, 10})
+
+	buf := NewBuffer(1, NewReader(r))
+	blobs, err := buf.ReadBlobs(LittleEndian)
+	assert.Nil(t, err)
+	assert.Equal(t, len(blobs), 1)
+	assert.Equal(t, blobs[0][0], byte(10))
+}
diff --git a/dialer/protocol/byte_order.go b/dialer/protocol/byte_order.go
new file mode 100644
index 0000000..725cba2
--- /dev/null
+++ b/dialer/protocol/byte_order.go
@@ -0,0 +1,41 @@
+package protocol
+
+import "encoding/binary"
+
+const (
+	// BigEndianByte is the byte type of BigEndian.
+	BigEndianByte byte = '0'
+	// LittleEndianByte is the byte type of LittleEndian.
+	LittleEndianByte byte = '1'
+)
+
+var (
+	// BigEndian is the big-endian implementation of ByteOrder.
+	BigEndian = &bigEndian{binary.BigEndian}
+	// LittleEndian is the little-endian implementation of ByteOrder.
+	LittleEndian = &littleEndian{binary.LittleEndian}
+
+	byteOrderSet = map[byte]ByteOrder{
+		BigEndianByte:    BigEndian,
+		LittleEndianByte: LittleEndian,
+	}
+)
+
+// ByteOrder interface declares functions about how to handle data.
+type ByteOrder interface {
+	binary.ByteOrder
+}
+
+type littleEndian struct {
+	binary.ByteOrder
+}
+
+type bigEndian struct {
+	binary.ByteOrder
+}
+
+// GetByteOrder returns BigEndian or LittleEndian according to b:
+// '0' returns BigEndian and '1' returns LittleEndian.
+func GetByteOrder(b byte) ByteOrder {
+	return byteOrderSet[b]
+}
diff --git a/dialer/protocol/byte_order_test.go b/dialer/protocol/byte_order_test.go
new file mode 100644
index 0000000..168f603
--- /dev/null
+++ b/dialer/protocol/byte_order_test.go
@@ -0,0 +1,15 @@
+package protocol
+
+import (
+	"testing"
+
+	"github.com/stretchr/testify/assert"
+)
+
+func TestGetByteOrder(t *testing.T) {
+	bo := GetByteOrder('0')
+	assert.Equal(t, bo, BigEndian)
+
+	bo = GetByteOrder('1')
+	assert.Equal(t, bo, LittleEndian)
+}
diff --git a/dialer/protocol/constant.go b/dialer/protocol/constant.go
new file mode 100644
index 0000000..1f566a2
--- /dev/null
+++ b/dialer/protocol/constant.go
@@ -0,0 +1,17 @@
+package protocol
+
+const (
+	// NewLine is the byte format of \n.
+	NewLine byte = '\n'
+	// EmptySpace is the byte format of space.
+	EmptySpace byte = ' '
+	// StringSep is the byte format of the string separator.
+	StringSep byte = 0
+)
+
+var (
+	// APIBytes is the bytes format of API.
+	APIBytes = []byte("API")
+	// RespOK is the bytes format of OK.
+	RespOK = []byte("OK")
+)
diff --git a/dialer/protocol/constant_test.go b/dialer/protocol/constant_test.go
new file mode 100644
index 0000000..2d0eaff
--- /dev/null
+++ b/dialer/protocol/constant_test.go
@@ -0,0 +1 @@
+package protocol
diff --git a/dialer/protocol/reader.go b/dialer/protocol/reader.go
new file mode 100644
index 0000000..d658bb2
--- /dev/null
+++ b/dialer/protocol/reader.go
@@ -0,0 +1,85 @@
+package protocol
+
+import (
+	"bufio"
+	"io"
+)
+
+// Reader interface declares functions to read data from reader.
+type Reader interface {
+	// ReadCertainBytes reads exactly count bytes from the reader.
+	// It returns the bytes read and an error if fewer than count bytes were read.
+	// The error is io.EOF only if no bytes were read; if an EOF happens after
+	// reading some but not all of the bytes, the error is io.ErrUnexpectedEOF.
+	ReadCertainBytes(count int) ([]byte, error)
+	// ReadByte reads and returns a single byte.
+	// If no byte is available, returns an error.
+	ReadByte() (byte, error)
+	// ReadBytes reads until the first occurrence of delim in the input,
+	// returning a slice containing the data up to but not including the delimiter.
+	// If ReadBytes encounters an error before finding a delimiter,
+	// it returns nil and the error itself (often io.EOF).
+	ReadBytes(delim byte) ([]byte, error)
+
+	// Read reads data into p.
+	// It returns the number of bytes read into p.
+	// The bytes are taken from at most one Read on the underlying Reader,
+	// hence n may be less than len(p).
+	// To read exactly len(p) bytes, use io.ReadFull(b, p).
+	// At EOF, the count will be zero and err will be io.EOF.
+	Read(buf []byte) (int, error)
+}
+
+type reader struct {
+	r *bufio.Reader
+}
+
+// NewReader returns a reader instance which implements the Reader.
+func NewReader(rd io.Reader) Reader {
+	return &reader{r: bufio.NewReaderSize(rd, 8192)}
+}
+
+// ReadCertainBytes reads exactly count bytes from the reader.
+// It returns the bytes read and an error if fewer than count bytes were read.
+// The error is io.EOF only if no bytes were read; if an EOF happens after
+// reading some but not all of the bytes, the error is io.ErrUnexpectedEOF.
+func (r *reader) ReadCertainBytes(count int) ([]byte, error) {
+	buf := make([]byte, count)
+	_, err := io.ReadFull(r.r, buf)
+	return buf, err
+}
+
+// ReadByte reads and returns a single byte.
+// If no byte is available, returns an error.
+func (r *reader) ReadByte() (byte, error) {
+	return r.r.ReadByte()
+}
+
+// ReadBytes reads until the first occurrence of delim in the input,
+// returning a slice containing the data up to but not including the delimiter.
+// If ReadBytes encounters an error before finding a delimiter,
+// it returns nil and the error itself (often io.EOF).
+func (r *reader) ReadBytes(delim byte) ([]byte, error) {
+	res, err := r.r.ReadBytes(delim)
+	if err != nil {
+		return nil, err
+	}
+
+	return res[:len(res)-1], nil
+}
+
+// Read reads data into p.
+// It returns the number of bytes read into p. +// The bytes are taken from at most one Read on the underlying Reader, +// hence n may be less than len(p). +// To read exactly len(p) bytes, use io.ReadFull(b, p). +// At EOF, the count will be zero and err will be io.EOF. +func (r *reader) Read(buf []byte) (int, error) { + return r.r.Read(buf) +} diff --git a/dialer/protocol/reader_test.go b/dialer/protocol/reader_test.go new file mode 100644 index 0000000..00aa871 --- /dev/null +++ b/dialer/protocol/reader_test.go @@ -0,0 +1,29 @@ +package protocol + +import ( + "bytes" + "testing" + + "github.com/stretchr/testify/assert" +) + +func TestReader(t *testing.T) { + by := bytes.NewBuffer([]byte("\ntest reader")) + + r := NewReader(by) + b, err := r.ReadByte() + assert.Nil(t, err) + assert.Equal(t, b, NewLine) + + bs, err := r.ReadBytes(EmptySpace) + assert.Nil(t, err) + assert.Equal(t, string(bs), "test") + + bs, err = r.ReadCertainBytes(6) + assert.Nil(t, err) + assert.Equal(t, string(bs), "reader") + + _, err = r.ReadBytes(EmptySpace) + assert.NotNil(t, err) + assert.Equal(t, err.Error(), "EOF") +} diff --git a/dialer/protocol/unsafeslice.go b/dialer/protocol/unsafeslice.go new file mode 100644 index 0000000..0c73eb6 --- /dev/null +++ b/dialer/protocol/unsafeslice.go @@ -0,0 +1,185 @@ +// Package protocol contains functions for zero-copy casting between typed slices and byte slices. +package protocol + +import ( + "reflect" + "unsafe" +) + +// Useful constants. +const ( + TwoUint64Size = 16 + Uint64Size = 8 + Uint32Size = 4 + Uint16Size = 2 + Uint8Size = 1 +) + +func newRawSliceHeader(sh *reflect.SliceHeader, b []byte, stride int) *reflect.SliceHeader { + sh.Len = len(b) / stride + sh.Cap = len(b) / stride + sh.Data = (uintptr)(unsafe.Pointer(&b[0])) + return sh +} + +func newSliceHeaderFromBytes(b []byte, stride int) unsafe.Pointer { + //nolint + sh := &reflect.SliceHeader{} + return unsafe.Pointer(newRawSliceHeader(sh, b, stride)) +} + +func newSliceHeader(p unsafe.Pointer, size int) unsafe.Pointer { + //nolint + return unsafe.Pointer(&reflect.SliceHeader{ + Len: size, + Cap: size, + Data: uintptr(p), + }) +} + +// ByteSliceFromInt8Slice casts b to []byte. +func ByteSliceFromInt8Slice(b []int8) []byte { + if len(b) == 0 { + return []byte{} + } + return *(*[]byte)(newSliceHeader(unsafe.Pointer(&b[0]), len(b)*Uint8Size)) +} + +// ByteSliceFromUint8Slice casts b to []byte. +func ByteSliceFromUint8Slice(b []uint8) []byte { + if len(b) == 0 { + return []byte{} + } + return b +} + +// ByteSliceFromInt16Slice casts b to []byte. +func ByteSliceFromInt16Slice(b []int16) []byte { + if len(b) == 0 { + return []byte{} + } + return *(*[]byte)(newSliceHeader(unsafe.Pointer(&b[0]), len(b)*Uint16Size)) +} + +// ByteSliceFromUint16Slice casts b to []byte. +func ByteSliceFromUint16Slice(b []uint16) []byte { + if len(b) == 0 { + return []byte{} + } + return *(*[]byte)(newSliceHeader(unsafe.Pointer(&b[0]), len(b)*Uint16Size)) +} + +// ByteSliceFromInt32Slice casts b to []byte. +func ByteSliceFromInt32Slice(b []int32) []byte { + if len(b) == 0 { + return []byte{} + } + return *(*[]byte)(newSliceHeader(unsafe.Pointer(&b[0]), len(b)*Uint32Size)) +} + +// ByteSliceFromUint32Slice casts b to []byte. +func ByteSliceFromUint32Slice(b []uint32) []byte { + if len(b) == 0 { + return []byte{} + } + return *(*[]byte)(newSliceHeader(unsafe.Pointer(&b[0]), len(b)*Uint32Size)) +} + +// ByteSliceFromInt64Slice casts b to []byte. 
+func ByteSliceFromInt64Slice(b []int64) []byte {
+	if len(b) == 0 {
+		return []byte{}
+	}
+	return *(*[]byte)(newSliceHeader(unsafe.Pointer(&b[0]), len(b)*Uint64Size))
+}
+
+// ByteSliceFromUint64Slice casts b to []byte.
+func ByteSliceFromUint64Slice(b []uint64) []byte {
+	if len(b) == 0 {
+		return []byte{}
+	}
+	return *(*[]byte)(newSliceHeader(unsafe.Pointer(&b[0]), len(b)*Uint64Size))
+}
+
+// ByteSliceFromFloat32Slice casts b to []byte.
+func ByteSliceFromFloat32Slice(b []float32) []byte {
+	if len(b) == 0 {
+		return []byte{}
+	}
+	return *(*[]byte)(newSliceHeader(unsafe.Pointer(&b[0]), len(b)*Uint32Size))
+}
+
+// ByteSliceFromFloat64Slice casts b to []byte.
+func ByteSliceFromFloat64Slice(b []float64) []byte {
+	if len(b) == 0 {
+		return []byte{}
+	}
+	return *(*[]byte)(newSliceHeader(unsafe.Pointer(&b[0]), len(b)*Uint64Size))
+}
+
+// Float32SliceFromByteSlice casts b to []float32.
+func Float32SliceFromByteSlice(b []byte) []float32 {
+	return *(*[]float32)(newSliceHeaderFromBytes(b, Uint32Size))
+}
+
+// Float64SliceFromByteSlice casts b to []float64.
+func Float64SliceFromByteSlice(b []byte) []float64 {
+	return *(*[]float64)(newSliceHeaderFromBytes(b, Uint64Size))
+}
+
+// Uint64SliceFromByteSlice casts b to []uint64.
+func Uint64SliceFromByteSlice(b []byte) []uint64 {
+	return *(*[]uint64)(newSliceHeaderFromBytes(b, Uint64Size))
+}
+
+// Int64SliceFromByteSlice casts b to []int64.
+func Int64SliceFromByteSlice(b []byte) []int64 {
+	return *(*[]int64)(newSliceHeaderFromBytes(b, Uint64Size))
+}
+
+// Uint32SliceFromByteSlice casts b to []uint32.
+func Uint32SliceFromByteSlice(b []byte) []uint32 {
+	return *(*[]uint32)(newSliceHeaderFromBytes(b, Uint32Size))
+}
+
+// Int32SliceFromByteSlice casts b to []int32.
+func Int32SliceFromByteSlice(b []byte) []int32 {
+	return *(*[]int32)(newSliceHeaderFromBytes(b, Uint32Size))
+}
+
+// Uint16SliceFromByteSlice casts b to []uint16.
+func Uint16SliceFromByteSlice(b []byte) []uint16 {
+	return *(*[]uint16)(newSliceHeaderFromBytes(b, Uint16Size))
+}
+
+// Int16SliceFromByteSlice casts b to []int16.
+func Int16SliceFromByteSlice(b []byte) []int16 {
+	return *(*[]int16)(newSliceHeaderFromBytes(b, Uint16Size))
+}
+
+// Uint8SliceFromByteSlice casts b to []uint8.
+func Uint8SliceFromByteSlice(b []byte) []uint8 {
+	return b
+}
+
+// Int8SliceFromByteSlice casts b to []int8.
+func Int8SliceFromByteSlice(b []byte) []int8 {
+	return *(*[]int8)(newSliceHeaderFromBytes(b, Uint8Size))
+}
+
+// ByteSliceFromString casts s to []byte.
+func ByteSliceFromString(s string) []byte {
+	h := (*reflect.StringHeader)(unsafe.Pointer(&s))
+	return *(*[]byte)(newSliceHeader(unsafe.Pointer(h.Data), len(s)*Uint8Size))
+}
+
+// StringFromByteSlice casts b to string.
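+// These casts are zero-copy: the result aliases the input's memory, so
+// mutating one mutates the other, and the input must stay alive while the
+// result is in use. A minimal sketch:
+//
+//	bs := ByteSliceFromInt32Slice([]int32{1, 2}) // on a little-endian machine: [1 0 0 0 2 0 0 0]
+//	bs[0] = 3                                    // the original slice now starts with 3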
+func StringFromByteSlice(b []byte) string {
+	//nolint
+	h := &reflect.StringHeader{
+		//nolint
+		Data: uintptr(unsafe.Pointer(&b[0])),
+		Len:  len(b),
+	}
+	return *(*string)(unsafe.Pointer(h))
+}
diff --git a/dialer/protocol/unsafeslice_test.go b/dialer/protocol/unsafeslice_test.go
new file mode 100644
index 0000000..c767f4c
--- /dev/null
+++ b/dialer/protocol/unsafeslice_test.go
@@ -0,0 +1,63 @@
+package protocol
+
+import (
+	"testing"
+
+	"github.com/stretchr/testify/assert"
+)
+
+func TestUnsafeSlice(t *testing.T) {
+	bs := ByteSliceFromInt8Slice([]int8{0, 1, 2})
+	assert.Equal(t, bs, []byte{0, 1, 2})
+
+	i8 := Int8SliceFromByteSlice(bs)
+	assert.Equal(t, i8, []int8{0, 1, 2})
+
+	bs = ByteSliceFromUint8Slice([]uint8{0, 1, 2})
+	assert.Equal(t, bs, []byte{0, 1, 2})
+
+	u8 := Uint8SliceFromByteSlice(bs)
+	assert.Equal(t, u8, []uint8{0, 1, 2})
+
+	bs = ByteSliceFromInt16Slice([]int16{0, 1, 2})
+	assert.Equal(t, bs, []byte{0, 0, 1, 0, 2, 0})
+
+	i16 := Int16SliceFromByteSlice(bs)
+	assert.Equal(t, i16, []int16{0, 1, 2})
+
+	bs = ByteSliceFromUint16Slice([]uint16{0, 1, 2})
+	assert.Equal(t, bs, []byte{0, 0, 1, 0, 2, 0})
+
+	u16 := Uint16SliceFromByteSlice(bs)
+	assert.Equal(t, u16, []uint16{0, 1, 2})
+
+	bs = ByteSliceFromInt32Slice([]int32{0, 1, 2})
+	assert.Equal(t, bs, []byte{0, 0, 0, 0, 1, 0, 0, 0, 2, 0, 0, 0})
+
+	i32 := Int32SliceFromByteSlice(bs)
+	assert.Equal(t, i32, []int32{0, 1, 2})
+
+	bs = ByteSliceFromUint32Slice([]uint32{0, 1, 2})
+	assert.Equal(t, bs, []byte{0, 0, 0, 0, 1, 0, 0, 0, 2, 0, 0, 0})
+
+	u32 := Uint32SliceFromByteSlice(bs)
+	assert.Equal(t, u32, []uint32{0, 1, 2})
+
+	bs = ByteSliceFromInt64Slice([]int64{0, 1, 2})
+	assert.Equal(t, bs, []byte{0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 2, 0, 0, 0, 0, 0, 0, 0})
+
+	i64 := Int64SliceFromByteSlice(bs)
+	assert.Equal(t, i64, []int64{0, 1, 2})
+
+	bs = ByteSliceFromUint64Slice([]uint64{0, 1, 2})
+	assert.Equal(t, bs, []byte{0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 2, 0, 0, 0, 0, 0, 0, 0})
+
+	u64 := Uint64SliceFromByteSlice(bs)
+	assert.Equal(t, u64, []uint64{0, 1, 2})
+
+	bs = ByteSliceFromString("test")
+	assert.Equal(t, string(bs), "test")
+
+	s := StringFromByteSlice(bs)
+	assert.Equal(t, s, "test")
+}
diff --git a/dialer/protocol/writer.go b/dialer/protocol/writer.go
new file mode 100644
index 0000000..8f92850
--- /dev/null
+++ b/dialer/protocol/writer.go
@@ -0,0 +1,44 @@
+package protocol
+
+import (
+	"bufio"
+	"io"
+)
+
+// Writer declares functions to write data into an io.Writer.
+type Writer struct {
+	wr *bufio.Writer
+}
+
+// NewWriter inits a writer with io.Writer.
+func NewWriter(wr io.Writer) *Writer {
+	return &Writer{
+		wr: bufio.NewWriterSize(wr, 8192),
+	}
+}
+
+// Write writes the contents of d into the buffer,
+// returning an error if the write is short.
+func (w *Writer) Write(d []byte) error {
+	_, err := w.wr.Write(d)
+	return err
+}
+
+// WriteByte writes a single byte.
+func (w *Writer) WriteByte(d byte) error {
+	return w.wr.WriteByte(d)
+}
+
+// WriteString writes a string,
+// returning an error if the write is short.
+func (w *Writer) WriteString(d string) error {
+	_, err := w.wr.WriteString(d)
+	return err
+}
+
+// Flush writes any buffered data to the underlying io.Writer.
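+// Buffered writes only reach the underlying io.Writer on Flush. A minimal
+// sketch:
+//
+//	var buf bytes.Buffer
+//	w := NewWriter(&buf)
+//	_ = w.WriteString("API")
+//	_ = w.WriteByte(EmptySpace)
+//	_ = w.Flush() // buf now holds "API "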
+func (w *Writer) Flush() error { + return w.wr.Flush() +} diff --git a/dialer/protocol/writer_test.go b/dialer/protocol/writer_test.go new file mode 100644 index 0000000..8f4ad56 --- /dev/null +++ b/dialer/protocol/writer_test.go @@ -0,0 +1,25 @@ +package protocol + +import ( + "bytes" + "testing" + + "github.com/stretchr/testify/assert" +) + +func TestWriter(t *testing.T) { + by := bytes.NewBuffer(nil) + w := NewWriter(by) + err := w.WriteByte(byte(0)) + assert.Nil(t, err) + + err = w.Write([]byte{1, 2}) + assert.Nil(t, err) + + err = w.WriteString("test") + assert.Nil(t, err) + + err = w.Flush() + assert.Nil(t, err) + assert.Equal(t, by.Bytes(), []byte{0x0, 0x1, 0x2, 0x74, 0x65, 0x73, 0x74}) +} diff --git a/dialer/request.go b/dialer/request.go new file mode 100644 index 0000000..551d2f3 --- /dev/null +++ b/dialer/request.go @@ -0,0 +1,77 @@ +package dialer + +import ( + "bytes" + "fmt" + "strconv" + + "github.com/dolphindb/api-go/dialer/protocol" + "github.com/dolphindb/api-go/model" +) + +type requestParams struct { + commandType string + + SessionID []byte + ByteOrder byte + Command []byte + Args []model.DataForm +} + +func writeRequest(wr *protocol.Writer, params *requestParams, opt *BehaviorOptions) error { + writeHeader(wr, opt, params.SessionID, len(params.Command), params.commandType) + err := writeCommand(wr, params.Command) + if err != nil { + return err + } + + err = writeArgs(wr, params.ByteOrder, params.Args) + if err != nil { + return err + } + + return nil +} + +func writeFlag(opt *BehaviorOptions) []byte { + bs := bytes.Buffer{} + + bs.WriteString(fmt.Sprintf(" / %d_1_%d_%d", generatorRequestFlag(false), opt.GetPriority(), opt.GetParallelism())) + if opt.GetFetchSize() > 0 { + bs.WriteString(fmt.Sprintf("__%d", opt.GetFetchSize())) + } + + return bs.Bytes() +} + +func writeHeader(w *protocol.Writer, opt *BehaviorOptions, sessionID []byte, commandLength int, commandType string) { + _ = w.Write(protocol.APIBytes) + _ = w.WriteByte(protocol.EmptySpace) + _ = w.Write(sessionID) + _ = w.WriteByte(protocol.EmptySpace) + _ = w.Write([]byte(strconv.Itoa(commandLength))) + if commandType == scriptCmd || commandType == functionCmd { + _ = w.Write(writeFlag(opt)) + } + _ = w.WriteByte(protocol.NewLine) +} + +func writeCommand(w *protocol.Writer, command []byte) error { + return w.Write(command) +} + +func writeArgs(w *protocol.Writer, bo byte, args []model.DataForm) error { + b := protocol.GetByteOrder(bo) + for _, arg := range args { + if arg == nil { + continue + } + + err := arg.Render(w, b) + if err != nil { + return err + } + } + + return nil +} diff --git a/dialer/request_test.go b/dialer/request_test.go new file mode 100644 index 0000000..497606b --- /dev/null +++ b/dialer/request_test.go @@ -0,0 +1 @@ +package dialer diff --git a/dialer/response.go b/dialer/response.go new file mode 100644 index 0000000..747820a --- /dev/null +++ b/dialer/response.go @@ -0,0 +1,75 @@ +package dialer + +import ( + "bytes" + "fmt" + "strconv" + + "github.com/dolphindb/api-go/dialer/protocol" + "github.com/dolphindb/api-go/errors" + "github.com/dolphindb/api-go/model" +) + +type responseHeader struct { + sessionID []byte + objectCount int + byteOrder protocol.ByteOrder +} + +func (c *conn) parseResponse(reader protocol.Reader) (*responseHeader, model.DataForm, error) { + h, err := c.parseResponseHeader(reader) + if err != nil { + return nil, nil, err + } + + err = c.validateResponseOK(reader) + if err != nil { + return nil, nil, err + } + + di, err := c.parseResponseContent(reader, 
h.objectCount, h.byteOrder)
+	return h, di, err
+}
+
+func (c *conn) parseResponseHeader(reader protocol.Reader) (*responseHeader, error) {
+	bs, err := reader.ReadBytes(protocol.NewLine)
+	if err != nil {
+		return nil, err
+	}
+
+	tmp := bytes.Split(bs, []byte{protocol.EmptySpace})
+	if len(tmp) < 3 {
+		return nil, errors.InvalidResponseError(fmt.Sprintf("first line items count [%d] is less than 3", len(tmp)))
+	}
+
+	h := &responseHeader{}
+	h.sessionID = tmp[0]
+	h.objectCount, _ = strconv.Atoi(string(tmp[1]))
+	h.byteOrder = protocol.GetByteOrder(tmp[2][0])
+
+	return h, err
+}
+
+func (c *conn) validateResponseOK(reader protocol.Reader) error {
+	bs, err := reader.ReadBytes(protocol.NewLine)
+	if err != nil {
+		return err
+	}
+
+	if !bytes.Equal(bs, protocol.RespOK) {
+		return errors.ResponseNotOKError(bs)
+	}
+
+	return nil
+}
+
+func (c *conn) parseResponseContent(r protocol.Reader, objCount int, bo protocol.ByteOrder) (model.DataForm, error) {
+	switch objCount {
+	case 0:
+		return nil, nil
+	case 1:
+		return model.ParseDataForm(r, bo)
+	}
+
+	return nil, nil
+}
diff --git a/dialer/response_test.go b/dialer/response_test.go
new file mode 100644
index 0000000..497606b
--- /dev/null
+++ b/dialer/response_test.go
@@ -0,0 +1 @@
+package dialer
diff --git a/dialer/util.go b/dialer/util.go
new file mode 100644
index 0000000..0bdf382
--- /dev/null
+++ b/dialer/util.go
@@ -0,0 +1,58 @@
+package dialer
+
+import (
+	"bytes"
+	"strconv"
+
+	"github.com/dolphindb/api-go/dialer/protocol"
+	"github.com/dolphindb/api-go/model"
+)
+
+func generateScriptCommand(cmdStr string) []byte {
+	bs := bytes.Buffer{}
+	bs.WriteString(scriptCmd)
+	bs.WriteByte(protocol.NewLine)
+	bs.WriteString(cmdStr)
+	return bs.Bytes()
+}
+
+func generateFunctionCommand(cmdStr string, bo byte, args []model.DataForm) []byte {
+	bs := bytes.Buffer{}
+	bs.WriteString(functionCmd)
+	bs.WriteByte(protocol.NewLine)
+	bs.WriteString(cmdStr)
+	bs.WriteByte(protocol.NewLine)
+	bs.WriteString(strconv.Itoa(len(args)))
+	bs.WriteByte(protocol.NewLine)
+	bs.WriteByte(bo)
+	bs.WriteByte(protocol.NewLine)
+	return bs.Bytes()
+}
+
+func generateConnectionCommand() []byte {
+	bs := bytes.Buffer{}
+	bs.WriteString(connectCmd)
+	bs.WriteByte(protocol.NewLine)
+	return bs.Bytes()
+}
+
+func generateVariableCommand(names string, bo byte, count int) []byte {
+	bs := bytes.Buffer{}
+	bs.WriteString(variableCmd)
+	bs.WriteByte(protocol.NewLine)
+	bs.WriteString(names)
+	bs.WriteByte(protocol.NewLine)
+	bs.WriteString(strconv.Itoa(count))
+	bs.WriteByte(protocol.NewLine)
+	bs.WriteByte(bo)
+	return bs.Bytes()
+}
+
+func generatorRequestFlag(clear bool) int {
+	flag := 0
+	if clear {
+		flag += 16
+	}
+
+	return flag
+}
diff --git a/dialer/util_test.go b/dialer/util_test.go
new file mode 100644
index 0000000..497606b
--- /dev/null
+++ b/dialer/util_test.go
@@ -0,0 +1 @@
+package dialer
diff --git a/domain/domain.go b/domain/domain.go
new file mode 100644
index 0000000..4c206f1
--- /dev/null
+++ b/domain/domain.go
@@ -0,0 +1,101 @@
+package domain
+
+import (
+	"fmt"
+
+	"github.com/dolphindb/api-go/model"
+)
+
+// PartitionType decides how data is distributed when appending to a partitioned table.
+type PartitionType string
+
+const (
+	// SEQ is the string type of PartitionType SEQ.
+	SEQ PartitionType = "SEQ"
+	// VALUE is the string type of PartitionType VALUE.
+	VALUE PartitionType = "VALUE"
+	// RANGE is the string type of PartitionType RANGE.
+	RANGE PartitionType = "RANGE"
+	// LIST is the string type of PartitionType LIST.
+	LIST PartitionType = "LIST"
+	// COMPO is the string type of PartitionType COMPO.
+	COMPO PartitionType = "COMPO"
+	// HASH is the string type of PartitionType HASH.
+	HASH PartitionType = "HASH"
+)
+
+// Domain interface declares functions to get partition keys.
+type Domain interface {
+	// GetPartitionKeys returns partition keys for partitioned table append
+	GetPartitionKeys(partitionCol *model.Vector) ([]int, error)
+}
+
+// CreateDomain inits a Domain according to the partition type p.
+func CreateDomain(p PartitionType, d model.DataTypeByte, schema model.DataForm) (Domain, error) {
+	switch p {
+	case HASH:
+		dataCat := model.GetCategory(d)
+		s := schema.(*model.Scalar)
+		val := s.DataType.Value()
+
+		return &HashDomain{
+			dt:      d,
+			cat:     dataCat,
+			buckets: int(val.(int32)),
+		}, nil
+	case VALUE:
+		vct := schema.(*model.Vector)
+		return &ValueDomain{
+			dt:  vct.GetDataType(),
+			cat: model.GetCategory(vct.GetDataType()),
+		}, nil
+	case RANGE:
+		vct := schema.(*model.Vector)
+		return &RangeDomain{
+			dt:          vct.GetDataType(),
+			cat:         model.GetCategory(vct.GetDataType()),
+			rangeVector: vct,
+		}, nil
+	case LIST:
+		vct := schema.(*model.Vector)
+		if vct.GetDataType() == model.DtAny {
+			d = vct.Data.ElementValue(0).(model.DataForm).GetDataType()
+		} else {
+			d = vct.GetDataType()
+		}
+
+		return NewListDomain(vct, d, model.GetCategory(d))
+	}
+
+	return nil, fmt.Errorf("unsupported partition type %s", p)
+}
+
+// GetPartitionType returns the PartitionType for the index ind,
+// which you can get by running schema().
+func GetPartitionType(ind int) PartitionType {
+	switch ind {
+	case 0:
+		return SEQ
+	case 1:
+		return VALUE
+	case 2:
+		return RANGE
+	case 3:
+		return LIST
+	case 4:
+		return COMPO
+	case 5:
+		return HASH
+	default:
+		return SEQ
+	}
+}
+
+func getVectorRealDataType(vct *model.Vector) model.DataTypeByte {
+	dt := vct.GetDataType()
+	if dt == model.DtAny {
+		dt = vct.Data.ElementValue(0).(model.DataForm).GetDataType()
+	}
+
+	return dt
+}
diff --git a/domain/domain_test.go b/domain/domain_test.go
new file mode 100644
index 0000000..9c19804
--- /dev/null
+++ b/domain/domain_test.go
@@ -0,0 +1,71 @@
+package domain
+
+import (
+	"testing"
+
+	"github.com/dolphindb/api-go/model"
+
+	"github.com/stretchr/testify/assert"
+)
+
+func TestDomain(t *testing.T) {
+	dtb := model.DtString
+
+	dtl, err := model.NewDataTypeListWithRaw(dtb, []string{"domain"})
+	assert.Nil(t, err)
+
+	schema := model.NewVector(dtl)
+
+	pt := GetPartitionType(0)
+	_, err = CreateDomain(pt, dtb, schema)
+	assert.Equal(t, "unsupported partition type SEQ", err.Error())
+
+	pt = GetPartitionType(1)
+	domain, err := CreateDomain(pt, dtb, schema)
+	assert.Nil(t, err)
+
+	vd := domain.(*ValueDomain)
+	assert.Equal(t, vd.dt, dtb)
+	assert.Equal(t, vd.cat, model.GetCategory(dtb))
+
+	pt = GetPartitionType(2)
+	domain, err = CreateDomain(pt, dtb, schema)
+	assert.Nil(t, err)
+
+	rd := domain.(*RangeDomain)
+	assert.Equal(t, rd.dt, dtb)
+	assert.Equal(t, rd.cat, model.GetCategory(dtb))
+
+	dtl, err = model.NewDataTypeListWithRaw(model.DtAny, []model.DataForm{schema})
+	assert.Nil(t, err)
+
+	schema = model.NewVector(dtl)
+
+	pt = GetPartitionType(3)
+	domain, err = CreateDomain(pt, dtb, schema)
+	assert.Nil(t, err)
+
+	ld := domain.(*ListDomain)
+	assert.Equal(t, ld.dt, dtb)
+	assert.Equal(t, ld.cat, model.GetCategory(dtb))
+
+	pt = GetPartitionType(4)
+	_, err = CreateDomain(pt, dtb, schema)
+	assert.Equal(t, "unsupported partition type COMPO", err.Error())
+
+	dt, err := model.NewDataType(model.DtInt, int32(10))
+
assert.Nil(t, err) + + sca := model.NewScalar(dt) + pt = GetPartitionType(5) + domain, err = CreateDomain(pt, dtb, sca) + assert.Nil(t, err) + + hd := domain.(*HashDomain) + assert.Equal(t, hd.dt, dtb) + assert.Equal(t, hd.cat, model.GetCategory(dtb)) + + pt = GetPartitionType(6) + _, err = CreateDomain(pt, dtb, schema) + assert.Equal(t, "unsupported partition type SEQ", err.Error()) +} diff --git a/domain/hash_domain.go b/domain/hash_domain.go new file mode 100644 index 0000000..5bc48e8 --- /dev/null +++ b/domain/hash_domain.go @@ -0,0 +1,43 @@ +package domain + +import ( + "errors" + "fmt" + + "github.com/dolphindb/api-go/model" +) + +// HashDomain implements the Domain interface. +// You can use it to calculate partition keys with HASH partitionType. +type HashDomain struct { + buckets int + + dt model.DataTypeByte + cat model.CategoryString +} + +// GetPartitionKeys returns partition keys for partitioned table append. +func (h *HashDomain) GetPartitionKeys(partitionCol *model.Vector) ([]int, error) { + pdt := getVectorRealDataType(partitionCol) + if h.cat != model.GetCategory(pdt) { + return nil, errors.New("data category incompatible") + } + + if h.cat == model.TEMPORAL && h.dt != pdt { + df, err := model.CastDateTime(partitionCol, h.dt) + if err != nil { + return nil, fmt.Errorf("can't convert type from %s to %s", + model.GetDataTypeString(pdt), model.GetDataTypeString(h.dt)) + } + + partitionCol = df.(*model.Vector) + } + + rows := partitionCol.Rows() + keys := make([]int, rows) + for i := 0; i < rows; i++ { + keys[i] = partitionCol.HashBucket(i, h.buckets) + } + + return keys, nil +} diff --git a/domain/hash_domain_test.go b/domain/hash_domain_test.go new file mode 100644 index 0000000..683da1c --- /dev/null +++ b/domain/hash_domain_test.go @@ -0,0 +1,33 @@ +package domain + +import ( + "testing" + "time" + + "github.com/dolphindb/api-go/model" + + "github.com/stretchr/testify/assert" +) + +func TestHashDomain(t *testing.T) { + hd := &HashDomain{ + buckets: 10, + dt: model.DtDate, + cat: model.GetCategory(model.DtDate), + } + + dtl, err := model.NewDataTypeListWithRaw(model.DtString, []string{"domain"}) + assert.Nil(t, err) + + pv := model.NewVector(dtl) + _, err = hd.GetPartitionKeys(pv) + assert.Equal(t, "data category incompatible", err.Error()) + + dtl, err = model.NewDataTypeListWithRaw(model.DtDatetime, []time.Time{time.Date(2022, time.Month(1), 1, 1, 1, 1, 1, time.UTC)}) + assert.Nil(t, err) + + pv = model.NewVector(dtl) + keys, err := hd.GetPartitionKeys(pv) + assert.Nil(t, err) + assert.Equal(t, keys[0], 3) +} diff --git a/domain/list_domain.go b/domain/list_domain.go new file mode 100644 index 0000000..0c99289 --- /dev/null +++ b/domain/list_domain.go @@ -0,0 +1,81 @@ +package domain + +import ( + "errors" + "fmt" + + "github.com/dolphindb/api-go/model" +) + +// ListDomain implements the Domain interface. +// You can use it to calculate partition keys with LIST partitionType. +type ListDomain struct { + dict map[string]int + + dt model.DataTypeByte + cat model.CategoryString +} + +// NewListDomain inits a ListDomain object. 
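+// The input vector must be a tuple (DtAny); each element is one partition,
+// given either as a scalar or as a vector of member values. A minimal sketch
+// (mirroring this package's tests):
+//
+//	dt, _ := model.NewDataType(model.DtDate, time.Date(2022, 1, 1, 1, 1, 1, 1, time.UTC))
+//	tuple, _ := model.NewDataTypeListWithRaw(model.DtAny, []model.DataForm{model.NewScalar(dt)})
+//	ld, err := NewListDomain(model.NewVector(tuple), model.DtDate, model.TEMPORAL)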
+func NewListDomain(vct *model.Vector, d model.DataTypeByte, cat model.CategoryString) (*ListDomain, error) { + ld := &ListDomain{ + dt: d, + cat: cat, + dict: make(map[string]int), + } + + if vct.GetDataType() != model.DtAny { + return nil, errors.New("the input list must be a tuple") + } + + row := vct.Rows() + for i := 0; i < row; i++ { + cur := vct.Data.ElementValue(i).(model.DataForm) + if cur.GetDataForm() == model.DfScalar { + s := cur.(*model.Scalar) + key := s.DataType.String() + ld.dict[key] = i + } else { + vec := cur.(*model.Vector) + r := vec.Rows() + for j := 0; j < r; j++ { + key := vec.Data.ElementString(j) + ld.dict[key] = i + } + } + } + + return ld, nil +} + +// GetPartitionKeys returns partition keys for partitioned table append. +func (l *ListDomain) GetPartitionKeys(partitionCol *model.Vector) ([]int, error) { + pdt := getVectorRealDataType(partitionCol) + if l.cat != model.GetCategory(pdt) { + return nil, errors.New("data category incompatible") + } + + if l.cat == model.TEMPORAL && l.dt != pdt { + df, err := model.CastDateTime(partitionCol, l.dt) + if err != nil { + return nil, fmt.Errorf("can't convert type from %s to %s", + model.GetDataTypeString(pdt), model.GetDataTypeString(l.dt)) + } + + partitionCol = df.(*model.Vector) + } + + row := partitionCol.Rows() + res := make([]int, row) + for i := 0; i < row; i++ { + key := partitionCol.Data.ElementString(i) + ind, ok := l.dict[key] + if !ok { + res[i] = -1 + } else { + res[i] = ind + } + } + + return res, nil +} diff --git a/domain/list_domain_test.go b/domain/list_domain_test.go new file mode 100644 index 0000000..421e720 --- /dev/null +++ b/domain/list_domain_test.go @@ -0,0 +1,33 @@ +package domain + +import ( + "testing" + "time" + + "github.com/dolphindb/api-go/model" + + "github.com/stretchr/testify/assert" +) + +func TestListDomain(t *testing.T) { + dt, err := model.NewDataType(model.DtDate, time.Date(2022, time.Month(1), 1, 1, 1, 1, 1, time.UTC)) + assert.Nil(t, err) + + val := model.NewScalar(dt) + + dtl, err := model.NewDataTypeListWithRaw(model.DtAny, []model.DataForm{val}) + assert.Nil(t, err) + + vct := model.NewVector(dtl) + + ld, err := NewListDomain(vct, model.DtDate, model.TEMPORAL) + assert.Nil(t, err) + + dtl, err = model.NewDataTypeListWithRaw(model.DtDatetime, []time.Time{time.Date(2022, time.Month(1), 1, 1, 1, 1, 1, time.UTC)}) + assert.Nil(t, err) + + pv := model.NewVector(dtl) + keys, err := ld.GetPartitionKeys(pv) + assert.Nil(t, err) + assert.Equal(t, keys, []int{0}) +} diff --git a/domain/range_domain.go b/domain/range_domain.go new file mode 100644 index 0000000..470b846 --- /dev/null +++ b/domain/range_domain.go @@ -0,0 +1,49 @@ +package domain + +import ( + "errors" + "fmt" + + "github.com/dolphindb/api-go/model" +) + +// RangeDomain implements the Domain interface. +// You can use it to calculate partition keys with RANGE partitionType. +type RangeDomain struct { + rangeVector *model.Vector + dt model.DataTypeByte + cat model.CategoryString +} + +// GetPartitionKeys returns partition keys for partitioned table append. 
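+// For a range vector [r0, r1, r2] there are two partitions: values in
+// [r0, r1) map to key 0, values in [r1, r2) map to key 1, and values outside
+// every range map to -1. A minimal sketch (mirroring this package's tests,
+// where rd is a RangeDomain whose range vector is ["domain", "sample", "zero"]):
+//
+//	col, _ := model.NewDataTypeListWithRaw(model.DtString, []string{"sample"})
+//	keys, err := rd.GetPartitionKeys(model.NewVector(col)) // keys == []int{1}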
+func (r *RangeDomain) GetPartitionKeys(partitionCol *model.Vector) ([]int, error) { + pdt := getVectorRealDataType(partitionCol) + if r.cat != model.GetCategory(pdt) { + return nil, errors.New("data category incompatible") + } + + cg := model.GetCategory(r.dt) + if cg == model.TEMPORAL && r.dt != partitionCol.GetDataType() { + df, err := model.CastDateTime(partitionCol, r.dt) + if err != nil { + return nil, fmt.Errorf("can't convert type from %s to %s", + model.GetDataTypeString(pdt), model.GetDataTypeString(r.dt)) + } + + partitionCol = df.(*model.Vector) + } + + partitions := r.rangeVector.Rows() - 1 + row := partitionCol.Rows() + res := make([]int, row) + for i := 0; i < row; i++ { + ind := r.rangeVector.AsOf(partitionCol.Data.Get(i)) + if ind >= partitions { + res[i] = -1 + } else { + res[i] = ind + } + } + + return res, nil +} diff --git a/domain/range_domain_test.go b/domain/range_domain_test.go new file mode 100644 index 0000000..eef1803 --- /dev/null +++ b/domain/range_domain_test.go @@ -0,0 +1,48 @@ +package domain + +import ( + "testing" + + "github.com/dolphindb/api-go/model" + + "github.com/stretchr/testify/assert" +) + +func TestRangeDomain(t *testing.T) { + dtl, err := model.NewDataTypeListWithRaw(model.DtString, []string{"domain", "sample", "zero"}) + assert.Nil(t, err) + + schema := model.NewVector(dtl) + + rd := &RangeDomain{ + rangeVector: schema, + dt: model.DtString, + cat: model.LITERAL, + } + + dtl, err = model.NewDataTypeListWithRaw(model.DtBool, []byte{1}) + assert.Nil(t, err) + + schema = model.NewVector(dtl) + + _, err = rd.GetPartitionKeys(schema) + assert.Equal(t, err.Error(), "data category incompatible") + + dtl, err = model.NewDataTypeListWithRaw(model.DtString, []string{"domain"}) + assert.Nil(t, err) + + schema = model.NewVector(dtl) + + keys, err := rd.GetPartitionKeys(schema) + assert.Nil(t, err) + assert.Equal(t, keys, []int{0}) + + dtl, err = model.NewDataTypeListWithRaw(model.DtString, []string{"sample"}) + assert.Nil(t, err) + + schema = model.NewVector(dtl) + + keys, err = rd.GetPartitionKeys(schema) + assert.Nil(t, err) + assert.Equal(t, keys, []int{1}) +} diff --git a/domain/value_domain.go b/domain/value_domain.go new file mode 100644 index 0000000..295d1f8 --- /dev/null +++ b/domain/value_domain.go @@ -0,0 +1,44 @@ +package domain + +import ( + "errors" + "fmt" + + "github.com/dolphindb/api-go/model" +) + +// ValueDomain implements the Domain interface. +// You can use it to calculate partition keys with VALUE partitionType. +type ValueDomain struct { + dt model.DataTypeByte + cat model.CategoryString +} + +// GetPartitionKeys returns partition keys for partitioned table append. 
+func (v *ValueDomain) GetPartitionKeys(partitionCol *model.Vector) ([]int, error) { + pdt := getVectorRealDataType(partitionCol) + if v.cat != model.GetCategory(pdt) { + return nil, errors.New("data category incompatible") + } + + if v.cat == model.TEMPORAL && v.dt != pdt { + df, err := model.CastDateTime(partitionCol, v.dt) + if err != nil { + return nil, fmt.Errorf("can't convert type from %s to %s", model.GetDataTypeString(pdt), model.GetDataTypeString(v.dt)) + } + + partitionCol = df.(*model.Vector) + } + + if v.dt == model.DtLong { + return nil, errors.New("the partitioning column cannot be of long type") + } + + row := partitionCol.Rows() + res := make([]int, row) + for i := 0; i < row; i++ { + res[i] = partitionCol.Data.Get(i).HashBucket(1048576) + } + + return res, nil +} diff --git a/domain/value_domain_test.go b/domain/value_domain_test.go new file mode 100644 index 0000000..239040c --- /dev/null +++ b/domain/value_domain_test.go @@ -0,0 +1,25 @@ +package domain + +import ( + "testing" + "time" + + "github.com/dolphindb/api-go/model" + + "github.com/stretchr/testify/assert" +) + +func TestValueDomain(t *testing.T) { + vd := &ValueDomain{ + dt: model.DtDate, + cat: model.TEMPORAL, + } + + dtl, err := model.NewDataTypeListWithRaw(model.DtDatetime, []time.Time{time.Date(2022, time.Month(1), 1, 1, 1, 1, 1, time.UTC)}) + assert.Nil(t, err) + + pc := model.NewVector(dtl) + keys, err := vd.GetPartitionKeys(pc) + assert.Nil(t, err) + assert.Equal(t, keys, []int{18993}) +} diff --git a/errors/protocol_errors.go b/errors/protocol_errors.go new file mode 100644 index 0000000..831c0b3 --- /dev/null +++ b/errors/protocol_errors.go @@ -0,0 +1,32 @@ +package errors + +import ( + "fmt" +) + +var ( +// ErrHeaderWrongPart = errors.New("HEADER not 3 PARTS") +// ErrHeaderInvalidEndianness = errors.New("HEADER CONTAINS INVALID ENDIANNESS BYTE") +// ErrParseVectorFailed = errors.New("PARSE VECTOR FAILED") +// ErrParseScalarFailed = errors.New("PARSE SCALAR FAILED") +) + +// InvalidResponseError ... +func InvalidResponseError(msg string) error { + return fmt.Errorf("invalid response format. %s", msg) +} + +// InvalidByteOrderError ... +func InvalidByteOrderError(b byte) error { + return fmt.Errorf("invalid byte order %v", b) +} + +// ResponseNotOKError ... +func ResponseNotOKError(resp []byte) error { + return fmt.Errorf("client error response. %v", string(resp)) +} + +// ReadDataTypeAndDataFormError ... +func ReadDataTypeAndDataFormError(msg string) error { + return fmt.Errorf("failed to read DataType and DataForm. %s", msg) +} diff --git a/errors/protocol_errors_test.go b/errors/protocol_errors_test.go new file mode 100644 index 0000000..910aed1 --- /dev/null +++ b/errors/protocol_errors_test.go @@ -0,0 +1,21 @@ +package errors + +import ( + "testing" + + "github.com/stretchr/testify/assert" +) + +func TestErrors(t *testing.T) { + err := InvalidResponseError("invalid") + assert.Equal(t, err.Error(), "invalid response format. invalid") + + err = InvalidByteOrderError(2) + assert.Equal(t, err.Error(), "invalid byte order 2") + + err = ResponseNotOKError([]byte("internal error")) + assert.Equal(t, err.Error(), "client error response. internal error") + + err = ReadDataTypeAndDataFormError("%$") + assert.Equal(t, err.Error(), "failed to read DataType and DataForm. 
%$") +} diff --git a/example/apis/account.go b/example/apis/account.go new file mode 100644 index 0000000..d6a57f1 --- /dev/null +++ b/example/apis/account.go @@ -0,0 +1,24 @@ +package apis + +import ( + "fmt" + + "github.com/dolphindb/api-go/api" +) + +// Login checks whether the Login api is valid. +func Login(db api.DolphinDB) error { + l := new(api.LoginRequest). + SetUserID(User). + SetPassword(Password) + err := db.Login(l) + fmt.Println("Login") + return err +} + +// Logout checks whether the Logout api is valid. +func Logout(db api.DolphinDB) error { + err := db.Logout() + fmt.Println("Logout") + return err +} diff --git a/example/apis/const.go b/example/apis/const.go new file mode 100644 index 0000000..43574f2 --- /dev/null +++ b/example/apis/const.go @@ -0,0 +1,19 @@ +package apis + +const ( + // User is the Dolphindb userID. + User = "admin" + // Password is password of the user. + Password = "123456" + // TestAddr is the Dolphindb server address. + TestAddr = "127.0.0.1:8848" + + dbPath = "/tmp/db" + segmentDBPath = "dfs://db" + tableName = "test" + segmentTableName = "segment" + partitionedTableName = "partitioned" + dbName = "db" + remoteFilePath = "/home/zcwen/stock.csv" + loadSQL = "select name,id,value from %s" +) diff --git a/example/apis/database.go b/example/apis/database.go new file mode 100644 index 0000000..fdd238c --- /dev/null +++ b/example/apis/database.go @@ -0,0 +1,64 @@ +package apis + +import ( + "fmt" + + "github.com/dolphindb/api-go/api" +) + +// Database checks whether the Database api is valid. +func Database(db api.DolphinDB) (*api.Database, error) { + d := new(api.DatabaseRequest). + SetDirectory(dbPath). + SetDBHandle(dbName) + dt, err := db.Database(d) + + fmt.Println("CreateDatabase") + return dt, err +} + +// SegmentDatabase checks whether the Database api is valid when db is dfs db. +func SegmentDatabase(db api.DolphinDB) (*api.Database, error) { + d := new(api.DatabaseRequest). + SetDirectory(segmentDBPath). + SetPartitionType("VALUE"). + SetPartitionScheme("1..10"). + SetLocations(""). + SetEngine(""). + SetAtomic(""). + SetDBHandle(dbName) + dt, err := db.Database(d) + + fmt.Println("CreateSegmentDatabase") + return dt, err +} + +// DropSegmentDatabase checks whether the DropDatabase api is valid when db is dfs db. +func DropSegmentDatabase(db api.DolphinDB) error { + d := new(api.DropDatabaseRequest). + SetDirectory(segmentDBPath) + err := db.DropDatabase(d) + + fmt.Println("DropSegmentDatabase") + return err +} + +// DropDatabase checks whether the DropDatabase api is valid. +func DropDatabase(db api.DolphinDB) error { + d := new(api.DropDatabaseRequest). + SetDirectory(dbPath) + err := db.DropDatabase(d) + + fmt.Println("DropDatabase") + return err +} + +// ExistsDatabase checks whether the ExistsDatabase api is valid. +func ExistsDatabase(db api.DolphinDB) error { + d := new(api.ExistsDatabaseRequest). 
+ SetPath(dbPath) + b, err := db.ExistsDatabase(d) + + fmt.Println("ExistsDatabase", b) + return err +} diff --git a/example/apis/pool.go b/example/apis/pool.go new file mode 100644 index 0000000..49291c2 --- /dev/null +++ b/example/apis/pool.go @@ -0,0 +1,242 @@ +package apis + +import ( + "bytes" + "context" + "fmt" + "math/rand" + "time" + + "github.com/dolphindb/api-go/api" + "github.com/dolphindb/api-go/dialer" + "github.com/dolphindb/api-go/example/util" + "github.com/dolphindb/api-go/model" +) + +func dropDatabase(db api.DolphinDB, dbPath string) { + script := bytes.NewBufferString(fmt.Sprintf("if(existsDatabase(\"%s\")){\n", dbPath)) + script.WriteString(fmt.Sprintf(" dropDatabase(\"%s\")\n", dbPath)) + script.WriteString("}\n") + _, err := db.RunScript(script.String()) + util.AssertNil(err) +} + +// PartitionedTableAppenderWithValueDomain checks whether the PartitionedTableAppender is valid with value domain. +func PartitionedTableAppenderWithValueDomain(db api.DolphinDB) { + script := bytes.NewBufferString("t = table(timestamp(1..10) as date,string(1..10) as sym)\n") + script.WriteString("db1=database(\"\",HASH,[DATETIME,10])\n") + script.WriteString("db2=database(\"\",VALUE,string(1..10))\n") + script.WriteString("if(existsDatabase(\"dfs://demohash\")){\n") + script.WriteString(" dropDatabase(\"dfs://demohash\")\n") + script.WriteString("}\n") + script.WriteString("db =database(\"dfs://demohash\",COMPO,[db1,db2])\n") + script.WriteString("pt = db.createPartitionedTable(t,`pt,`date`sym)\n") + + _, err := db.RunScript(script.String()) + util.AssertNil(err) + + defer dropDatabase(db, "dfs://demohash") + + poolOpt := &api.PoolOption{ + Address: TestAddr, + UserID: User, + Password: Password, + PoolSize: 3, + } + + pool, err := api.NewDBConnectionPool(poolOpt) + util.AssertNil(err) + + appenderOpt := &api.PartitionedTableAppenderOption{ + Pool: pool, + DBPath: "dfs://demohash", + TableName: "pt", + PartitionCol: "sym", + } + + appender, err := api.NewPartitionedTableAppender(appenderOpt) + util.AssertNil(err) + + colNames := []string{"date", "sym"} + cols := make([]*model.Vector, 2) + + times := make([]time.Time, 10000) + for i := 0; i < 10000; i++ { + times[i] = time.Now() + } + + l, err := model.NewDataTypeListWithRaw(model.DtTimestamp, times) + util.AssertNil(err) + + cols[0] = model.NewVector(l) + + sym := make([]string, 10000) + for i := 0; i < 10000; i += 4 { + sym[i] = "2" + sym[i+1] = "3" + sym[i+2] = "4" + sym[i+3] = "5" + } + + l, err = model.NewDataTypeListWithRaw(model.DtString, sym) + util.AssertNil(err) + + cols[1] = model.NewVector(l) + for i := 0; i < 1000; i++ { + m, err := appender.Append(model.NewTable(colNames, cols)) + util.AssertNil(err) + util.AssertEqual(m, 10000) + } + + err = appender.Close() + util.AssertNil(err) + + df, err := db.RunScript("pt= loadTable(\"dfs://demohash\",`pt)\nexec count(*) from pt") + util.AssertNil(err) + util.AssertEqual(df.String(), "long(10000000)") + + fmt.Println("Run PartitionedTableAppenderWithValueDomain successful") +} + +// PartitionedTableAppenderWithHashDomain checks whether the PartitionedTableAppender is valid with hash domain. 
+func PartitionedTableAppenderWithHashDomain(db api.DolphinDB) { + script := bytes.NewBufferString("t = table(timestamp(1..10) as date,int(1..10) as sym)\n") + script.WriteString("db1=database(\"\",HASH,[DATETIME,10])\n") + script.WriteString("db2=database(\"\",HASH,[INT,5])\n") + script.WriteString("if(existsDatabase(\"dfs://demohash\")){\n") + script.WriteString(" dropDatabase(\"dfs://demohash\")}\n") + script.WriteString("db =database(\"dfs://demohash\",COMPO,[db2,db1])\n") + script.WriteString("pt = db.createPartitionedTable(t,`pt,`sym`date)") + + _, err := db.RunScript(script.String()) + util.AssertNil(err) + + defer dropDatabase(db, "dfs://demohash") + + poolOpt := &api.PoolOption{ + Address: TestAddr, + UserID: User, + Password: Password, + PoolSize: 3, + } + + pool, err := api.NewDBConnectionPool(poolOpt) + util.AssertNil(err) + + appenderOpt := &api.PartitionedTableAppenderOption{ + Pool: pool, + DBPath: "dfs://demohash", + TableName: "pt", + PartitionCol: "sym", + } + + appender, err := api.NewPartitionedTableAppender(appenderOpt) + util.AssertNil(err) + + colNames := []string{"date", "sym"} + cols := make([]*model.Vector, 2) + + times := make([]time.Time, 10000) + for i := 0; i < 10000; i++ { + times[i] = time.Date(2020, time.Month(5), 06, 21, 01, 48, 200, time.UTC) + } + + l, err := model.NewDataTypeListWithRaw(model.DtTimestamp, times) + util.AssertNil(err) + + cols[0] = model.NewVector(l) + + sym := make([]int32, 10000) + for i := 0; i < 10000; i += 4 { + sym[i] = int32(1) + sym[i+1] = int32(23) + sym[i+2] = int32(325) + sym[i+3] = int32(11) + } + + l, err = model.NewDataTypeListWithRaw(model.DtInt, sym) + util.AssertNil(err) + + cols[1] = model.NewVector(l) + for i := 0; i < 1000; i++ { + m, err := appender.Append(model.NewTable(colNames, cols)) + util.AssertNil(err) + util.AssertEqual(m, 10000) + } + + err = appender.Close() + util.AssertNil(err) + + df, err := db.RunScript("pt= loadTable(\"dfs://demohash\",`pt)\nexec count(*) from pt") + util.AssertNil(err) + util.AssertEqual(df.String(), "long(10000000)") + + fmt.Println("Run PartitionedTableAppenderWithHashDomain successful") +} + +// TableAppender checks whether the TableAppender is valid. 
+func TableAppender(db api.DolphinDB) { + script := bytes.NewBufferString("dbPath = \"dfs://tableAppenderTest\"\n") + script.WriteString("if(existsDatabase(dbPath)){\n") + script.WriteString(" dropDatabase(dbPath)\n") + script.WriteString("}\n") + script.WriteString("t = table(100:0,`id`time`data,[INT,TIME,DOUBLE])\n") + script.WriteString("db=database(dbPath,HASH, [INT,10])\n") + script.WriteString("pt = db.createPartitionedTable(t,`testAppend,`id)\n") + + _, err := db.RunScript(script.String()) + util.AssertNil(err) + + defer dropDatabase(db, "dfs://tableAppenderTest") + + conn, err := dialer.NewSimpleConn(context.TODO(), TestAddr, User, Password) + util.AssertNil(err) + + opt := &api.TableAppenderOption{ + DBPath: "dfs://tableAppenderTest", + TableName: "testAppend", + Conn: conn, + } + + appender := api.NewTableAppender(opt) + util.AssertNil(err) + + tb := packTable() + _, err = appender.Append(tb) + util.AssertNil(err) + + df, err := db.RunScript("exec count(*) from loadTable(\"dfs://tableAppenderTest\", \"testAppend\")") + util.AssertNil(err) + util.AssertEqual(df.String(), "long(100000)") + + fmt.Println("Run TableAppender successful") +} + +func packTable() *model.Table { + size := 100000 + id := make([]int32, size) + data := make([]float64, size) + ts := make([]time.Time, size) + for i := 0; i < size; i++ { + ts[i] = time.Now() + id[i] = rand.Int31() + data[i] = rand.Float64() + } + + dtl, err := model.NewDataTypeListWithRaw(model.DtInt, id) + util.AssertNil(err) + + idVct := model.NewVector(dtl) + + dtl, err = model.NewDataTypeListWithRaw(model.DtDouble, data) + util.AssertNil(err) + + dataVct := model.NewVector(dtl) + + dtl, err = model.NewDataTypeListWithRaw(model.DtTimestamp, ts) + util.AssertNil(err) + + timeVct := model.NewVector(dtl) + + return model.NewTable([]string{"id", "time", "data"}, []*model.Vector{idVct, timeVct, dataVct}) +} diff --git a/example/apis/table.go b/example/apis/table.go new file mode 100644 index 0000000..9b6140a --- /dev/null +++ b/example/apis/table.go @@ -0,0 +1,172 @@ +package apis + +import ( + "fmt" + + "github.com/dolphindb/api-go/api" +) + +// ExistsTable checks whether the ExistsTable api is valid. +func ExistsTable(db api.DolphinDB) error { + l := new(api.ExistsTableRequest). + SetDBPath(dbPath). + SetTableName(tableName) + b, err := db.ExistsTable(l) + fmt.Println("ExistsTable: ", b) + return err +} + +// SaveTable checks whether the SaveTable api is valid. +func SaveTable(db api.DolphinDB) error { + l := new(api.SaveTableRequest). + SetDBHandle(dbName). + SetTable(tableName) + err := db.SaveTable(l) + fmt.Println("SaveTable") + return err +} + +// TableWithCapacity checks whether the TableWithCapacity api is valid. +func TableWithCapacity(db api.DolphinDB) (*api.Table, error) { + l := new(api.TableWithCapacityRequest). + SetTableName(tableName).SetCapacity(100).SetSize(3). + SetColNames([]string{"name", "id", "value"}). + SetColTypes([]string{"string", "INT", "DOUBLE"}) + t, err := db.TableWithCapacity(l) + fmt.Println("TableWithCapacity", t.String()) + return t, err +} + +// Table checks whether the Table api is valid. +func Table(db api.DolphinDB) (*api.Table, error) { + l := new(api.TableRequest). + SetTableName(tableName). + AddTableParam("id", "`XOM`GS`AAPL"). + AddTableParam("x", "102.1 33.4 73.6") + t, err := db.Table(l) + fmt.Println("Table: ", t) + return t, err +} + +// DropTable checks whether the DropTable api is valid. +func DropTable(db api.DolphinDB) error { + t := new(api.DropTableRequest). + SetTableName(tableName). 
+ SetDBHandle(dbName) + err := db.DropTable(t) + fmt.Println("DropTable") + return err +} + +// DropSegmentTable checks whether the DropTable api is valid when drops dfs table. +func DropSegmentTable(db api.DolphinDB) error { + t := new(api.DropTableRequest). + SetTableName(segmentTableName). + SetDBHandle(dbName) + err := db.DropTable(t) + fmt.Println("DropSegmentTable") + return err +} + +// DropPartitionTable checks whether the DropTable api is valid when drops dfs table. +func DropPartitionTable(db api.DolphinDB) error { + t := new(api.DropTableRequest). + SetTableName(partitionedTableName). + SetDBHandle(dbName) + err := db.DropTable(t) + fmt.Println("DropPartitionTable") + return err +} + +// LoadTable checks whether the LoadTable api is valid. +func LoadTable(db api.DolphinDB) error { + t := new(api.LoadTableRequest). + SetTableName(tableName). + SetDatabase(dbPath) + df, err := db.LoadTable(t) + fmt.Println("LoadTable: ", df) + return err +} + +// LoadTableBySQL checks whether the LoadTableBySQL api is valid. +func LoadTableBySQL(db api.DolphinDB, na string) error { + t := new(api.LoadTableBySQLRequest). + SetSQL(fmt.Sprintf(loadSQL, na)). + SetDBPath(segmentDBPath). + SetTableName(partitionedTableName) + df, err := db.LoadTableBySQL(t) + fmt.Println("LoadTableBySQL: ", df) + return err +} + +// LoadText checks whether the LoadText api is valid. +func LoadText(db api.DolphinDB) error { + t := new(api.LoadTextRequest). + SetFileName(remoteFilePath) + di, err := db.LoadText(t) + fmt.Println("LoadText: ", di) + return err +} + +// PloadText checks whether the PloadText api is valid. +func PloadText(db api.DolphinDB) error { + t := new(api.PloadTextRequest). + SetFileName(remoteFilePath) + di, err := db.PloadText(t) + fmt.Println("PloadText: ", di) + return err +} + +// SaveText checks whether the SaveText api is valid. +func SaveText(db api.DolphinDB) error { + t := new(api.SaveTextRequest). + SetFileName(remoteFilePath). + SetObj(tableName) + err := db.SaveText(t) + fmt.Println("SaveText") + return err +} + +// CreateTable checks whether the CreateTable api is valid. +func CreateTable(db *api.Database) (*api.Table, error) { + c := new(api.CreateTableRequest). + SetSrcTable(tableName). + SetDimensionTableName(segmentTableName) + t, err := db.CreateTable(c) + fmt.Println("CreateTable: ", t) + + return t, err +} + +// CreatePartitionedTable checks whether the CreatePartitionedTable api is valid. +func CreatePartitionedTable(db *api.Database) (*api.Table, error) { + c := new(api.CreatePartitionedTableRequest). + SetSrcTable(tableName). + SetPartitionedTableName(partitionedTableName). + SetPartitionColumns([]string{"id"}) + t, err := db.CreatePartitionedTable(c) + fmt.Println("CreatePartitionedTable: ", t) + + return t, err +} + +// DropPartition checks whether the DropPartition api is valid. +func DropPartition(db api.DolphinDB) error { + t := new(api.DropPartitionRequest). + SetPartitionPaths("GS"). + SetTableName(partitionedTableName). + SetDBHandle(dbName) + err := db.DropPartition(t) + fmt.Println("DropPartition") + return err +} + +// LoadPartitionedTable checks whether the LoadTable api is valid when table is dfs table. +func LoadPartitionedTable(db api.DolphinDB) error { + t := new(api.LoadTableRequest). + SetTableName(partitionedTableName). 
+ SetDatabase(segmentDBPath) + df, err := db.LoadTable(t) + fmt.Println("LoadPartitionedTable: ", df) + return err +} diff --git a/example/example.go b/example/example.go new file mode 100644 index 0000000..726386e --- /dev/null +++ b/example/example.go @@ -0,0 +1,118 @@ +package main + +import ( + "context" + "fmt" + + "github.com/dolphindb/api-go/api" + "github.com/dolphindb/api-go/example/apis" + "github.com/dolphindb/api-go/example/mtw" + "github.com/dolphindb/api-go/example/script" + "github.com/dolphindb/api-go/example/streaming_test" + "github.com/dolphindb/api-go/example/util" +) + +func main() { + // new a DolphinDB client + db, err := api.NewDolphinDBClient(context.TODO(), apis.TestAddr, nil) + util.AssertNil(err) + + // connect to server + err = db.Connect() + util.AssertNil(err) + + // close connection + defer db.Close() + + // log in server + err = apis.Login(db) + util.AssertNil(err) + + // check whether the database exists + err = apis.ExistsDatabase(db) + util.AssertNil(err) + + // create database + _, err = apis.Database(db) + util.AssertNil(err) + + // check whether the database exists + err = apis.ExistsDatabase(db) + util.AssertNil(err) + + // test memory table + _, err = apis.Table(db) + util.AssertNil(err) + err = apis.SaveTable(db) + util.AssertNil(err) + err = apis.SaveText(db) + util.AssertNil(err) + err = apis.LoadText(db) + util.AssertNil(err) + err = apis.PloadText(db) + util.AssertNil(err) + err = apis.LoadTable(db) + util.AssertNil(err) + err = apis.DropTable(db) + util.AssertNil(err) + + err = apis.DropDatabase(db) + util.AssertNil(err) + err = apis.ExistsDatabase(db) + util.AssertNil(err) + + // test partitioned table + sd, err := apis.SegmentDatabase(db) + util.AssertNil(err) + + cta, err := apis.TableWithCapacity(db) + util.AssertNil(err) + dt, err := apis.CreatePartitionedTable(sd) + util.AssertNil(err) + + _, err = db.RunScript(fmt.Sprintf("%s.append!(%s)", dt.Handle, cta.Handle)) + util.AssertNil(err) + + err = apis.LoadPartitionedTable(db) + util.AssertNil(err) + err = apis.LoadTableBySQL(db, dt.Handle) + util.AssertNil(err) + // err = apis.DropPartition(db) + err = apis.DropPartitionTable(db) + util.AssertNil(err) + _, err = apis.CreateTable(sd) + util.AssertNil(err) + + err = apis.DropSegmentTable(db) + util.AssertNil(err) + err = apis.DropSegmentDatabase(db) + util.AssertNil(err) + + // check script,function and upload func + script.CheckDataType(db) + script.CheckDataForm(db) + script.CheckFunction(db) + + // test Appender + apis.PartitionedTableAppenderWithValueDomain(db) + apis.PartitionedTableAppenderWithHashDomain(db) + apis.TableAppender(db) + + // test streaming + streaming_test.GoroutineClient(db) + streaming_test.PollingClient(db) + streaming_test.GoroutinePooledClient(db) + + //test mtw + mtw.MultiGoroutineDfsTable() + mtw.MultiGoroutineTable() + + // clear cache + err = db.UndefAll() + util.AssertNil(err) + c := new(api.ClearAllCacheRequest).SetIsDFS(true) + err = db.ClearAllCache(c) + util.AssertNil(err) + err = apis.Logout(db) + util.AssertNil(err) +} diff --git a/example/mtw/mtw.go b/example/mtw/mtw.go new file mode 100644 index 0000000..13d7202 --- /dev/null +++ b/example/mtw/mtw.go @@ -0,0 +1,184 @@ +package mtw + +import ( + "bytes" + "context" + "fmt" + "time" + + "github.com/dolphindb/api-go/dialer" + "github.com/dolphindb/api-go/example/apis" + "github.com/dolphindb/api-go/example/util" + "github.com/dolphindb/api-go/model" + mtw "github.com/dolphindb/api-go/multigoroutinetable" +) + +// MultiGoroutineTable checks whether the 
MultiGoroutineTable is valid when inserting into a memory table.
+func MultiGoroutineTable() {
+ conn, err := dialer.NewSimpleConn(context.TODO(), apis.TestAddr, apis.User, apis.Password)
+ util.AssertNil(err)
+
+ defer func() {
+  _, _ = conn.RunScript("for(db in getClusterDFSDatabases()){\n dropDatabase(db)\n}")
+  _ = conn.Close()
+ }()
+
+ buf := bytes.NewBufferString("t=streamTable(1:0, `sym`tradeDate, [SYMBOL,DATEHOUR]);\n")
+ buf.WriteString("addColumn(t,\"col\"+string(1..200),take([DOUBLE],200));share t as t1;\n")
+ buf.WriteString("td=streamTable(1:0, `sym`tradeDate, [SYMBOL,DATEHOUR]);\n")
+ buf.WriteString("addColumn(td,\"col\"+string(1..200),take([DOUBLE],200));share td as trades;")
+ _, err = conn.RunScript(buf.String())
+ util.AssertNil(err)
+
+ opt := &mtw.Option{
+  Database:       "",
+  Address:        apis.TestAddr,
+  UserID:         apis.User,
+  Password:       apis.Password,
+  TableName:      "trades",
+  GoroutineCount: 2,
+  PartitionCol:   "sym",
+  BatchSize:      10000,
+  Throttle:       1,
+ }
+
+ mtt, err := mtw.NewMultiGoroutineTable(opt)
+ util.AssertNil(err)
+
+ for ind := 0; ind < 10000; ind++ {
+  row := make([]model.DataForm, 202)
+  dt, err := model.NewDataType(model.DtString, "2")
+  util.AssertNil(err)
+
+  row[0] = model.NewScalar(dt)
+
+  dt, err = model.NewDataType(model.DtNanoTimestamp, time.Date(2022, time.Month(1), 1+ind%10, 1, 1, 0, 0, time.UTC))
+  util.AssertNil(err)
+
+  row[1] = model.NewScalar(dt)
+  i := float64(ind)
+  for j := 0; j < 200; j++ {
+   dt, err = model.NewDataType(model.DtDouble, i+0.1)
+   util.AssertNil(err)
+
+   row[j+2] = model.NewScalar(dt)
+  }
+
+  _, err = conn.RunFunc("tableInsert{t1}", row)
+  util.AssertNil(err)
+
+  // build the 202 Insert arguments (sym, tradeDate, then the 200 double columns)
+  // in a loop instead of spelling out 200 identical literals
+  args := make([]interface{}, 0, 202)
+  args = append(args, "2", time.Date(2022, time.Month(1), 1+ind%10, 1, 1, 0, 0, time.UTC))
+  for j := 0; j < 200; j++ {
+   args = append(args, i+0.1)
+  }
+  err = mtt.Insert(args...)
+  util.AssertNil(err)
+ }
+
+ mtt.WaitForGoroutineCompletion()
+ raw, err := conn.RunScript("select * from trades order by sym,tradeDate;")
+ util.AssertNil(err)
+
+ bt := raw.(*model.Table)
+
+ raw, err = conn.RunScript("select * from t1 order by sym,tradeDate;")
+ util.AssertNil(err)
+
+ ex := raw.(*model.Table)
+ for k := range bt.ColNames {
+  // both write paths received identical rows, so every column must match
+  col1 := bt.GetColumnByIndex(k)
+  col2 := ex.GetColumnByIndex(k)
+  util.AssertEqual(col1.String(), col2.String())
+ }
+
+ fmt.Println("Run MultiGoroutineTable successful")
+}
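+
+// A minimal sketch of the write pattern used above and below, for a hypothetical
+// three-column table (option fields assumed set up as in opt): create the writer,
+// pass one value per column to Insert, then wait for the writer goroutines to drain:
+//
+//  mtt, _ := mtw.NewMultiGoroutineTable(opt)
+//  _ = mtt.Insert("A", time.Now(), 1.1)
+//  mtt.WaitForGoroutineCompletion()
+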
+// MultiGoroutineDfsTable checks whether the MultiGoroutineTable is valid when inserting into a DFS table.
+func MultiGoroutineDfsTable() {
+ conn, err := dialer.NewSimpleConn(context.TODO(), apis.TestAddr, apis.User, apis.Password)
+ util.AssertNil(err)
+
+ defer func() {
+  _, _ = conn.RunScript("for(db in getClusterDFSDatabases()){\n dropDatabase(db)\n}")
+  _ = conn.Close()
+ }()
+
+ buf := bytes.NewBufferString("t=table(1:0, `sym`tradeDate, [SYMBOL,TIMESTAMP]);\n")
+ buf.WriteString("addColumn(t,\"col\"+string(1..200),take([DOUBLE],200));share t as t1;\n")
+ buf.WriteString("dbName = \"dfs://test_MultigoroutineTableWriter\"\n")
+ buf.WriteString("if(exists(dbName)){\n")
+ buf.WriteString(" dropDatabase(dbName) \n")
+ buf.WriteString("}\n")
+ buf.WriteString("db=database(dbName, VALUE, date(1..2),,'TSDB');\n")
+ buf.WriteString("createPartitionedTable(dbHandle=db, table=t, tableName=`pt1, partitionColumns=[\"tradeDate\"],sortColumns=`tradeDate,compressMethods={tradeDate:\"delta\"});")
+ _, err = conn.RunScript(buf.String())
+ util.AssertNil(err)
+
+ opt := &mtw.Option{
+  Database:       "dfs://test_MultigoroutineTableWriter",
+  Address:        apis.TestAddr,
+  UserID:         apis.User,
+  Password:       apis.Password,
+  TableName:      "pt1",
+  GoroutineCount: 2,
+  PartitionCol:   "tradeDate",
+  BatchSize:      10000,
+  Throttle:       1,
+ }
+
+ mtt, err := mtw.NewMultiGoroutineTable(opt)
+ util.AssertNil(err)
+
+ for ind := 0; ind < 10000; ind++ {
+  row := make([]model.DataForm, 202)
+  dt, err := model.NewDataType(model.DtString, "2")
+  util.AssertNil(err)
+
+  row[0] = model.NewScalar(dt)
+
+  dt, err = model.NewDataType(model.DtNanoTimestamp, time.Date(2022, time.Month(1), 1+ind%10, 1, 1, 0, 0, time.UTC))
+  util.AssertNil(err)
+
+  row[1] = model.NewScalar(dt)
+  i := float64(ind)
+  for j := 0; j < 200; j++ {
+   dt, err = model.NewDataType(model.DtDouble, i+0.1)
+   util.AssertNil(err)
+
+   row[j+2] = model.NewScalar(dt)
+  }
+
+  _, err = conn.RunFunc("tableInsert{t1}", row)
+  util.AssertNil(err)
+
+  // same argument-building loop as in MultiGoroutineTable above
+  args := make([]interface{}, 0, 202)
+  args = append(args, "2", time.Date(2022, time.Month(1), 1+ind%10, 1, 1, 0, 0, time.UTC))
+  for j := 0; j < 200; j++ {
+   args = append(args, i+0.1)
+  }
+  err = mtt.Insert(args...)
+  util.AssertNil(err)
+ }
+
+ mtt.WaitForGoroutineCompletion()
+ raw, err := conn.RunScript("select * from loadTable('dfs://test_MultigoroutineTableWriter',`pt1) order by sym,tradeDate;")
+ util.AssertNil(err)
+
+ bt := raw.(*model.Table)
+
+ raw, err = conn.RunScript("select * from t1 order by sym,tradeDate;")
+ util.AssertNil(err)
+
+ ex := raw.(*model.Table)
+ for k := range bt.ColNames {
+  col1 := bt.GetColumnByIndex(k)
+  col2 := ex.GetColumnByIndex(k)
+  util.AssertEqual(col1.String(), col2.String())
+ }
+
+ fmt.Println("Run MultiGoroutineDfsTable successful")
+}
diff --git a/example/script/function.go b/example/script/function.go
new file mode 100644
index 0000000..e318ad4
--- /dev/null
+++ b/example/script/function.go
@@ -0,0 +1,35 @@
+package script
+
+import (
+ "fmt"
+
+ "github.com/dolphindb/api-go/api"
+ "github.com/dolphindb/api-go/example/util"
+ "github.com/dolphindb/api-go/model"
+)
+
+// CheckFunction checks whether the RunFunc is valid.
+func CheckFunction(db api.DolphinDB) {
+ l, err := model.NewDataTypeListWithRaw(model.DtDouble, []float64{1.5, 2.5, 7})
+ util.AssertNil(err)
+
+ df, err := db.RunFunc("sum", []model.DataForm{model.NewVector(l)})
+ util.AssertNil(err)
+ util.AssertEqual(df.String(), "double(11)")
+
+ _, err = db.RunScript("def f(a,b) {return a+b};")
+ util.AssertNil(err)
+
+ arg0, err := model.NewDataType(model.DtInt, int32(1))
+ util.AssertNil(err)
+
+ arg1, err := model.NewDataType(model.DtInt, int32(2))
+ util.AssertNil(err)
+
+ df, err = db.RunFunc("f", []model.DataForm{model.NewScalar(arg0),
+  model.NewScalar(arg1)})
+ util.AssertNil(err)
+ util.AssertEqual(df.String(), "int(3)")
+
+ fmt.Println("CheckFunction Successful")
+}
diff --git a/example/script/script.go b/example/script/script.go
new file mode 100644
index 0000000..5e0af9f
--- /dev/null
+++ b/example/script/script.go
@@ -0,0 +1,580 @@
+package script
+
+import (
+ "bytes"
+ "fmt"
+ "time"
+
+ "github.com/dolphindb/api-go/api"
+ "github.com/dolphindb/api-go/example/util"
+ "github.com/dolphindb/api-go/model"
+)
+
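+// Every check below follows the same round-trip pattern: upload a DataForm
+// under a name, read it back with RunScript, and compare the String() forms.
+// A minimal sketch of that pattern, for any DataForm value form:
+//
+//  _, _ = db.Upload(map[string]model.DataForm{"x": form})
+//  back, _ := db.RunScript("x")
+//  util.AssertEqual(back.String(), form.String())
+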
+// CheckDataForm checks whether the DataForm serialization and deserialization are valid.
+func CheckDataForm(db api.DolphinDB) {
+ // test parse dataform chart
+ by := bytes.NewBuffer([]byte("dates=(2012.01.01..2016.07.31)[def(x):weekday(x) between 1:5]\n"))
+ by.WriteString("chartData=each(cumsum,reshape(rand(10000,dates.size()*5)-4500, dates.size():5))\n")
+ by.WriteString("chartData.rename!(dates, \"Strategy#\"+string(1..5))\n")
+ by.WriteString("plot(chartData,,[\"Cumulative Pnls of Five Strategies\",\"date\",\"pnl\"],LINE)")
+ ch, err := db.RunScript(by.String())
+ util.AssertNil(err)
+ util.AssertEqual(ch.GetDataForm(), model.DfChart)
+
+ // test render dataform vector
+ dt, err := model.NewDataType(model.DtString, "key")
+ util.AssertNil(err)
+
+ vc := model.NewVector(model.NewDataTypeList(model.DtString, []model.DataType{dt}))
+ _, err = db.Upload(map[string]model.DataForm{"vector": vc})
+ util.AssertNil(err)
+
+ // test parse dataform vector
+ res, err := db.RunScript("vector")
+ util.AssertNil(err)
+ util.AssertEqual(res.String(), vc.String())
+ util.AssertEqual(res.GetDataForm(), model.DfVector)
+
+ // test render dataform vector with arrayvector
+ dls, err := model.NewDataTypeListWithRaw(model.DtInt, []int32{1, 2, 3, 4, 5, 6, 7, 8, 9})
+ util.AssertNil(err)
+
+ vct := model.NewVector(dls)
+ av := model.NewArrayVector([]*model.Vector{vct})
+
+ avc := model.NewVectorWithArrayVector(av)
+ _, err = db.Upload(map[string]model.DataForm{"arrvec": avc})
+ util.AssertNil(err)
+
+ // test parse dataform vector with arrayvector
+ df, err := db.RunScript("arrvec")
+ util.AssertNil(err)
+ util.AssertEqual(df.String(), avc.String())
+
+ // test render dataform scalar
+ s := model.NewScalar(dt)
+
+ _, err = db.Upload(map[string]model.DataForm{"scalar": s})
+ util.AssertNil(err)
+
+ // test parse dataform scalar
+ res, err = db.RunScript("scalar")
+ util.AssertNil(err)
+ util.AssertEqual(res.String(), s.String())
+ util.AssertEqual(res.GetDataForm(), model.DfScalar)
+
+ // test render dataform set
+ set := model.NewSet(vc)
+ _, err = db.Upload(map[string]model.DataForm{"set": set})
+ util.AssertNil(err)
+
+ // test parse dataform set
+ res, err = db.RunScript("set")
+ util.AssertNil(err)
+ util.AssertEqual(res.String(), set.String())
+ util.AssertEqual(res.GetDataForm(), model.DfSet)
+
+ // test render dataform table
+ tb := model.NewTable([]string{"key"}, []*model.Vector{vc})
+ _, err = db.Upload(map[string]model.DataForm{"table": tb})
+ util.AssertNil(err)
+
+ // test parse dataform table
+ res, err = db.RunScript("table")
+ util.AssertNil(err)
+ util.AssertEqual(res.String(), tb.String())
+ util.AssertEqual(res.GetDataForm(), model.DfTable)
+
+ dt1, err := model.NewDataType(model.DtString, "value")
+ util.AssertNil(err)
+
+ // test render dataform dictionary
+ dict := model.NewDictionary(vc, vc)
+ _, err = db.Upload(map[string]model.DataForm{"dict": dict})
+ util.AssertNil(err)
+
+ // test parse dataform dictionary
+ res, err = db.RunScript("dict")
+ util.AssertNil(err)
+ util.AssertEqual(res.String(), dict.String())
+ util.AssertEqual(res.GetDataForm(), model.DfDictionary)
+
+ // test render dataform pair
+ vc = model.NewVector(model.NewDataTypeList(model.DtString, []model.DataType{dt, dt1}))
+ pair := model.NewPair(vc)
+
+ _, err = db.Upload(map[string]model.DataForm{"pair": pair})
+ util.AssertNil(err)
+
+ // test parse dataform pair
+ res, err = db.RunScript("pair")
+ util.AssertNil(err)
+ util.AssertEqual(res.String(), pair.String())
+ util.AssertEqual(res.GetDataForm(), model.DfPair)
+
+ // test parse dataform matrix
+ mtr, err := db.RunScript("cross(+, 1..5, 1..5)")
+ util.AssertNil(err)
+ util.AssertEqual(mtr.GetDataForm(), model.DfMatrix)
+
+ // test render dataform matrix
+ _, err = db.Upload(map[string]model.DataForm{"mtr": mtr})
+ util.AssertNil(err)
+
+ res, err = db.RunScript("mtr")
+ util.AssertNil(err)
+ util.AssertEqual(res.String(), mtr.String())
+ util.AssertEqual(res.GetDataForm(), model.DfMatrix)
+
+ fmt.Println("CheckDataForm Successful")
+}
+
+// CheckDataType checks whether the DataType serialization and deserialization are valid.
+func CheckDataType(db api.DolphinDB) {
+ t := time.Date(1970, time.Month(1), 1, 0, 0, 0, 0, time.UTC).Add(1000 * time.Hour)
+
+ // test render datatype bool
+ dt, err := model.NewDataType(model.DtBool, byte(1))
+ util.AssertNil(err)
+
+ s := model.NewScalar(dt)
+ _, err = db.Upload(map[string]model.DataForm{"bool": s})
+ util.AssertNil(err)
+
+ // test parse datatype bool
+ raw, err := db.RunScript("bool")
+ util.AssertNil(err)
+
+ res := raw.(*model.Scalar)
+ util.AssertEqual(res.GetDataForm(), model.DfScalar)
+ util.AssertEqual(res.DataType.DataType(), model.DtBool)
+ util.AssertEqual(res.String(), s.String())
+
+ // test render datatype any
+ dal, err := model.NewDataTypeListWithRaw(model.DtAny, []model.DataForm{s})
+ util.AssertNil(err)
+
+ vc := model.NewVector(dal)
+ _, err = db.Upload(map[string]model.DataForm{"any": vc})
+ util.AssertNil(err)
+
+ // test parse datatype any
+ raw, err = db.RunScript("any")
+ util.AssertNil(err)
+
+ v := raw.(*model.Vector)
+ util.AssertEqual(v.GetDataForm(), model.DfVector)
+ util.AssertEqual(v.String(), vc.String())
+
+ // test render datatype string
+ dt, err = model.NewDataType(model.DtString, "example")
+ util.AssertNil(err)
+
+ s = model.NewScalar(dt)
+ _, err = db.Upload(map[string]model.DataForm{"string": s})
+ util.AssertNil(err)
+
+ // test parse datatype string
+ raw, err = db.RunScript("string")
+ util.AssertNil(err)
+
+ res = raw.(*model.Scalar)
+ util.AssertEqual(res.GetDataForm(), model.DfScalar)
+ util.AssertEqual(res.DataType.DataType(), model.DtString)
+ util.AssertEqual(res.String(), s.String())
+
+ // test render datatype char
+ dt, err = model.NewDataType(model.DtChar, byte(97))
+ util.AssertNil(err)
+
+ s = model.NewScalar(dt)
+ _, err = db.Upload(map[string]model.DataForm{"char": s})
+ util.AssertNil(err)
+
+ // test parse datatype char
+ raw, err = db.RunScript("char")
+ util.AssertNil(err)
+
+ res = raw.(*model.Scalar)
+ util.AssertEqual(res.GetDataForm(), model.DfScalar)
+ util.AssertEqual(res.DataType.DataType(), model.DtChar)
+ util.AssertEqual(res.String(), s.String())
+
+ // test render datatype complex
+ dt, err = model.NewDataType(model.DtComplex, [2]float64{1, 1})
+ util.AssertNil(err)
+
+ s = model.NewScalar(dt)
+ _, err = db.Upload(map[string]model.DataForm{"complex": s})
+ util.AssertNil(err)
+
+ // test parse datatype complex
+ raw, err = db.RunScript("complex")
+ util.AssertNil(err)
+
+ res = raw.(*model.Scalar)
+ util.AssertEqual(res.GetDataForm(), model.DfScalar)
+ util.AssertEqual(res.DataType.DataType(), model.DtComplex)
+ util.AssertEqual(res.String(), s.String())
+
+ // test render datatype short
+ dt, err = model.NewDataType(model.DtShort, int16(10))
+ util.AssertNil(err)
+
+ s = model.NewScalar(dt)
+ _, err = db.Upload(map[string]model.DataForm{"short": s})
+ util.AssertNil(err)
+
+ // test parse datatype short
+ raw, err = db.RunScript("short")
+ util.AssertNil(err)
+
+ res = raw.(*model.Scalar)
+ util.AssertEqual(res.GetDataForm(),
model.DfScalar) + util.AssertEqual(res.DataType.DataType(), model.DtShort) + util.AssertEqual(res.String(), s.String()) + + // test render datatype blob + dt, err = model.NewDataType(model.DtBlob, []byte{10, 12, 14, 56}) + util.AssertNil(err) + + s = model.NewScalar(dt) + _, err = db.Upload(map[string]model.DataForm{"blob": s}) + util.AssertNil(err) + + // test parse datatype blob + raw, err = db.RunScript("blob") + util.AssertNil(err) + + res = raw.(*model.Scalar) + util.AssertEqual(res.GetDataForm(), model.DfScalar) + util.AssertEqual(res.DataType.DataType(), model.DtBlob) + util.AssertEqual(res.String(), s.String()) + + // test render datatype date + dt, err = model.NewDataType(model.DtDate, t) + util.AssertNil(err) + + s = model.NewScalar(dt) + _, err = db.Upload(map[string]model.DataForm{"date": s}) + util.AssertNil(err) + + // test parse datatype date + raw, err = db.RunScript("date") + util.AssertNil(err) + + res = raw.(*model.Scalar) + util.AssertEqual(res.GetDataForm(), model.DfScalar) + util.AssertEqual(res.DataType.DataType(), model.DtDate) + util.AssertEqual(res.String(), s.String()) + + // test render datatype datehour + dt, err = model.NewDataType(model.DtDateHour, t) + util.AssertNil(err) + + s = model.NewScalar(dt) + _, err = db.Upload(map[string]model.DataForm{"datehour": s}) + util.AssertNil(err) + + // test parse datatype datehour + raw, err = db.RunScript("datehour") + util.AssertNil(err) + + res = raw.(*model.Scalar) + util.AssertEqual(res.GetDataForm(), model.DfScalar) + util.AssertEqual(res.DataType.DataType(), model.DtDateHour) + util.AssertEqual(res.String(), s.String()) + + // test render datatype datetime + dt, err = model.NewDataType(model.DtDatetime, t) + util.AssertNil(err) + + s = model.NewScalar(dt) + _, err = db.Upload(map[string]model.DataForm{"datetime": s}) + util.AssertNil(err) + + // test parse datatype datetime + raw, err = db.RunScript("datetime") + util.AssertNil(err) + + res = raw.(*model.Scalar) + util.AssertEqual(res.GetDataForm(), model.DfScalar) + util.AssertEqual(res.DataType.DataType(), model.DtDatetime) + util.AssertEqual(res.String(), s.String()) + + // test render datatype double + dt, err = model.NewDataType(model.DtDouble, float64(1)) + util.AssertNil(err) + + s = model.NewScalar(dt) + _, err = db.Upload(map[string]model.DataForm{"double": s}) + util.AssertNil(err) + + // test parse datatype double + raw, err = db.RunScript("double") + util.AssertNil(err) + + res = raw.(*model.Scalar) + util.AssertEqual(res.GetDataForm(), model.DfScalar) + util.AssertEqual(res.DataType.DataType(), model.DtDouble) + util.AssertEqual(res.String(), s.String()) + + // test render datatype float + dt, err = model.NewDataType(model.DtFloat, float32(1.0)) + util.AssertNil(err) + + s = model.NewScalar(dt) + _, err = db.Upload(map[string]model.DataForm{"float": s}) + util.AssertNil(err) + + // test parse datatype float + raw, err = db.RunScript("float") + util.AssertNil(err) + + res = raw.(*model.Scalar) + util.AssertEqual(res.GetDataForm(), model.DfScalar) + util.AssertEqual(res.DataType.DataType(), model.DtFloat) + util.AssertEqual(res.String(), s.String()) + + // test render datatype duration + dt, err = model.NewDataType(model.DtDuration, "10H") + util.AssertNil(err) + + s = model.NewScalar(dt) + _, err = db.Upload(map[string]model.DataForm{"duration": s}) + util.AssertNil(err) + + // test parse datatype duration + raw, err = db.RunScript("duration") + util.AssertNil(err) + + res = raw.(*model.Scalar) + util.AssertEqual(res.GetDataForm(), model.DfScalar) + 
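// DURATION values carry a unit suffix (e.g. "10H"), so the round-tripped string form must match the original exactly
+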
util.AssertEqual(res.DataType.DataType(), model.DtDuration) + util.AssertEqual(res.String(), s.String()) + + // test render datatype int + dt, err = model.NewDataType(model.DtInt, int32(10)) + util.AssertNil(err) + + s = model.NewScalar(dt) + _, err = db.Upload(map[string]model.DataForm{"int": s}) + util.AssertNil(err) + + // test parse datatype int + raw, err = db.RunScript("int") + util.AssertNil(err) + + res = raw.(*model.Scalar) + util.AssertEqual(res.GetDataForm(), model.DfScalar) + util.AssertEqual(res.DataType.DataType(), model.DtInt) + util.AssertEqual(res.String(), s.String()) + + // test render datatype int128 + dt, err = model.NewDataType(model.DtInt128, "e1671797c52e15f763380b45e841ec32") + util.AssertNil(err) + + s = model.NewScalar(dt) + _, err = db.Upload(map[string]model.DataForm{"int128": s}) + util.AssertNil(err) + + // test parse datatype int128 + raw, err = db.RunScript("int128") + util.AssertNil(err) + + res = raw.(*model.Scalar) + util.AssertEqual(res.GetDataForm(), model.DfScalar) + util.AssertEqual(res.DataType.DataType(), model.DtInt128) + util.AssertEqual(res.String(), s.String()) + + // test render datatype ip + dt, err = model.NewDataType(model.DtIP, "346b:6c2a:3347:d244:7654:5d5a:bcbb:5dc7") + util.AssertNil(err) + + s = model.NewScalar(dt) + _, err = db.Upload(map[string]model.DataForm{"ip": s}) + util.AssertNil(err) + + // test parse datatype ip + raw, err = db.RunScript("ip") + util.AssertNil(err) + + res = raw.(*model.Scalar) + util.AssertEqual(res.GetDataForm(), model.DfScalar) + util.AssertEqual(res.DataType.DataType(), model.DtIP) + util.AssertEqual(res.String(), s.String()) + + // test render datatype long + dt, err = model.NewDataType(model.DtLong, int64(100)) + util.AssertNil(err) + + s = model.NewScalar(dt) + _, err = db.Upload(map[string]model.DataForm{"long": s}) + util.AssertNil(err) + + // test parse datatype long + raw, err = db.RunScript("long") + util.AssertNil(err) + + res = raw.(*model.Scalar) + util.AssertEqual(res.GetDataForm(), model.DfScalar) + util.AssertEqual(res.DataType.DataType(), model.DtLong) + util.AssertEqual(res.String(), s.String()) + + // test render datatype minute + dt, err = model.NewDataType(model.DtMinute, t) + util.AssertNil(err) + + s = model.NewScalar(dt) + _, err = db.Upload(map[string]model.DataForm{"minute": s}) + util.AssertNil(err) + + // test parse datatype minute + raw, err = db.RunScript("minute") + util.AssertNil(err) + + res = raw.(*model.Scalar) + util.AssertEqual(res.GetDataForm(), model.DfScalar) + util.AssertEqual(res.DataType.DataType(), model.DtMinute) + util.AssertEqual(res.String(), s.String()) + + // test render datatype month + dt, err = model.NewDataType(model.DtMonth, time.Date(2021, 5, 1, 1, 1, 1, 1, time.UTC)) + util.AssertNil(err) + + s = model.NewScalar(dt) + _, err = db.Upload(map[string]model.DataForm{"month": s}) + util.AssertNil(err) + + // test parse datatype month + raw, err = db.RunScript("month") + util.AssertNil(err) + + res = raw.(*model.Scalar) + util.AssertEqual(res.GetDataForm(), model.DfScalar) + util.AssertEqual(res.DataType.DataType(), model.DtMonth) + util.AssertEqual(res.String(), s.String()) + + // test render datatype nanotime + dt, err = model.NewDataType(model.DtNanoTime, t) + util.AssertNil(err) + + s = model.NewScalar(dt) + _, err = db.Upload(map[string]model.DataForm{"nanotime": s}) + util.AssertNil(err) + + // test parse datatype nanotime + raw, err = db.RunScript("nanotime") + util.AssertNil(err) + + res = raw.(*model.Scalar) + util.AssertEqual(res.GetDataForm(), 
model.DfScalar) + util.AssertEqual(res.DataType.DataType(), model.DtNanoTime) + util.AssertEqual(res.String(), s.String()) + + // test render datatype nanotimestamp + dt, err = model.NewDataType(model.DtNanoTimestamp, t) + util.AssertNil(err) + + s = model.NewScalar(dt) + _, err = db.Upload(map[string]model.DataForm{"nanotimestamp": s}) + util.AssertNil(err) + + // test parse datatype nanotimestamp + raw, err = db.RunScript("nanotimestamp") + util.AssertNil(err) + + res = raw.(*model.Scalar) + util.AssertEqual(res.GetDataForm(), model.DfScalar) + util.AssertEqual(res.DataType.DataType(), model.DtNanoTimestamp) + util.AssertEqual(res.String(), s.String()) + + // test render datatype point + dt, err = model.NewDataType(model.DtPoint, [2]float64{10, 10}) + util.AssertNil(err) + + s = model.NewScalar(dt) + _, err = db.Upload(map[string]model.DataForm{"point": s}) + util.AssertNil(err) + + // test parse datatype point + raw, err = db.RunScript("point") + util.AssertNil(err) + + res = raw.(*model.Scalar) + util.AssertEqual(res.GetDataForm(), model.DfScalar) + util.AssertEqual(res.DataType.DataType(), model.DtPoint) + util.AssertEqual(res.String(), s.String()) + + // test render datatype second + dt, err = model.NewDataType(model.DtSecond, t) + util.AssertNil(err) + + s = model.NewScalar(dt) + _, err = db.Upload(map[string]model.DataForm{"second": s}) + util.AssertNil(err) + + // test parse datatype second + raw, err = db.RunScript("second") + util.AssertNil(err) + + res = raw.(*model.Scalar) + util.AssertEqual(res.GetDataForm(), model.DfScalar) + util.AssertEqual(res.DataType.DataType(), model.DtSecond) + util.AssertEqual(res.String(), s.String()) + + // test render datatype time + dt, err = model.NewDataType(model.DtTime, t) + util.AssertNil(err) + + s = model.NewScalar(dt) + _, err = db.Upload(map[string]model.DataForm{"time": s}) + util.AssertNil(err) + + // test parse datatype time + raw, err = db.RunScript("time") + util.AssertNil(err) + + res = raw.(*model.Scalar) + util.AssertEqual(res.GetDataForm(), model.DfScalar) + util.AssertEqual(res.DataType.DataType(), model.DtTime) + util.AssertEqual(res.String(), s.String()) + + // test render datatype timestamp + dt, err = model.NewDataType(model.DtTimestamp, t) + util.AssertNil(err) + + s = model.NewScalar(dt) + _, err = db.Upload(map[string]model.DataForm{"timestamp": s}) + util.AssertNil(err) + + // test parse datatype timestamp + raw, err = db.RunScript("timestamp") + util.AssertNil(err) + + res = raw.(*model.Scalar) + util.AssertEqual(res.GetDataForm(), model.DfScalar) + util.AssertEqual(res.DataType.DataType(), model.DtTimestamp) + util.AssertEqual(res.String(), s.String()) + + // test render datatype uuid + dt, err = model.NewDataType(model.DtUUID, "e5eca940-5b99-45d0-bf1c-620f6b1b9d5b") + util.AssertNil(err) + + s = model.NewScalar(dt) + _, err = db.Upload(map[string]model.DataForm{"uuid": s}) + util.AssertNil(err) + + // test parse datatype uuid + raw, err = db.RunScript("uuid") + util.AssertNil(err) + + res = raw.(*model.Scalar) + util.AssertEqual(res.GetDataForm(), model.DfScalar) + util.AssertEqual(res.DataType.DataType(), model.DtUUID) + util.AssertEqual(res.String(), s.String()) + + fmt.Println("CheckDataType Successful") +} diff --git a/example/streaming_test/streaming.go b/example/streaming_test/streaming.go new file mode 100644 index 0000000..a96e793 --- /dev/null +++ b/example/streaming_test/streaming.go @@ -0,0 +1,210 @@ +package streaming_test + +import ( + "bytes" + "context" + "fmt" + "time" + + 
"github.com/dolphindb/api-go/api" + "github.com/dolphindb/api-go/dialer" + "github.com/dolphindb/api-go/example/apis" + "github.com/dolphindb/api-go/example/util" + "github.com/dolphindb/api-go/model" + "github.com/dolphindb/api-go/streaming" +) + +var streamConn dialer.Conn + +type sampleHandler1 struct{} + +func (s *sampleHandler1) DoEvent(msg streaming.IMessage) { + val0 := msg.GetValue(0).(*model.Scalar).DataType.String() + val1 := msg.GetValue(0).(*model.Scalar).DataType.String() + val2 := msg.GetValue(0).(*model.Scalar).DataType.String() + val3 := msg.GetValue(0).(*model.Scalar).DataType.String() + val4 := msg.GetValue(0).(*model.Scalar).DataType.String() + val5 := msg.GetValue(0).(*model.Scalar).DataType.String() + val6 := msg.GetValue(0).(*model.Scalar).DataType.String() + val7 := msg.GetValue(0).(*model.Scalar).DataType.String() + val8 := msg.GetValue(0).(*model.Scalar).DataType.String() + val9 := msg.GetValue(0).(*model.Scalar).DataType.String() + val10 := msg.GetValue(0).(*model.Scalar).DataType.String() + val11 := msg.GetValue(0).(*model.Scalar).DataType.String() + val12 := msg.GetValue(0).(*model.Scalar).DataType.String() + + script := fmt.Sprintf("insert into sub1 values(%s,%s,\"%s\",%s,%s,%s,%s,%s,%s,%s,%s,%s,%s)", + val0, val1, val2, val3, val4, val5, val6, val7, val8, val9, val10, val11, val12) + + _, err := streamConn.RunScript(script) + util.AssertNil(err) +} + +type sampleHandler2 struct{} + +func (s *sampleHandler2) DoEvent(msg streaming.IMessage) { + val0 := msg.GetValue(0).(*model.Scalar).DataType.String() + val1 := msg.GetValue(0).(*model.Scalar).DataType.String() + val2 := msg.GetValue(0).(*model.Scalar).DataType.String() + val3 := msg.GetValue(0).(*model.Scalar).DataType.String() + val4 := msg.GetValue(0).(*model.Scalar).DataType.String() + val5 := msg.GetValue(0).(*model.Scalar).DataType.String() + val6 := msg.GetValue(0).(*model.Scalar).DataType.String() + val7 := msg.GetValue(0).(*model.Scalar).DataType.String() + val8 := msg.GetValue(0).(*model.Scalar).DataType.String() + val9 := msg.GetValue(0).(*model.Scalar).DataType.String() + val10 := msg.GetValue(0).(*model.Scalar).DataType.String() + val11 := msg.GetValue(0).(*model.Scalar).DataType.String() + val12 := msg.GetValue(0).(*model.Scalar).DataType.String() + + script := fmt.Sprintf("insert into sub2 values(%s,%s,\"%s\",%s,%s,%s,%s,%s,%s,%s,%s,%s,%s)", + val0, val1, val2, val3, val4, val5, val6, val7, val8, val9, val10, val11, val12) + + _, err := streamConn.RunScript(script) + util.AssertNil(err) +} + +func prepareStreamTable(db api.DolphinDB, tableName string) { + script := fmt.Sprintf("share(streamTable(1000000:0, `permno`timestamp`ticker`price1`price2`price3`price4`price5`vol1`vol2`vol3`vol4`vol5, [INT, TIMESTAMP, SYMBOL, DOUBLE, DOUBLE, DOUBLE, DOUBLE, DOUBLE, INT, INT, INT, INT, INT]), \"%s\")", tableName) + _, err := db.RunScript(script) + util.AssertNil(err) +} + +func writeStreamTable(db api.DolphinDB, tableName string, batch int) { + buf := bytes.NewBufferString(fmt.Sprintf("tmp = table(%d:%d, `permno`timestamp`ticker`price1`price2`price3`price4`price5`vol1`vol2`vol3`vol4`vol5, [INT, TIMESTAMP, SYMBOL, DOUBLE, DOUBLE, DOUBLE, DOUBLE, DOUBLE, INT, INT, INT, INT, INT]);", batch, batch)) + buf.WriteString(fmt.Sprintf("tmp[`permno] = rand(1000, %d);", batch)) + buf.WriteString(fmt.Sprintf("tmp[`timestamp] = take(now(), %d);", batch)) + buf.WriteString(fmt.Sprintf("tmp[`ticker] = rand(\"A\"+string(1..1000), %d);", batch)) + buf.WriteString(fmt.Sprintf("tmp[`price1] = rand(100, %d);", batch)) + 
buf.WriteString(fmt.Sprintf("tmp[`price2] = rand(100, %d);", batch)) + buf.WriteString(fmt.Sprintf("tmp[`price3] = rand(100, %d);", batch)) + buf.WriteString(fmt.Sprintf("tmp[`price4] = rand(100, %d);", batch)) + buf.WriteString(fmt.Sprintf("tmp[`price5] = rand(100, %d);", batch)) + buf.WriteString(fmt.Sprintf("tmp[`vol1] = rand(100, %d);", batch)) + buf.WriteString(fmt.Sprintf("tmp[`vol2] = rand(100, %d);", batch)) + buf.WriteString(fmt.Sprintf("tmp[`vol3] = rand(100, %d);", batch)) + buf.WriteString(fmt.Sprintf("tmp[`vol4] = rand(100, %d);", batch)) + buf.WriteString(fmt.Sprintf("tmp[`vol5] = rand(100, %d);", batch)) + buf.WriteString(fmt.Sprintf("%s.append!(tmp);", tableName)) + + _, err := db.RunScript(buf.String()) + util.AssertNil(err) +} + +// GoroutineClient checks whether the GoroutineClient is valid +func GoroutineClient(db api.DolphinDB) { + var err error + streamConn, err = dialer.NewSimpleConn(context.TODO(), apis.TestAddr, apis.User, apis.Password) + util.AssertNil(err) + + prepareStreamTable(db, "pub") + prepareStreamTable(db, "sub1") + writeStreamTable(db, "pub", 1000) + client := streaming.NewGoroutineClient("localhost", 8100) + req := &streaming.SubscribeRequest{ + Address: apis.TestAddr, + TableName: "pub", + ActionName: "action1", + Handler: new(sampleHandler1), + Offset: 0, + Reconnect: true, + } + + err = client.Subscribe(req) + util.AssertNil(err) + + time.Sleep(1 * time.Second) + + df, err := db.RunScript("exec count(*) from sub1") + util.AssertNil(err) + util.AssertEqual(df.String(), "int(1000)") + + err = client.UnSubscribe(req) + util.AssertNil(err) + + client.Close() + _, _ = db.RunScript("undef(`pub, SHARED)") + _, _ = db.RunScript("undef(`sub1, SHARED)") + + streamConn.Close() + + fmt.Println("Run GoroutineClient successful") +} + +// PollingClient checks whether the PollingClient is valid +func PollingClient(db api.DolphinDB) { + var err error + streamConn, err = dialer.NewSimpleConn(context.TODO(), apis.TestAddr, apis.User, apis.Password) + util.AssertNil(err) + + prepareStreamTable(db, "pub1") + writeStreamTable(db, "pub1", 1000) + client := streaming.NewPollingClient("localhost", 8101) + req := &streaming.SubscribeRequest{ + Address: apis.TestAddr, + TableName: "pub1", + ActionName: "action1", + Offset: 0, + Reconnect: true, + } + + poll, err := client.Subscribe(req) + util.AssertNil(err) + + time.Sleep(1 * time.Second) + + msg := poll.Poll(1000, 1000) + util.AssertEqual(len(msg), 1000) + + err = client.UnSubscribe(req) + util.AssertNil(err) + + client.Close() + _, err = db.RunScript("undef(`pub1, SHARED)") + util.AssertNil(err) + + streamConn.Close() + + fmt.Println("Run PollingClient successful") +} + +// GoroutinePooledClient checks whether the GoroutinePooledClient is valid +func GoroutinePooledClient(db api.DolphinDB) { + var err error + streamConn, err = dialer.NewSimpleConn(context.TODO(), apis.TestAddr, apis.User, apis.Password) + util.AssertNil(err) + + prepareStreamTable(db, "pub2") + prepareStreamTable(db, "sub2") + writeStreamTable(db, "pub2", 1000) + client := streaming.NewGoroutinePooledClient("localhost", 8102) + req := &streaming.SubscribeRequest{ + Address: apis.TestAddr, + TableName: "pub2", + ActionName: "action1", + Handler: new(sampleHandler2), + Offset: 0, + Reconnect: true, + } + + err = client.Subscribe(req) + util.AssertNil(err) + + writeStreamTable(db, "pub2", 1000) + time.Sleep(1 * time.Second) + + df, err := db.RunScript("exec count(*) from sub2") + util.AssertNil(err) + util.AssertEqual(df.String(), "int(2000)") + + err = 
+
+// GoroutinePooledClient checks whether the GoroutinePooledClient works correctly
+func GoroutinePooledClient(db api.DolphinDB) {
+	var err error
+	streamConn, err = dialer.NewSimpleConn(context.TODO(), apis.TestAddr, apis.User, apis.Password)
+	util.AssertNil(err)
+
+	prepareStreamTable(db, "pub2")
+	prepareStreamTable(db, "sub2")
+	writeStreamTable(db, "pub2", 1000)
+	client := streaming.NewGoroutinePooledClient("localhost", 8102)
+	req := &streaming.SubscribeRequest{
+		Address:    apis.TestAddr,
+		TableName:  "pub2",
+		ActionName: "action1",
+		Handler:    new(sampleHandler2),
+		Offset:     0,
+		Reconnect:  true,
+	}
+
+	err = client.Subscribe(req)
+	util.AssertNil(err)
+
+	writeStreamTable(db, "pub2", 1000)
+	time.Sleep(1 * time.Second)
+
+	df, err := db.RunScript("exec count(*) from sub2")
+	util.AssertNil(err)
+	util.AssertEqual(df.String(), "int(2000)")
+
+	err = client.UnSubscribe(req)
+	util.AssertNil(err)
+
+	client.Close()
+	_, _ = db.RunScript("undef(`pub2, SHARED)")
+	_, _ = db.RunScript("undef(`sub2, SHARED)")
+
+	streamConn.Close()
+
+	fmt.Println("Run GoroutinePooledClient successfully")
+}
diff --git a/example/util/util.go b/example/util/util.go
new file mode 100644
index 0000000..e61665d
--- /dev/null
+++ b/example/util/util.go
@@ -0,0 +1,30 @@
+package util
+
+import (
+	"fmt"
+	"reflect"
+)
+
+// AssertNil checks whether the error is nil.
+// If it is not, AssertNil panics.
+func AssertNil(err error) {
+	if err != nil {
+		panic(fmt.Sprintf("err is not nil: %s", err.Error()))
+	}
+}
+
+// AssertEqual checks whether s is deeply equal to d.
+// If they differ, AssertEqual panics.
+func AssertEqual(s, d interface{}) {
+	if !reflect.DeepEqual(s, d) {
+		panic(fmt.Sprintf("%v != %v", s, d))
+	}
+}
+
+// AssertTrue checks whether s is true.
+// If it is not, AssertTrue panics.
+func AssertTrue(s bool) {
+	if !s {
+		panic(fmt.Sprintf("%v is not true", s))
+	}
+}
diff --git a/go.mod b/go.mod
new file mode 100644
index 0000000..6dc965d
--- /dev/null
+++ b/go.mod
@@ -0,0 +1,16 @@
+module github.com/dolphindb/api-go
+
+go 1.15
+
+require (
+	github.com/satori/go.uuid v1.1.0
+	github.com/smallnest/chanx v1.0.0
+	github.com/stretchr/testify v1.7.2
+)
+
+require (
+	github.com/davecgh/go-spew v1.1.1 // indirect
+	github.com/kr/pretty v0.3.0 // indirect
+	github.com/smartystreets/goconvey v1.7.2
+	gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c // indirect
+)
diff --git a/go.sum b/go.sum
new file mode 100644
index 0000000..55a0c15
--- /dev/null
+++ b/go.sum
@@ -0,0 +1,45 @@
+github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E=
+github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
+github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c=
+github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
+github.com/gopherjs/gopherjs v0.0.0-20181017120253-0766667cb4d1 h1:EGx4pi6eqNxGaHF6qqu48+N2wcFQ5qg5FXgOdqsJ5d8=
+github.com/gopherjs/gopherjs v0.0.0-20181017120253-0766667cb4d1/go.mod h1:wJfORRmW1u3UXTncJ5qlYoELFm8eSnnEO6hX4iZ3EWY=
+github.com/jtolds/gls v4.20.0+incompatible h1:xdiiI2gbIgH/gLH7ADydsJ1uDOEzR8yvV7C0MuV77Wo=
+github.com/jtolds/gls v4.20.0+incompatible/go.mod h1:QJZ7F/aHp+rZTRtaJ1ow/lLfFfVYBRgL+9YlvaHOwJU=
+github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo=
+github.com/kr/pretty v0.2.1/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI=
+github.com/kr/pretty v0.3.0 h1:WgNl7dwNpEZ6jJ9k1snq4pZsg7DOEN8hP9Xw0Tsjwk0=
+github.com/kr/pretty v0.3.0/go.mod h1:640gp4NfQd8pI5XOwp5fnNeVWj67G7CFk/SaSQn7NBk=
+github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ=
+github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI=
+github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY=
+github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE=
+github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM=
+github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
+github.com/rogpeppe/go-internal v1.6.1 h1:/FiVV8dS/e+YqF2JvO3yXRFbBLTIuSDkuC7aBOAvL+k=
+github.com/rogpeppe/go-internal v1.6.1/go.mod h1:xXDCJY+GAPziupqXw64V24skbSoqbTEfhy4qGm1nDQc=
+github.com/satori/go.uuid v1.1.0 h1:B9KXyj+GzIpJbV7gmr873NsY6zpbxNy24CBtGrk7jHo=
+github.com/satori/go.uuid v1.1.0/go.mod h1:dA0hQrYB0VpLJoorglMZABFdXlWrHn1NEOzdhQKdks0=
+github.com/smallnest/chanx v1.0.0 h1:xd03MIQ0+doMOPQz/RQfHOTEeBTETputo9YZASCE/Os= +github.com/smallnest/chanx v1.0.0/go.mod h1:LH2uJLgza9WaWa4MgunOQERqM3t2ryDEq9LkPBlEfWM= +github.com/smartystreets/assertions v1.2.0 h1:42S6lae5dvLc7BrLu/0ugRtcFVjoJNMC/N3yZFZkDFs= +github.com/smartystreets/assertions v1.2.0/go.mod h1:tcbTF8ujkAEcZ8TElKY+i30BzYlVhC/LOxJk7iOWnoo= +github.com/smartystreets/goconvey v1.7.2 h1:9RBaZCeXEQ3UselpuwUQHltGVXvdwm6cv1hgR6gDIPg= +github.com/smartystreets/goconvey v1.7.2/go.mod h1:Vw0tHAZW6lzCRk3xgdin6fKYcG+G3Pg9vgXWeJpQFMM= +github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= +github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= +github.com/stretchr/testify v1.7.2 h1:4jaiDzPyXQvSd7D0EjG45355tLlV3VOECpq10pLC+8s= +github.com/stretchr/testify v1.7.2/go.mod h1:R6va5+xMeoiuVRoj+gSkQ7d3FALtqAAGI1FQKckRals= +golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= +golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= +golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= +golang.org/x/tools v0.0.0-20190328211700-ab21143f2384/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= +gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk= +gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q= +gopkg.in/errgo.v2 v2.1.0/go.mod h1:hNsd1EY+bozCKY1Ytp96fpM3vjJbqLJn88ws8XvfDNI= +gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= +gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA= +gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= diff --git a/images/ddb.svg b/images/ddb.svg deleted file mode 100644 index bd41105..0000000 --- a/images/ddb.svg +++ /dev/null @@ -1 +0,0 @@ - \ No newline at end of file diff --git a/model/chart.go b/model/chart.go new file mode 100644 index 0000000..53be023 --- /dev/null +++ b/model/chart.go @@ -0,0 +1,218 @@ +package model + +import ( + "fmt" + "strings" + + "github.com/dolphindb/api-go/dialer/protocol" +) + +// ChartType is a map storing the mapping relationship. +var ChartType = map[string]string{ + "0": "CT_AREA", + "1": "CT_BAR", + "2": "CT_COLUMN", + "3": "CT_HISTOGRAM", + "4": "CT_LINE", + "5": "CT_PIE", + "6": "CT_SCATTER", + "7": "CT_TREND", + "8": "CT_KLINE", + "9": "CT_STACK", +} + +// Chart is a DataForm. +type Chart struct { + category *Category + rowCount int + + Title DataForm + ChartType *Scalar + Stacking *Scalar + Data *Matrix + Extras *Dictionary +} + +// NewChart returns an object of chart according to in. 
+func NewChart(in map[string]DataForm) *Chart {
+	ch := &Chart{
+		rowCount: len(in),
+		category: newCategory(byte(DfChart), byte(DtAny)),
+	}
+
+	for k, v := range in {
+		switch k {
+		case "title":
+			// The title can be a Scalar or a Vector, so keep it as a DataForm.
+			ch.Title = v
+		case "chartType":
+			ch.ChartType = v.(*Scalar)
+		case "stacking":
+			ch.Stacking = v.(*Scalar)
+		case "data":
+			ch.Data = v.(*Matrix)
+		case "extras":
+			ch.Extras = v.(*Dictionary)
+		}
+	}
+	return ch
+}
+
+// GetDataForm returns the byte type of the DataForm.
+func (ch *Chart) GetDataForm() DataFormByte {
+	return DfChart
+}
+
+// GetDataType returns the byte type of the DataType.
+func (ch *Chart) GetDataType() DataTypeByte {
+	return DtAny
+}
+
+// GetTitle returns the string type of the title.
+func (ch *Chart) GetTitle() string {
+	if ch.Title == nil {
+		return ""
+	} else if ch.Title.GetDataForm() == DfScalar {
+		return ch.Title.(*Scalar).DataType.String()
+	}
+
+	return ch.Title.(*Vector).Data.ElementString(0)
+}
+
+// GetChartType returns the string type of the ChartType.
+func (ch *Chart) GetChartType() string {
+	if ch.ChartType == nil {
+		return ""
+	}
+
+	return ChartType[ch.ChartType.DataType.String()]
+}
+
+// GetXAxisName returns the XAxisName of the title.
+func (ch *Chart) GetXAxisName() string {
+	if ch.Title == nil || ch.Title.GetDataForm() != DfVector || ch.Title.Rows() < 2 {
+		return ""
+	}
+
+	return ch.Title.(*Vector).Data.ElementString(1)
+}
+
+// GetYAxisName returns the YAxisName of the title.
+func (ch *Chart) GetYAxisName() string {
+	if ch.Title == nil || ch.Title.GetDataForm() != DfVector || ch.Title.Rows() < 3 {
+		return ""
+	}
+
+	return ch.Title.(*Vector).Data.ElementString(2)
+}
+
+// GetDataTypeString returns the string format of the DataType.
+func (ch *Chart) GetDataTypeString() string {
+	return GetDataTypeString(ch.category.DataType)
+}
+
+// Rows returns the row num of the DataForm.
+func (ch *Chart) Rows() int {
+	return ch.rowCount
+}
+
+// Render serializes the DataForm with bo and writes it to w.
+func (ch *Chart) Render(w *protocol.Writer, bo protocol.ByteOrder) error { + if err := ch.category.render(w); err != nil { + return err + } + + keys, values := ch.packKeysAndValues() + kdl, err := NewDataTypeListWithRaw(DtString, keys) + if err != nil { + return err + } + + kv := NewVector(kdl) + if err = kv.Render(w, bo); err != nil { + return err + } + + vdl, err := NewDataTypeListWithRaw(DtAny, values) + if err != nil { + return err + } + + vv := NewVector(vdl) + if err = vv.Render(w, bo); err != nil { + return err + } + + return nil +} + +func (ch *Chart) packKeysAndValues() ([]string, []DataForm) { + keys := make([]string, 0) + values := make([]DataForm, 0) + if ch.Title != nil { + keys = append(keys, "title") + values = append(values, ch.Title) + } + + if ch.ChartType != nil { + keys = append(keys, "chartType") + values = append(values, ch.ChartType) + } + + if ch.Stacking != nil { + keys = append(keys, "stacking") + values = append(values, ch.Stacking) + } + + if ch.Data != nil { + keys = append(keys, "data") + values = append(values, ch.Data) + } + + if ch.Extras != nil { + keys = append(keys, "extras") + values = append(values, ch.Extras) + } + + return keys, values +} + +func (ch *Chart) String() string { + by := strings.Builder{} + by.WriteString("Chart({\n") + if ch.Title != nil { + var val interface{} + if ch.Title.GetDataForm() == DfVector { + val = ch.Title.(*Vector).formatString() + } else if ch.Title.GetDataForm() == DfScalar { + val = ch.Title.(*Scalar).DataType.String() + } + by.WriteString(fmt.Sprintf(" title: %v\n", val)) + } + + if ch.ChartType != nil { + v := ch.ChartType.DataType.String() + by.WriteString(fmt.Sprintf(" chartType: %s\n", ChartType[v])) + } + + if ch.Stacking != nil { + v, err := ch.Stacking.Bool() + if err != nil { + return "" + } + + by.WriteString(fmt.Sprintf(" stacking: %v\n", v)) + } + + if ch.Data != nil { + v := ch.Data.String() + by.WriteString(fmt.Sprintf(" data: %s\n", v)) + } + + if ch.Extras != nil { + v := ch.Extras.String() + by.WriteString(fmt.Sprintf(" extras: %s\n", v)) + } + + by.WriteString("})") + return by.String() +} diff --git a/model/chart_test.go b/model/chart_test.go new file mode 100644 index 0000000..a10d849 --- /dev/null +++ b/model/chart_test.go @@ -0,0 +1,68 @@ +package model + +import ( + "bytes" + "testing" + + "github.com/dolphindb/api-go/dialer/protocol" + + "github.com/stretchr/testify/assert" +) + +const chartExpect = "Chart({\n title: [chart xaxis yaxis]\n chartType: CT_LINE\n stacking: false\n data: matrix[3r][1c]({\n rows: null,\n cols: null,\n data: stringArray(3) [\n m1,\n m2,\n m3,\n ]\n})\n extras: dict([\n string[3]([key1, key2, key3]),\n string[3]([value1, value2, value3]),\n])\n})" + +func TestChart(t *testing.T) { + dtl, err := NewDataTypeListWithRaw(DtString, []string{"chart", "xaxis", "yaxis"}) + assert.Nil(t, err) + assert.Equal(t, dtl.DataType(), DtString) + + ti := NewVector(dtl) + + dt, err := NewDataType(DtInt, int32(4)) + assert.Nil(t, err) + + ct := NewScalar(dt) + + dt, err = NewDataType(DtBool, byte(0)) + assert.Nil(t, err) + + st := NewScalar(dt) + + d, err := NewDataTypeListWithRaw(DtString, []string{"m1", "m2", "m3"}) + assert.Nil(t, err) + + data := NewMatrix(NewVector(d), nil, nil) + + keys, err := NewDataTypeListWithRaw(DtString, []string{"key1", "key2", "key3"}) + assert.Nil(t, err) + + values, err := NewDataTypeListWithRaw(DtString, []string{"value1", "value2", "value3"}) + assert.Nil(t, err) + + extras := NewDictionary(NewVector(keys), NewVector(values)) + + ch := 
NewChart(map[string]DataForm{ + "title": ti, + "chartType": ct, + "stacking": st, + "data": data, + "extras": extras, + }) + assert.Equal(t, ch.GetDataForm(), DfChart) + assert.Equal(t, ch.GetDataType(), DtAny) + assert.Equal(t, ch.GetDataTypeString(), "any") + assert.Equal(t, ch.Rows(), 5) + assert.Equal(t, ch.GetTitle(), "chart") + assert.Equal(t, ch.GetXAxisName(), "xaxis") + assert.Equal(t, ch.GetYAxisName(), "yaxis") + assert.Equal(t, ch.GetChartType(), "CT_LINE") + + by := bytes.NewBufferString("") + w := protocol.NewWriter(by) + err = ch.Render(w, protocol.LittleEndian) + w.Flush() + + assert.Nil(t, err) + assert.Equal(t, by.String(), "\x19\a\x12\x01\x05\x00\x00\x00\x01\x00\x00\x00title\x00chartType\x00stacking\x00data\x00extras\x00\x19\x01\x05\x00\x00\x00\x01\x00\x00\x00\x12\x01\x03\x00\x00\x00\x01\x00\x00\x00chart\x00xaxis\x00yaxis\x00\x04\x00\x04\x00\x00\x00\x01\x00\x00\x12\x03\x00\x12\x03\x03\x00\x00\x00\x01\x00\x00\x00m1\x00m2\x00m3\x00\x12\x05\x12\x01\x03\x00\x00\x00\x01\x00\x00\x00key1\x00key2\x00key3\x00\x12\x01\x03\x00\x00\x00\x01\x00\x00\x00value1\x00value2\x00value3\x00") + assert.Equal(t, ch.String(), chartExpect) +} diff --git a/model/const.go b/model/const.go new file mode 100644 index 0000000..ea7a1c9 --- /dev/null +++ b/model/const.go @@ -0,0 +1,192 @@ +package model + +import ( + "math" + "time" +) + +const ( + // MinInt8 is the minimum int8 of type uint8. + MinInt8 uint8 = 128 + // MinInt32 is minimum int32 of type uint32. + MinInt32 uint32 = 2147483648 +) + +const void = "void(null)" + +const ( + // LOGICAL is the string type of category LOGICAL. + LOGICAL CategoryString = "LOGICAL" + // NOTHING is the string type of category NOTHING. + NOTHING CategoryString = "NOTHING" + // INTEGRAL is the string type of category INTEGRAL. + INTEGRAL CategoryString = "INTEGRAL" + // FLOATING is the string type of category FLOATING. + FLOATING CategoryString = "FLOATING" + // TEMPORAL is the string type of category TEMPORAL. + TEMPORAL CategoryString = "TEMPORAL" + // LITERAL is the string type of category LITERAL. + LITERAL CategoryString = "LITERAL" + // SYSTEM is the string type of category SYSTEM. + SYSTEM CategoryString = "SYSTEM" + // MIXED is the string type of category MIXED. + MIXED CategoryString = "MIXED" + // BINARY is the string type of category BINARY. + BINARY CategoryString = "BINARY" + // ARRAY is the string type of category ARRAY. + ARRAY CategoryString = "ARRAY" +) + +const ( + // DfScalar is the byte type of Scalar. + DfScalar DataFormByte = iota + // DfVector is the byte type of Vector. + DfVector + // DfPair is the byte type of Pair. + DfPair + // DfMatrix is the byte type of Matrix. + DfMatrix + // DfSet is the byte type of Set. + DfSet + // DfDictionary is the byte type of Dictionary. + DfDictionary + // DfTable is the byte type of Table. + DfTable + // DfChart is the byte type of Chart. + DfChart + // DfChunk is the byte type of Chunk. + DfChunk +) + +const ( + // DtVoid is the byte type of Void. + DtVoid DataTypeByte = iota + // DtBool is the byte type of Bool. + DtBool + // DtChar is the byte type of Char. + DtChar + // DtShort is the byte type of Short. + DtShort + // DtInt is the byte type of Int. + DtInt + // DtLong is the byte type of Long. + DtLong + // DtDate is the byte type of Date. + DtDate + // DtMonth is the byte type of Month. + DtMonth + // DtTime is the byte type of Time. + DtTime + // DtMinute is the byte type of Minute. + DtMinute + // DtSecond is the byte type of Second. + DtSecond + // DtDatetime is the byte type of Datetime. 
+	DtDatetime
+	// DtTimestamp is the byte type of Timestamp.
+	DtTimestamp
+	// DtNanoTime is the byte type of NanoTime.
+	DtNanoTime
+	// DtNanoTimestamp is the byte type of NanoTimestamp.
+	DtNanoTimestamp
+	// DtFloat is the byte type of Float.
+	DtFloat
+	// DtDouble is the byte type of Double.
+	DtDouble
+	// DtSymbol is the byte type of Symbol.
+	DtSymbol
+	// DtString is the byte type of String.
+	DtString
+	// DtUUID is the byte type of UUID.
+	DtUUID
+	// DtFunction is the byte type of Function.
+	DtFunction
+	// DtHandle is the byte type of Handle.
+	DtHandle
+	// DtCode is the byte type of Code.
+	DtCode
+	// DtDatasource is the byte type of Datasource.
+	DtDatasource
+	// DtResource is the byte type of Resource.
+	DtResource
+	// DtAny is the byte type of Any.
+	DtAny
+	// DtCompress is the byte type of Compress.
+	DtCompress
+	// DtDictionary is the byte type of Dictionary.
+	DtDictionary
+	// DtDateHour is the byte type of DateHour.
+	DtDateHour
+	// DtDateMinute is the byte type of DateMinute.
+	DtDateMinute
+	// DtIP is the byte type of IP.
+	DtIP
+	// DtInt128 is the byte type of Int128.
+	DtInt128
+	// DtBlob is the byte type of Blob.
+	DtBlob
+	// dt33 is an unexported placeholder that keeps the values below aligned with the server's type codes.
+	dt33
+	// DtComplex is the byte type of Complex.
+	DtComplex
+	// DtPoint is the byte type of Point.
+	DtPoint
+	// DtDuration is the byte type of Duration.
+	DtDuration
+	// DtObject is the byte type of Object.
+	DtObject
+)
+
+// CategoryString is the string type of Category.
+type CategoryString string
+
+// DataTypeByte is the byte type of DataType.
+type DataTypeByte byte
+
+// DataFormByte is the byte type of DataForm.
+type DataFormByte byte
+
+var (
+	emptyTime      = time.Time{}
+	emptyDuration  = [2]uint32{MinInt32, 0}
+	emptyInt64List = [2]uint64{0, 0}
+	emptyPoint     = "(,)"
+)
+
+var (
+	// Null value for DtString, DtSymbol.
+	NullString = ""
+	// Null value for DtAny.
+	NullAny = nullDataForm
+	// Null value for DtUUID.
+	NullUUID = "00000000-0000-0000-0000-000000000000"
+	// Null value for DtInt128.
+	NullInt128 = "00000000000000000000000000000000"
+	// Null value for DtIP.
+	NullIP = "0.0.0.0"
+	// Null value for DtShort.
+	NullShort = int16(math.MinInt16)
+	// Null value for DtDate, DtDateHour, DtDatetime, DtMinute, DtNanoTime, DtNanoTimestamp, DtSecond, DtMonth, DtTimestamp.
+	NullTime = emptyTime
+	// Null value for DtLong.
+	NullLong = int64(math.MinInt64)
+	// Null value for DtDuration.
+	NullDuration = ""
+	// Null value for DtFloat.
+	NullFloat = float32(-math.MaxFloat32)
+	// Null value for DtDouble.
+	NullDouble = -math.MaxFloat64
+	// Null value for DtInt.
+	NullInt = int32(math.MinInt32)
+	// Null value for DtComplex.
+	NullComplex = [2]float64{-math.MaxFloat64, -math.MaxFloat64}
+	// Null value for DtPoint.
+	NullPoint = [2]float64{-math.MaxFloat64, -math.MaxFloat64}
+	// Null value for DtBlob.
+	NullBlob = []byte{}
+	// Null value for DtBool.
+	NullBool = MinInt8
+	// Null value for DtChar.
+	NullChar = MinInt8
+	// Null value for DtCompress.
+	NullCompress = MinInt8
+)
diff --git a/model/const_test.go b/model/const_test.go
new file mode 100644
index 0000000..8b53790
--- /dev/null
+++ b/model/const_test.go
@@ -0,0 +1 @@
+package model
diff --git a/model/dataform.go b/model/dataform.go
new file mode 100644
index 0000000..8e2d2bf
--- /dev/null
+++ b/model/dataform.go
@@ -0,0 +1,79 @@
+package model
+
+import (
+	"github.com/dolphindb/api-go/dialer/protocol"
+	"github.com/dolphindb/api-go/errors"
+)
+
+// DataForm interface declares functions to handle DataForm data.
+type DataForm interface { + // Render serializes the DataForm with bo and input it into w + Render(w *protocol.Writer, bo protocol.ByteOrder) error + + // GetDataForm returns the byte type of the DataForm + GetDataForm() DataFormByte + // GetDataType returns the byte type of the DataType + GetDataType() DataTypeByte + // GetDataTypeString returns the string format of the DataType + GetDataTypeString() string + + // String returns the string of the DataForm + String() string + // Rows returns the row num of the DataForm + Rows() int +} + +// Category stores the DataFormByte and the DataTypeByte of a DataForm. +type Category struct { + DataForm DataFormByte + DataType DataTypeByte +} + +func newCategory(dataForm, datatype byte) *Category { + return &Category{ + DataForm: DataFormByte(dataForm), + DataType: DataTypeByte(datatype), + } +} + +func (cg *Category) render(w *protocol.Writer) error { + return w.Write([]byte{byte(cg.DataType), byte(cg.DataForm)}) +} + +func parseCategory(r protocol.Reader) (*Category, error) { + c, err := r.ReadCertainBytes(2) + if err != nil { + return nil, errors.ReadDataTypeAndDataFormError(err.Error()) + } + + return newCategory(c[1], c[0]), nil +} + +// ParseDataForm parses the raw bytes in r with bo and return a DataForm object. +func ParseDataForm(r protocol.Reader, bo protocol.ByteOrder) (DataForm, error) { + c, err := parseCategory(r) + if err != nil { + return nil, err + } + + switch c.DataForm { + case DfScalar: + return parseScalar(r, bo, c) + case DfTable: + return parseTable(r, bo, c) + case DfVector: + return parseVector(r, bo, c) + case DfPair: + return parsePair(r, bo, c) + case DfMatrix: + return parseMatrix(r, bo, c) + case DfSet: + return parseSet(r, bo, c) + case DfDictionary: + return parseDictionary(r, bo, c) + case DfChart: + return parseChart(r, bo, c) + } + + return nil, err +} diff --git a/model/dataform_test.go b/model/dataform_test.go new file mode 100644 index 0000000..a91853e --- /dev/null +++ b/model/dataform_test.go @@ -0,0 +1,75 @@ +package model + +import ( + "bytes" + "testing" + + "github.com/dolphindb/api-go/dialer/protocol" + + "github.com/stretchr/testify/assert" +) + +var dataFormBytes = map[DataFormByte][]byte{ + DfScalar: {18, 0, 115, 99, 97, 108, 97, 114, 0}, + DfTable: {0, 6, 1, 0, 0, 0, 1, 0, 0, 0, 116, 97, 98, 108, 101, 0, 99, 111, 108, 0, 18, 1, 3, 0, 0, 0, 1, 0, 0, 0, 99, 111, 108, 49, 0, 99, 111, 108, 49, 0, 99, 111, 108, 49, 0}, + DfVector: {68, 1, 3, 0, 0, 0, 1, 0, 0, 0, 3, 0, 1, 0, 3, 3, 3, 1, 0, 0, 0, 2, 0, 0, 0, 3, 0, 0, 0, 4, 0, 0, 0, 5, 0, 0, 0, 6, 0, 0, 0, 7, 0, 0, 0, 8, 0, 0, 0, 9, 0, 0, 0}, + DfPair: {18, 2, 3, 0, 0, 0, 1, 0, 0, 0, 107, 101, 121, 49, 0, 107, 101, 121, 50, 0, 107, 101, 121, 51, 0}, + DfMatrix: {18, 3, 3, 18, 1, 3, 0, 0, 0, 1, 0, 0, 0, 118, 97, 108, 117, 101, 49, 0, 118, 97, 108, 117, 101, 50, 0, 118, 97, 108, 117, 101, 51, 0, 18, 1, 3, 0, 0, 0, 1, 0, 0, 0, 118, 97, 108, 117, 101, 49, 0, 118, 97, 108, 117, 101, 50, 0, 118, 97, 108, 117, 101, 51, 0, 18, 3, 3, 0, 0, 0, 1, 0, 0, 0, 107, 101, 121, 49, 0, 107, 101, 121, 50, 0, 107, 101, 121, 51, 0}, + DfSet: {18, 4, 18, 1, 3, 0, 0, 0, 1, 0, 0, 0, 107, 101, 121, 49, 0, 107, 101, 121, 50, 0, 107, 101, 121, 51, 0}, + DfDictionary: {18, 5, 18, 1, 3, 0, 0, 0, 1, 0, 0, 0, 107, 101, 121, 49, 0, 107, 101, 121, 50, 0, 107, 101, 121, 51, 0, 18, 1, 3, 0, 0, 0, 1, 0, 0, 0, 118, 97, 108, 117, 101, 49, 0, 118, 97, 108, 117, 101, 50, 0, 118, 97, 108, 117, 101, 51, 0}, + DfChart: {25, 7, 18, 1, 5, 0, 0, 0, 1, 0, 0, 0, 116, 105, 116, 108, 101, 0, 99, 104, 97, 
114, 116, 84, 121, 112, 101, 0, 115, 116, 97, 99, 107, 105, 110, 103, 0, 100, 97, 116, 97, 0, 101, 120, 116, 114, 97, 115, 0, 25, 1, 5, 0, 0, 0, 1, 0, 0, 0, 18, 1, 1, 0, 0, 0, 1, 0, 0, 0, 99, 104, 97, 114, 116, 0, 4, 0, 4, 0, 0, 0, 1, 0, 0, 18, 3, 0, 18, 3, 3, 0, 0, 0, 1, 0, 0, 0, 109, 49, 0, 109, 50, 0, 109, 51, 0, 18, 5, 18, 1, 3, 0, 0, 0, 1, 0, 0, 0, 107, 101, 121, 49, 0, 107, 101, 121, 50, 0, 107, 101, 121, 51, 0, 18, 1, 3, 0, 0, 0, 1, 0, 0, 0, 118, 97, 108, 117, 101, 49, 0, 118, 97, 108, 117, 101, 50, 0, 118, 97, 108, 117, 101, 51, 0}, +} + +func TestDataForm(t *testing.T) { + by := bytes.NewBuffer([]byte{}) + r := protocol.NewReader(by) + bo := protocol.LittleEndian + + by.Write(dataFormBytes[DfScalar]) + df, err := ParseDataForm(r, bo) + assert.Nil(t, err) + assert.Equal(t, df.GetDataForm(), DfScalar) + assert.Equal(t, df.String(), "string(scalar)") + + by.Write(dataFormBytes[DfTable]) + df, err = ParseDataForm(r, bo) + assert.Nil(t, err) + assert.Equal(t, df.GetDataForm(), DfTable) + assert.Equal(t, df.String(), "table[1r][1c]([\n\t string[3]('col', [col1, col1, col1])\n\t])") + + by.Write(dataFormBytes[DfVector]) + df, err = ParseDataForm(r, bo) + assert.Nil(t, err) + assert.Equal(t, df.GetDataForm(), DfVector) + assert.Equal(t, df.String(), "vector([[1, 2, 3], [4, 5, 6], [7, 8, 9]])") + + by.Write(dataFormBytes[DfPair]) + df, err = ParseDataForm(r, bo) + assert.Nil(t, err) + assert.Equal(t, df.GetDataForm(), DfPair) + assert.Equal(t, df.String(), "pair([key1, key2, key3])") + + by.Write(dataFormBytes[DfMatrix]) + df, err = ParseDataForm(r, bo) + assert.Nil(t, err) + assert.Equal(t, df.GetDataForm(), DfMatrix) + assert.Equal(t, df.String(), "matrix[3r][1c]({\n rows: [value1, value2, value3],\n cols: [value1, value2, value3],\n data: stringArray(3) [\n key1,\n key2,\n key3,\n ]\n})") + + by.Write(dataFormBytes[DfSet]) + df, err = ParseDataForm(r, bo) + assert.Nil(t, err) + assert.Equal(t, df.GetDataForm(), DfSet) + assert.Equal(t, df.String(), "set[3]([key1, key2, key3])") + + by.Write(dataFormBytes[DfDictionary]) + df, err = ParseDataForm(r, bo) + assert.Nil(t, err) + assert.Equal(t, df.GetDataForm(), DfDictionary) + assert.Equal(t, df.String(), "dict([\n string[3]([key1, key2, key3]),\n string[3]([value1, value2, value3]),\n])") + + by.Write(dataFormBytes[DfChart]) + df, err = ParseDataForm(r, bo) + assert.Nil(t, err) + assert.Equal(t, df.GetDataForm(), DfChart) + assert.Equal(t, df.String(), "Chart({\n title: [chart]\n chartType: CT_LINE\n stacking: false\n data: matrix[3r][1c]({\n rows: null,\n cols: null,\n data: stringArray(3) [\n m1,\n m2,\n m3,\n ]\n})\n extras: dict([\n string[3]([key1, key2, key3]),\n string[3]([value1, value2, value3]),\n])\n})") +} diff --git a/model/datatype.go b/model/datatype.go new file mode 100644 index 0000000..fa2feab --- /dev/null +++ b/model/datatype.go @@ -0,0 +1,581 @@ +package model + +import ( + "fmt" + "math" + "time" + + "github.com/dolphindb/api-go/dialer/protocol" +) + +// DataType interface declares functions to handle DataType data. +type DataType interface { + // DataType returns the byte type of DataType + DataType() DataTypeByte + // Render serializes the DataType into w with the bo + Render(w *protocol.Writer, bo protocol.ByteOrder) error + + // String returns the string format of the DataType + String() string + // Bool returns the bool value of the DataType. 
+	// It can only be called successfully when the DataType is DtBool
+	Bool() (bool, error)
+
+	// Value returns the value of DataType as an interface{}; assert its type to get the concrete value
+	Value() interface{}
+
+	// HashBucket calculates the hash bucket of the DataType value for the given number of buckets
+	HashBucket(buckets int) int
+
+	// IsNull checks whether the value of DataType is null
+	IsNull() bool
+	// SetNull sets the value of DataType to null
+	SetNull()
+
+	raw() interface{}
+}
+
+type dataType struct {
+	t    DataTypeByte
+	data interface{}
+
+	bo protocol.ByteOrder
+}
+
+var nullDataForm = &Scalar{
+	category: &Category{
+		DataForm: DfScalar,
+		DataType: DtVoid,
+	},
+	DataType: &dataType{
+		t:    DtVoid,
+		bo:   protocol.LittleEndian,
+		data: byte(0),
+	},
+}
+
+// NewDataType returns a DataType object with the specified datatype and value arg.
+// The concrete type of arg must match datatype.
+// See README.md for more details.
+func NewDataType(datatype DataTypeByte, arg interface{}) (DataType, error) {
+	if datatype > 128 {
+		datatype -= 128
+	} else if datatype > 64 {
+		datatype -= 64
+	}
+
+	dt := &dataType{
+		t:  datatype,
+		bo: protocol.LittleEndian,
+	}
+
+	if arg == nil {
+		dt.SetNull()
+		return dt, nil
+	}
+
+	err := dt.renderData(arg)
+	return dt, err
+}
+
+func (d *dataType) renderData(in interface{}) error {
+	var err error
+	switch d.t {
+	case DtVoid:
+		d.data = byte(0)
+	case DtBool:
+		d.data, err = renderBool(in)
+	case DtBlob:
+		d.data, err = renderBlob(in)
+	case DtChar, DtCompress:
+		d.data, err = renderByte(in)
+	case DtComplex, DtPoint:
+		d.data, err = renderDouble2(in)
+	case DtDate:
+		d.data, err = renderDate(in)
+	case DtDateHour:
+		d.data, err = renderDateHour(in)
+	case DtDatetime:
+		d.data, err = renderDateTime(in)
+	case DtDouble:
+		d.data, err = renderDouble(in)
+	case DtFloat:
+		d.data, err = renderFloat(in)
+	case DtDuration:
+		d.data, err = renderDuration(in)
+	case DtInt:
+		d.data, err = renderInt(in)
+	case DtInt128:
+		d.data, err = renderInt128(in)
+	case DtIP:
+		d.data, err = renderIP(in, d.bo)
+	case DtLong:
+		d.data, err = renderLong(in)
+	case DtMinute:
+		d.data, err = renderMinute(in)
+	case DtMonth:
+		d.data, err = renderMonth(in)
+	case DtNanoTime:
+		d.data, err = renderNanoTime(in)
+	case DtNanoTimestamp:
+		d.data, err = renderNanoTimestamp(in)
+	case DtSecond:
+		d.data, err = renderSecond(in)
+	case DtShort:
+		d.data, err = renderShort(in)
+	case DtTime:
+		d.data, err = renderTime(in)
+	case DtTimestamp:
+		d.data, err = renderTimestamp(in)
+	case DtUUID:
+		d.data, err = renderUUID(in)
+	case DtAny:
+		d.data, err = renderAny(in)
+	case DtString, DtCode, DtFunction, DtHandle, DtSymbol:
+		d.data, err = renderString(in)
+	}
+
+	return err
+}
+
+func (d *dataType) DataType() DataTypeByte {
+	return d.t
+}
+
+func (d *dataType) Bool() (bool, error) {
+	if d.t != DtBool {
+		return false, fmt.Errorf("Bool() is invalid for DataType %s", GetDataTypeString(d.t))
+	}
+
+	return d.data.(uint8) == 1, nil
+}
+
+func (d *dataType) raw() interface{} {
+	return d.data
+}
+
+func (d *dataType) SetNull() {
+	switch d.t {
+	case DtVoid:
+		d.data = byte(0)
+	case DtBool, DtChar, DtCompress:
+		d.data = NullBool
+	case DtBlob:
+		d.data = NullBlob
+	case DtComplex, DtPoint:
+		d.data = NullPoint
+	case DtDate, DtDateHour, DtDatetime, DtInt, DtMinute, DtMonth, DtSecond, DtTime:
+		d.data = NullInt
+	case DtDouble:
+		d.data = NullDouble
+	case DtFloat:
+		d.data = NullFloat
+	case DtDuration:
+		d.data = emptyDuration
+	case DtNanoTime, DtNanoTimestamp, DtLong, DtTimestamp:
+		d.data =
NullLong + case DtShort: + d.data = NullShort + case DtUUID, DtInt128, DtIP: + d.data = emptyInt64List + case DtAny: + d.data = nullDataForm + case DtString, DtCode, DtFunction, DtHandle, DtSymbol: + d.data = NullString + } +} + +func (d *dataType) IsNull() bool { + if d.t == DtVoid { + return true + } + + res := false + val := d.Value() + switch r := val.(type) { + case time.Time: + res = r == NullTime + case int32: + res = r == NullInt + case []byte: + res = len(r) == 0 + case int16: + res = r == NullShort + case int8: + res = r == int8(math.MinInt8) + case int64: + res = r == NullLong + case float32: + res = r == NullFloat + case float64: + res = r == NullDouble + case DataForm: + res = r == nil || (r.GetDataForm() == DfScalar && r.GetDataType() == DtVoid) + case string: + switch d.t { + case DtIP: + res = r == NullIP + case DtPoint: + res = r == emptyPoint + case DtInt128: + res = r == NullInt128 + case DtUUID: + res = r == NullUUID + default: + res = r == NullString + } + } + return res +} + +func (d *dataType) String() string { + if d.IsNull() && d.t != DtUUID && d.t != DtIP && d.t != DtPoint && d.t != DtInt128 { + return "" + } + + res := d.Value() + t1, ok := res.(time.Time) + if ok { + switch d.t { + case DtDate: + res = t1.Format("2006.01.02") + case DtDateHour: + res = t1.Format("2006.01.02T15") + case DtDatetime: + res = t1.Format("2006.01.02T15:04:05") + case DtMinute: + res = t1.Format("15:04m") + case DtMonth: + res = t1.Format("2006.01M") + case DtNanoTime: + res = t1.Format("15:04:05.000000000") + case DtNanoTimestamp: + res = t1.Format("2006.01.02T15:04:05.000000000") + case DtSecond: + res = t1.Format("15:04:05") + case DtTime: + res = t1.Format("15:04:05.000") + case DtTimestamp: + res = t1.Format("2006.01.02T15:04:05.000") + } + } + + if d.t == DtBlob { + return fmt.Sprintf("%s", res) + } + + return fmt.Sprintf("%v", res) +} + +func (d *dataType) Value() interface{} { + return value(d.t, d.data, d.bo) +} + +func value(dt DataTypeByte, raw interface{}, bo protocol.ByteOrder) interface{} { + var res interface{} + switch dt { + case DtVoid: + res = void + case DtBool: + byt := raw.(byte) + if byt == MinInt8 { + res = int8(-128) + } else { + res = byt == 1 + } + case DtBlob: + res = raw.([]byte) + case DtChar, DtCompress: + res = int8(raw.(byte)) + case DtComplex: + res = parseComplex(raw) + case DtDate: + res = parseDate(raw) + case DtDateHour: + res = parseDateHour(raw) + case DtDatetime: + res = parseDateTime(raw) + case DtDouble: + res = raw.(float64) + case DtFloat: + res = raw.(float32) + case DtDuration: + res = parseDuration(raw) + case DtInt: + res = raw.(int32) + case DtInt128: + res = parseInt128(raw) + case DtIP: + res = parseIP(raw, bo) + case DtLong: + res = raw.(int64) + case DtMinute: + res = parseMinute(raw) + case DtAny: + res = raw.(DataForm) + case DtMonth: + res = parseMonth(raw) + case DtNanoTime: + res = parseNanoTime(raw) + case DtNanoTimestamp: + res = parseNanoTimeStamp(raw) + case DtPoint: + res = parsePoint(raw) + case DtSecond: + res = parseSecond(raw) + case DtShort: + res = raw.(int16) + case DtTime: + res = parseTime(raw) + case DtTimestamp: + res = parseTimeStamp(raw) + case DtUUID: + res = parseUUID(raw, bo) + case DtString, DtCode, DtFunction, DtHandle, DtSymbol: + res = raw.(string) + } + + return res +} + +func (d *dataType) HashBucket(buckets int) int { + switch d.t { + case DtDuration: + return 0 + case DtFloat: + return -1 + case DtInt, DtDate, DtTime, DtMonth, DtMinute, DtSecond, DtDateHour, DtDatetime: + return 
d.intHashBucket(buckets) + case DtInt128, DtIP, DtUUID: + return d.int128HashBucket(buckets) + case DtChar: + return d.charHashBucket(buckets) + case DtPoint: + return -1 + case DtShort: + return d.shortHashBucket(buckets) + case DtLong, DtNanoTime, DtNanoTimestamp, DtTimestamp, DtComplex: + return d.longHashBucket(buckets) + case DtString, DtSymbol: + return d.stringHashBucket(buckets) + default: + return 0 + } +} + +func (d *dataType) stringHashBucket(buckets int) int { + val := d.data.(string) + + byteCount := 0 + rs := []rune(val) + for _, c := range rs { + switch { + case c >= '\u0001' && c <= '\u007f': + byteCount++ + case c == '\u0000' || (c >= '\u0080' && c <= '\u07ff'): + byteCount += 2 + default: + byteCount += 3 + } + } + + h := uint32(byteCount) + if byteCount == len(rs) { + h = d.hashNormalChar(byteCount, h, rs) + } else { + h = d.hashSpecialChar(val, h) + } + + return int(h) % buckets +} + +func (d *dataType) hashNormalChar(byteCount int, h uint32, rs []rune) uint32 { + l := byteCount / 4 + for i := 0; i < l; i++ { + offSet := i * 4 + k := uint32((rs[offSet] & 0xff) + ((rs[offSet+1] & 0xff) << 8) + + ((rs[offSet+2] & 0xff) << 16) + ((rs[offSet+3] & 0xff) << 24)) + k *= 0x5bd1e995 + k ^= k >> 24 + k *= 0x5bd1e995 + h *= 0x5bd1e995 + h ^= k + } + + switch byteCount % 4 { + case 3: + h ^= uint32((rs[byteCount&^3+2] & 0xff) << 16) + fallthrough + case 2: + h ^= uint32((rs[byteCount&^3+1] & 0xff) << 8) + fallthrough + case 1: + h ^= uint32(rs[byteCount&^3] & 0xff) + h *= 0x5bd1e995 + } + + h ^= h >> 13 + h *= 0x5bd1e995 + h ^= h >> 15 + + return h +} + +func (d *dataType) hashSpecialChar(val string, h uint32) uint32 { + k := uint32(0) + cursor := 0 + for _, c := range val { + switch { + case c >= '\u0001' && c <= '\u007f': + k += uint32(c << (8 * cursor)) + cursor++ + case c == '\u0000' || (c >= '\u0080' && c <= '\u07ff'): + k += uint32((0xc0 | (0x1f & (c >> 6))) << (8 * cursor)) + cursor++ + if cursor == 4 { + h = specificBitCalculate(k, h) + k = 0 + cursor = 0 + } + + k += uint32((0x80 | (0x3f & c)) << (8 * cursor)) + cursor++ + default: + k += uint32((0xe0 | (0x0f & (c >> 12))) << (8 * cursor)) + cursor++ + if cursor == 4 { + h = specificBitCalculate(k, h) + k = 0 + cursor = 0 + } + k += uint32((0x80 | (0x3f & (c >> 6))) << (8 * cursor)) + cursor++ + if cursor == 4 { + h = specificBitCalculate(k, h) + k = 0 + cursor = 0 + } + k += uint32((0x80 | (0x3f & c)) << (8 * cursor)) + cursor++ + } + + if cursor == 4 { + h = specificBitCalculate(k, h) + k = 0 + cursor = 0 + } + } + + if cursor > 0 { + h ^= k + h *= 0x5bd1e995 + } + + h ^= h >> 13 + h *= 0x5bd1e995 + h ^= h >> 15 + + return h +} + +func specificBitCalculate(k, h uint32) uint32 { + k *= 0x5bd1e995 + k ^= k >> 24 + k *= 0x5bd1e995 + h *= 0x5bd1e995 + h ^= k + + return h +} + +func (d *dataType) longHashBucket(buckets int) int { + value := d.data.(int64) + switch { + case value >= 0: + return int(value % int64(buckets)) + case value == math.MinInt64: + return -1 + default: + return ((math.MaxInt64 % buckets) + 2 + ((math.MaxInt64 + int(value)) % buckets)) % buckets + } +} + +func (d *dataType) int128HashBucket(buckets int) int { + p := d.data.([2]uint64) + m := 0x5bd1e995 + r := 24 + h := uint32(16) + + k1 := uint32(p[0] & math.MaxUint32) + k2 := uint32(p[0] >> 32) + k3 := uint32(p[1] & math.MaxUint32) + k4 := uint32(p[1] >> 32) + + k1 *= uint32(m) + k1 ^= k1 >> r + k1 *= uint32(m) + + k2 *= uint32(m) + k2 ^= k2 >> r + k2 *= uint32(m) + + k3 *= uint32(m) + k3 ^= k3 >> r + k3 *= uint32(m) + + k4 *= uint32(m) + k4 ^= k4 >> r + 
k4 *= uint32(m)
+
+	h *= uint32(m)
+	h ^= k1
+	h *= uint32(m)
+	h ^= k2
+	h *= uint32(m)
+	h ^= k3
+	h *= uint32(m)
+	h ^= k4
+
+	h ^= h >> 13
+	h *= uint32(m)
+	h ^= h >> 15
+
+	return int(h) % buckets
+}
+
+func (d *dataType) charHashBucket(buckets int) int {
+	value := d.data.(uint8)
+	r := int(int8(value))
+	switch {
+	case r >= 0:
+		return r % buckets
+	case r == math.MinInt8:
+		return -1
+	default:
+		return (r + 4294967296) % buckets
+	}
+}
+
+func (d *dataType) shortHashBucket(buckets int) int {
+	value := d.data.(int16)
+	switch {
+	case value >= 0:
+		return int(value) % buckets
+	case value == math.MinInt16:
+		return -1
+	default:
+		return (int(value) + 4294967296) % buckets
+	}
+}
+
+func (d *dataType) intHashBucket(buckets int) int {
+	value := d.data.(int32)
+	switch {
+	case value >= 0:
+		return int(value) % buckets
+	case value == math.MinInt32:
+		return -1
+	default:
+		return (int(value) + 4294967296) % buckets
+	}
+}
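+
+// HashBucket maps a value to one of buckets buckets, e.g. when routing rows
+// to the partitions of a hash-partitioned table; a minimal sketch using the
+// int case above:
+//
+//	dt, _ := NewDataType(DtInt, int32(42))
+//	bucket := dt.HashBucket(10) // 42 % 10 == 2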
diff --git a/model/datatype_list.go b/model/datatype_list.go
new file mode 100644
index 0000000..86dbc9a
--- /dev/null
+++ b/model/datatype_list.go
@@ -0,0 +1,1741 @@
+package model
+
+import (
+	"errors"
+	"fmt"
+	"math"
+	"strings"
+	"time"
+
+	"github.com/dolphindb/api-go/dialer/protocol"
+)
+
+// DataTypeList interface declares functions to handle a list of DataType values.
+type DataTypeList interface {
+	// DataType returns the byte type of the DataTypeList
+	DataType() DataTypeByte
+	// Render serializes the DataTypeList with bo and writes it to w
+	Render(w *protocol.Writer, bo protocol.ByteOrder) error
+
+	// Len returns the length of DataTypeList
+	Len() int
+	// Get returns the DataType value at index ind,
+	// which must be less than the length of DataTypeList
+	Get(ind int) DataType
+	// Set stores t at index ind,
+	// which must be less than or equal to the length of DataTypeList.
+	// If ind is less than the length of DataTypeList, the original value is overwritten
+	Set(ind int, t DataType) error
+	// SetWithRaw stores the raw value t at index ind,
+	// which must be less than or equal to the length of DataTypeList.
+	// If ind is less than the length of DataTypeList, the original value is overwritten
+	SetWithRaw(ind int, t interface{}) error
+	// Append inserts a DataType value at the end of DataTypeList.
+	// The DataType of t must be the same as the DataTypeList's
+	Append(t DataType) DataTypeList
+	// Sub returns the end-st elements of DataTypeList in [st, end).
+	// end must be larger than st but less than the length of DataTypeList;
+	// st must be larger than -1
+	Sub(st, end int) DataTypeList
+	// AsOf returns the index of d in DataTypeList.
+	// If d is not in DataTypeList, AsOf returns -1
+	AsOf(d DataType) int
+
+	// StringList returns the elements of the DataTypeList as strings
+	StringList() []string
+
+	// Value returns the elements of the DataTypeList as interface values
+	Value() []interface{}
+
+	// IsNull checks whether the value at index ind is null
+	IsNull(ind int) bool
+	// SetNull sets the value at index ind to null
+	SetNull(ind int)
+	// GetSubList instantiates a DataTypeList with the values at the given indexes.
+	// Each index in indexes must be less than the length of DataTypeList
+	GetSubList(indexes []int) DataTypeList
+	// ElementValue returns the value of the element at index ind
+	ElementValue(ind int) interface{}
+	// ElementString returns the string value of the element at index ind
+	ElementString(ind int) string
+
+	combine(dtl DataTypeList) (DataTypeList, error)
+}
+
+type dataTypeList struct {
+	count int
+	t     DataTypeByte
+	bo    protocol.ByteOrder
+
+	shortData  []int16
+	intData    []int32
+	longData   []int64
+	floatData  []float32
+	doubleData []float64
+	stringData []string
+	charData   []uint8
+	blobData   [][]byte
+
+	anyData      []DataForm
+	double2Data  []float64
+	long2Data    []uint64
+	durationData []uint32
+}
+
+// NewDataTypeList instantiates a DataTypeList according to the datatype and data.
+// The DataType byte of each element in data must be equal to datatype.
+func NewDataTypeList(datatype DataTypeByte, data []DataType) DataTypeList {
+	size := len(data)
+	res := &dataTypeList{
+		count: size,
+		t:     datatype,
+		bo:    protocol.LittleEndian,
+	}
+
+	switch datatype {
+	case DtVoid, DtBool, DtChar:
+		res.charData = make([]uint8, size)
+		for k, v := range data {
+			res.charData[k] = v.raw().(uint8)
+		}
+	case DtShort:
+		res.shortData = make([]int16, size)
+		for k, v := range data {
+			res.shortData[k] = v.raw().(int16)
+		}
+	case DtFloat:
+		res.floatData = make([]float32, size)
+		for k, v := range data {
+			res.floatData[k] = v.raw().(float32)
+		}
+	case DtDouble:
+		res.doubleData = make([]float64, size)
+		for k, v := range data {
+			res.doubleData[k] = v.raw().(float64)
+		}
+	case DtDuration:
+		res.durationData = make([]uint32, 0, 2*size)
+		for _, v := range data {
+			tmp := v.raw().([2]uint32)
+			res.durationData = append(res.durationData, tmp[0], tmp[1])
+		}
+	case DtInt, DtDate, DtMonth, DtTime, DtMinute, DtSecond, DtDatetime, DtDateHour, DtDateMinute:
+		res.intData = make([]int32, size)
+		for k, v := range data {
+			res.intData[k] = v.raw().(int32)
+		}
+	case DtLong, DtTimestamp, DtNanoTime, DtNanoTimestamp:
+		res.longData = make([]int64, size)
+		for k, v := range data {
+			res.longData[k] = v.raw().(int64)
+		}
+	case DtInt128, DtIP, DtUUID:
+		res.long2Data = make([]uint64, 0, 2*size)
+		for _, v := range data {
+			tmp := v.raw().([2]uint64)
+			res.long2Data = append(res.long2Data, tmp[0], tmp[1])
+		}
+	case DtComplex, DtPoint:
+		res.double2Data = make([]float64, 0, 2*size)
+		for _, v := range data {
+			tmp := v.raw().([2]float64)
+			res.double2Data = append(res.double2Data, tmp[0], tmp[1])
+		}
+	case DtString, DtCode, DtFunction, DtHandle, DtSymbol:
+		res.stringData = make([]string, size)
+		for k, v := range data {
+			res.stringData[k] = v.raw().(string)
+		}
+	case DtBlob:
+		res.blobData = make([][]byte, size)
+		for k, v := range data {
+			res.blobData[k] = v.raw().([]byte)
+		}
+	case DtAny:
+		res.anyData = make([]DataForm, size)
+		for k, v := range data {
+			res.anyData[k] = v.raw().(DataForm)
+		}
+	}
+
+	return res
+}
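+
+// NewDataTypeList wraps DataType values that were constructed one by one;
+// a minimal sketch building a two-element string list:
+//
+//	a, _ := NewDataType(DtString, "x")
+//	b, _ := NewDataType(DtString, "y")
+//	dtl := NewDataTypeList(DtString, []DataType{a, b})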
+
+// NewEmptyDataTypeList instantiates an empty DataTypeList.
+func NewEmptyDataTypeList(datatype DataTypeByte, size int) DataTypeList {
+	if datatype > 128 {
+		datatype -= 128
+	} else if datatype > 64 {
+		datatype -= 64
+	}
+
+	res := &dataTypeList{
+		count: size,
+		t:     datatype,
+		bo:    protocol.LittleEndian,
+	}
+
+	switch datatype {
+	case DtVoid, DtBool, DtChar:
+		res.charData = make([]uint8, size)
+	case DtShort:
+		res.shortData = make([]int16, size)
+	case DtFloat:
+		res.floatData = make([]float32, size)
+	case DtDouble:
+		res.doubleData = make([]float64, size)
+	case DtDuration:
+		res.durationData = make([]uint32, 2*size)
+	case DtInt, DtDate, DtMonth, DtTime, DtMinute, DtSecond, DtDatetime, DtDateHour, DtDateMinute:
+		res.intData = make([]int32, size)
+	case DtLong, DtTimestamp, DtNanoTime, DtNanoTimestamp:
+		res.longData = make([]int64, size)
+	case DtInt128, DtIP, DtUUID:
+		res.long2Data = make([]uint64, 2*size)
+	case DtComplex, DtPoint:
+		res.double2Data = make([]float64, 2*size)
+	case DtString, DtCode, DtFunction, DtHandle, DtSymbol:
+		res.stringData = make([]string, size)
+	case DtBlob:
+		res.blobData = make([][]byte, size)
+	case DtAny:
+		res.anyData = make([]DataForm, size)
+	}
+	for i := 0; i < size; i++ {
+		res.SetNull(i)
+	}
+
+	return res
+}
+
+// NewDataTypeListWithRaw instantiates a DataTypeList with the specified datatype and args.
+// Refer to README_CN.md for the valid types of args for each datatype.
+func NewDataTypeListWithRaw(datatype DataTypeByte, args interface{}) (DataTypeList, error) {
+	var err error
+
+	if datatype > 128 {
+		datatype -= 128
+	} else if datatype > 64 {
+		datatype -= 64
+	}
+
+	res := &dataTypeList{
+		t:  datatype,
+		bo: protocol.LittleEndian,
+	}
+
+	switch datatype {
+	case DtBool:
+		err = res.renderBool(args)
+	case DtBlob:
+		err = res.renderBlob(args)
+	case DtChar, DtCompress:
+		err = res.renderByte(args)
+	case DtComplex, DtPoint:
+		err = res.renderDouble2(args)
+	case DtDate:
+		err = res.renderDate(args)
+	case DtDateHour:
+		err = res.renderDateHour(args)
+	case DtDatetime:
+		err = res.renderDateTime(args)
+	case DtDouble:
+		err = res.renderDouble(args)
+	case DtFloat:
+		err = res.renderFloat(args)
+	case DtDuration:
+		err = res.renderDuration(args)
+	case DtInt:
+		err = res.renderInt(args)
+	case DtInt128:
+		err = res.renderInt128(args)
+	case DtIP:
+		err = res.renderIP(args, res.bo)
+	case DtLong:
+		err = res.renderLong(args)
+	case DtMinute:
+		err = res.renderMinute(args)
+	case DtMonth:
+		err = res.renderMonth(args)
+	case DtNanoTime:
+		err = res.renderNanoTime(args)
+	case DtNanoTimestamp:
+		err = res.renderNanoTimestamp(args)
+	case DtSecond:
+		err = res.renderSecond(args)
+	case DtShort:
+		err = res.renderShort(args)
+	case DtTime:
+		err = res.renderTime(args)
+	case DtTimestamp:
+		err = res.renderTimestamp(args)
+	case DtUUID:
+		err = res.renderUUID(args)
+	case DtAny:
+		err = res.renderAny(args)
+	case DtString, DtCode, DtFunction, DtHandle, DtSymbol:
+		err = res.renderString(args)
+	default:
+		return nil, fmt.Errorf("invalid DataType %d", datatype)
+	}
+
+	return res, err
+}
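+
+// For bulk construction, NewDataTypeListWithRaw takes a plain Go slice
+// instead of individual DataType values; a minimal sketch, where the
+// resulting list typically backs a Vector:
+//
+//	dtl, err := NewDataTypeListWithRaw(DtString, []string{"a", "b", "c"})
+//	if err != nil {
+//		// handle the error
+//	}
+//	vec := NewVector(dtl)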
+
+func (d *dataTypeList) SetNull(ind int) {
+	if ind >= d.count {
+		return
+	}
+
+	switch d.t {
+	case DtBool, DtChar, DtCompress:
+		d.charData[ind] = NullBool
+	case DtBlob:
+		d.blobData[ind] = NullBlob
+	case DtComplex, DtPoint:
+		i := 2 * ind
+		d.double2Data[i] = -math.MaxFloat64
+		d.double2Data[i+1] = -math.MaxFloat64
+	case DtDate, DtDateHour, DtDatetime, DtInt, DtMinute, DtMonth, DtSecond, DtTime:
+		d.intData[ind] = NullInt
+	case DtDouble:
+		d.doubleData[ind] = NullDouble
+	case DtFloat:
+		d.floatData[ind] = NullFloat
+	case DtDuration:
+		i := 2 * ind
+		d.durationData[i] = MinInt32
+		d.durationData[i+1] = 0
+	case DtNanoTime, DtNanoTimestamp, DtLong, DtTimestamp:
+		d.longData[ind] = NullLong
+	case DtShort:
+		d.shortData[ind] = NullShort
+	case DtUUID, DtInt128, DtIP:
+		i := 2 * ind
+		d.long2Data[i] = 0
+		d.long2Data[i+1] = 0
+	case DtAny:
+		d.anyData[ind] = nullDataForm
+	case DtString, DtCode, DtFunction, DtHandle, DtSymbol:
+		d.stringData[ind] = NullString
+	}
+}
+
+func (d *dataTypeList) ElementValue(ind int) interface{} {
+	if ind >= d.count {
+		return nil
+	}
+
+	var res interface{}
+	switch d.t {
+	case DtVoid:
+		res = void
+	case DtBool:
+		byt := d.charData[ind]
+		if byt == MinInt8 {
+			res = int8(-128)
+		} else {
+			res = byt == 1
+		}
+	case DtBlob:
+		res = d.blobData[ind]
+	case DtChar, DtCompress:
+		res = int8(d.charData[ind])
+	case DtComplex:
+		i := 2 * ind
+		res = parseComplex([2]float64{d.double2Data[i], d.double2Data[i+1]})
+	case DtPoint:
+		i := 2 * ind
+		res = parsePoint([2]float64{d.double2Data[i], d.double2Data[i+1]})
+	case DtDate:
+		res = parseDate(d.intData[ind])
+	case DtDateHour:
+		res = parseDateHour(d.intData[ind])
+	case DtDatetime:
+		res = parseDateTime(d.intData[ind])
+	case DtDouble:
+		res = d.doubleData[ind]
+	case DtFloat:
+		res = d.floatData[ind]
+	case DtDuration:
+		i := 2 * ind
+		res = parseDuration([2]uint32{d.durationData[i], d.durationData[i+1]})
+	case DtInt:
+		res = d.intData[ind]
+	case DtInt128:
+		i := 2 * ind
+		res = parseInt128([2]uint64{d.long2Data[i], d.long2Data[i+1]})
+	case DtIP:
+		i := 2 * ind
+		res = parseIP([2]uint64{d.long2Data[i], d.long2Data[i+1]}, d.bo)
+	case DtLong:
+		res = d.longData[ind]
+	case DtMinute:
+		res = parseMinute(d.intData[ind])
+	case DtAny:
+		res = d.anyData[ind]
+	case DtMonth:
+		res = parseMonth(d.intData[ind])
+	case DtNanoTime:
+		res = parseNanoTime(d.longData[ind])
+	case DtNanoTimestamp:
+		res = parseNanoTimeStamp(d.longData[ind])
+	case DtSecond:
+		res = parseSecond(d.intData[ind])
+	case DtShort:
+		res = d.shortData[ind]
+	case DtTime:
+		res = parseTime(d.intData[ind])
+	case DtTimestamp:
+		res = parseTimeStamp(d.longData[ind])
+	case DtUUID:
+		i := 2 * ind
+		res = parseUUID([2]uint64{d.long2Data[i], d.long2Data[i+1]}, d.bo)
+	case DtString, DtCode, DtFunction, DtHandle, DtSymbol:
+		res = d.stringData[ind]
+	}
+
+	return res
+}
+
+func (d *dataTypeList) ElementString(ind int) string {
+	if ind >= d.count {
+		return ""
+	}
+
+	if d.IsNull(ind) && d.t != DtUUID && d.t != DtIP &&
+		d.t != DtPoint && d.t != DtInt128 {
+		return ""
+	}
+
+	raw := d.ElementValue(ind)
+	if d.t == DtDate || d.t == DtDateHour || d.t == DtDatetime || d.t == DtMinute || d.t == DtMonth ||
+		d.t == DtNanoTime || d.t == DtNanoTimestamp || d.t == DtTime || d.t == DtTimestamp ||
+		d.t == DtSecond {
+		times := []time.Time{raw.(time.Time)}
+		return formatTime(d.t, times)[0]
+	} else if d.t == DtBlob {
+		return fmt.Sprintf("%s", raw)
+	}
+
+	return fmt.Sprintf("%v", raw)
+}
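+
+// ElementValue and ElementString read one element directly, without
+// allocating the DataType wrapper that Get returns; a minimal sketch:
+//
+//	dtl, _ := NewDataTypeListWithRaw(DtString, []string{"a", "b"})
+//	v := dtl.ElementValue(0)  // interface{} holding "a"
+//	s := dtl.ElementString(1) // "b"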
+
+func (d *dataTypeList) combine(in DataTypeList) (DataTypeList, error) {
+	if d.t != in.DataType() {
+		return nil, errors.New("the DataType must be the same when you call combine")
+	}
+
+	original := in.(*dataTypeList)
+	res := &dataTypeList{
+		bo:    d.bo,
+		count: d.Len() + in.Len(),
+		t:     d.t,
+	}
+
+	// Copy d's elements into the front of each backing slice, then place
+	// original's elements after them.
+	switch d.t {
+	case DtBool, DtChar, DtCompress:
+		res.charData = make([]uint8, res.count)
+		copy(res.charData, d.charData)
+		copy(res.charData[d.count:], original.charData)
+	case DtBlob:
+		res.blobData = make([][]byte, res.count)
+		copy(res.blobData, d.blobData)
+		copy(res.blobData[d.count:], original.blobData)
+	case DtComplex, DtPoint:
+		res.double2Data = make([]float64, res.count*2)
+		copy(res.double2Data, d.double2Data)
+		copy(res.double2Data[2*d.count:], original.double2Data)
+	case DtDate, DtDateHour, DtDatetime, DtInt, DtMinute, DtMonth, DtSecond, DtTime:
+		res.intData = make([]int32, res.count)
+		copy(res.intData, d.intData)
+		copy(res.intData[d.count:], original.intData)
+	case DtDouble:
+		res.doubleData = make([]float64, res.count)
+		copy(res.doubleData, d.doubleData)
+		copy(res.doubleData[d.count:], original.doubleData)
+	case DtFloat:
+		res.floatData = make([]float32, res.count)
+		copy(res.floatData, d.floatData)
+		copy(res.floatData[d.count:], original.floatData)
+	case DtDuration:
+		res.durationData = make([]uint32, res.count*2)
+		copy(res.durationData, d.durationData)
+		copy(res.durationData[2*d.count:], original.durationData)
+	case DtNanoTime, DtNanoTimestamp, DtLong, DtTimestamp:
+		res.longData = make([]int64, res.count)
+		copy(res.longData, d.longData)
+		copy(res.longData[d.count:], original.longData)
+	case DtShort:
+		res.shortData = make([]int16, res.count)
+		copy(res.shortData, d.shortData)
+		copy(res.shortData[d.count:], original.shortData)
+	case DtUUID, DtInt128, DtIP:
+		res.long2Data = make([]uint64, res.count*2)
+		copy(res.long2Data, d.long2Data)
+		copy(res.long2Data[2*d.count:], original.long2Data)
+	case DtAny:
+		res.anyData = make([]DataForm, res.count)
+		copy(res.anyData, d.anyData)
+		copy(res.anyData[d.count:], original.anyData)
+	case DtString, DtCode, DtFunction, DtHandle, DtSymbol:
+		res.stringData = make([]string, res.count)
+		copy(res.stringData, d.stringData)
+		copy(res.stringData[d.count:], original.stringData)
+	}
+	return res, nil
+}
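+
+// Null handling in the methods below relies on the per-type sentinels from
+// const.go (NullInt, NullShort, and so on); a minimal sketch:
+//
+//	dtl := NewEmptyDataTypeList(DtInt, 1)
+//	dtl.IsNull(0) // true: NewEmptyDataTypeList nulls every slot
+//	_ = dtl.SetWithRaw(0, int32(7))
+//	dtl.IsNull(0) // false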
+
+func (d *dataTypeList) IsNull(ind int) bool {
+	if ind >= d.count {
+		return true
+	}
+
+	res := false
+	switch d.t {
+	case DtVoid:
+		res = true
+	case DtBool, DtChar, DtCompress:
+		res = d.charData[ind] == NullBool
+	case DtBlob:
+		res = len(d.blobData[ind]) == 0
+	case DtComplex, DtPoint:
+		i := 2 * ind
+		res = d.double2Data[i] == -math.MaxFloat64 && d.double2Data[i+1] == -math.MaxFloat64
+	case DtDate, DtDateHour, DtDatetime, DtInt, DtMinute, DtMonth, DtSecond, DtTime:
+		res = d.intData[ind] == NullInt
+	case DtDouble:
+		res = d.doubleData[ind] == NullDouble
+	case DtFloat:
+		res = d.floatData[ind] == NullFloat
+	case DtDuration:
+		i := 2 * ind
+		res = d.durationData[i] == MinInt32 && d.durationData[i+1] == 0
+	case DtNanoTime, DtNanoTimestamp, DtLong, DtTimestamp:
+		res = d.longData[ind] == NullLong
+	case DtShort:
+		res = d.shortData[ind] == NullShort
+	case DtAny:
+		df := d.anyData[ind]
+		res = df == nil || (df.GetDataForm() == DfScalar && df.GetDataType() == DtVoid)
+	case DtUUID, DtInt128, DtIP:
+		i := 2 * ind
+		res = d.long2Data[i] == 0 && d.long2Data[i+1] == 0
+	case DtString, DtCode, DtFunction, DtHandle, DtSymbol:
+		res = d.stringData[ind] == NullString
+	}
+
+	return res
+}
+
+func (d *dataTypeList) Value() []interface{} {
+	res := make([]interface{}, d.count)
+	switch d.t {
+	case DtVoid:
+		for i := 0; i < d.count; i++ {
+			res[i] = void
+		}
+	case DtBool:
+		parseBools(d.charData, res)
+	case DtBlob:
+		parseBlobs(d.blobData, res)
+	case DtChar, DtCompress:
+		parseBytes(d.charData, res)
+	case DtComplex:
+		parseComplexes(d.count, d.double2Data, res)
+	case DtPoint:
+		parsePoints(d.count, d.double2Data, res)
+	case DtDate:
+		parseDates(d.intData, res)
+	case DtDateHour:
+		parseDateHours(d.intData, res)
+	case DtDatetime:
+		parseDateTimes(d.intData, res)
+	case DtDouble:
+		parseDoubles(d.doubleData, res)
+	case DtFloat:
+		parseFloats(d.floatData, res)
+	case DtDuration:
+		parseDurations(d.count, d.durationData, res)
+	case DtInt:
+		parseInt(d.intData, res)
+	case DtInt128:
+		parseInt128s(d.count, d.long2Data, res)
+	case DtAny:
+		parseAny(d.anyData, res)
+	case DtIP:
+		parseIPs(d.count, d.long2Data, res, d.bo)
+	case DtLong:
+		parseLongs(d.longData, res)
+	case DtMinute:
+		parseMinutes(d.intData, res)
+	case DtMonth:
+		parseMonths(d.intData, res)
+	case DtNanoTime:
+		parseNanoTimes(d.longData, res)
+	case DtNanoTimestamp:
+		parseNanoTimeStamps(d.longData, res)
+	case DtSecond:
+		parseSeconds(d.intData, res)
+	case DtShort:
+		parseShorts(d.shortData, res)
+	case DtTime:
+		parseTimes(d.intData, res)
+	case DtTimestamp:
+		parseTimeStamps(d.longData, res)
+	case DtUUID:
+		parseUUIDs(d.count, d.long2Data, res, d.bo)
+	case DtString, DtCode, DtFunction, DtHandle, DtSymbol:
+		parseStrings(d.stringData, res)
+	}
+
+	return res
+}
+
+func (d *dataTypeList) AsOf(t DataType) int {
+	if d.t != t.DataType() {
+		return -1
+	}
+
+	cg := GetCategory(d.DataType())
+	if cg == MIXED || cg == NOTHING || cg == LOGICAL ||
+		cg == SYSTEM || cg == BINARY {
+		return -1
+	}
+
+	end := d.Len() - 1
+	switch d.DataType() {
+	case DtShort:
+		end = d.shortAsOf(t)
+	case DtInt, DtTime, DtSecond, DtMinute, DtDateHour, DtDate,
+		DtDateMinute, DtDatetime, DtMonth:
+		end = d.intAsOf(t)
+	case DtLong, DtNanoTime, DtNanoTimestamp, DtTimestamp:
+		end = d.longAsOf(t)
+	case DtChar:
+		end = d.charAsOf(t)
+	case DtDouble:
+		end = d.doubleAsOf(t)
+	case DtFloat:
+		end = d.floatAsOf(t)
+	case DtString, DtSymbol:
+		end = d.stringAsOf(t)
+	}
+
+	return end
+}
+
+func (d *dataTypeList) Set(ind int, t DataType) error {
+	if d.count <= ind {
+		return fmt.Errorf("index %d exceeds the number of data %d", ind, d.count)
+	} else if t == nil {
+		// A nil DataType clears the slot instead of storing a value.
+		d.SetNull(ind)
+		return nil
+	}
+
+	if !isEqualDataTypeByte(d.t, t.DataType()) {
+		return fmt.Errorf("failed to set DataType(%s) into DataTypeList(%s)",
+			GetDataTypeString(t.DataType()), GetDataTypeString(d.t))
+	}
+
+	return d.SetWithRaw(ind, t.raw())
+}
+
+func isEqualDataTypeByte(a, b DataTypeByte) bool {
+	if a == b || (a == DtSymbol && b == DtString) || (b == DtSymbol && a == DtString) {
+		return true
+	}
+
+	return false
+}
+
+func (d *dataTypeList) SetWithRaw(ind int, t interface{}) error {
+	if d.count <= ind {
+		return fmt.Errorf("index %d exceeds the number of data %d", ind, d.count)
+	}
+
+	switch d.t {
+	case DtVoid, DtBool, DtChar:
+		d.charData[ind] = t.(uint8)
+	case DtShort:
+		d.shortData[ind] = t.(int16)
+	case DtFloat:
+		d.floatData[ind] = t.(float32)
+	case DtDouble:
+		d.doubleData[ind] = t.(float64)
+	case DtDuration:
+		tmp := t.([2]uint32)
+		i := 2 * ind
+		d.durationData[i] = tmp[0]
+		d.durationData[i+1] = tmp[1]
+	case DtInt, DtDate, DtMonth, DtTime, DtMinute, DtSecond, DtDatetime, DtDateHour, DtDateMinute:
+		d.intData[ind] = t.(int32)
+	case DtLong, DtTimestamp, DtNanoTime, DtNanoTimestamp:
+		d.longData[ind] = t.(int64)
+	case DtInt128, DtIP, DtUUID:
+		tmp := t.([2]uint64)
+		i := 2 * ind
+		d.long2Data[i] = tmp[0]
+		d.long2Data[i+1] = tmp[1]
+	case DtComplex, DtPoint:
+		tmp := t.([2]float64)
+		i := 2 * ind
+		d.double2Data[i] = tmp[0]
+		d.double2Data[i+1] = tmp[1]
+	case DtString, DtCode, DtFunction, DtHandle, DtSymbol:
+		d.stringData[ind] = t.(string)
+	case DtBlob:
+		d.blobData[ind] = t.([]byte)
+	case DtAny:
+		d.anyData[ind] = t.(DataForm)
+	}
+
+	return nil
+}
+
+func (d *dataTypeList) Append(t DataType) DataTypeList {
+	switch t.DataType() {
+	case DtVoid, DtBool, DtChar:
+		d.charData = append(d.charData, t.raw().(uint8))
+	case DtShort:
+		d.shortData = append(d.shortData, t.raw().(int16))
+ case DtFloat: + d.floatData = append(d.floatData, t.raw().(float32)) + case DtDouble: + d.doubleData = append(d.doubleData, t.raw().(float64)) + case DtDuration: + tmp := t.raw().([2]uint32) + d.durationData = append(d.durationData, tmp[0], tmp[1]) + case DtInt, DtDate, DtMonth, DtTime, DtMinute, DtSecond, DtDatetime, DtDateHour, DtDateMinute: + d.intData = append(d.intData, t.raw().(int32)) + case DtLong, DtTimestamp, DtNanoTime, DtNanoTimestamp: + d.longData = append(d.longData, t.raw().(int64)) + case DtInt128, DtIP, DtUUID: + tmp := t.raw().([2]uint64) + d.long2Data = append(d.long2Data, tmp[0], tmp[1]) + case DtComplex, DtPoint: + tmp := t.raw().([2]float64) + d.double2Data = append(d.double2Data, tmp[0], tmp[1]) + case DtString, DtCode, DtFunction, DtHandle, DtSymbol: + d.stringData = append(d.stringData, t.raw().(string)) + case DtBlob: + d.blobData = append(d.blobData, t.raw().([]byte)) + case DtAny: + d.anyData = append(d.anyData, t.raw().(DataForm)) + } + d.count++ + return d +} + +func (d *dataTypeList) Len() int { + return d.count +} + +func (d *dataTypeList) DataType() DataTypeByte { + return d.t +} + +func (d *dataTypeList) Get(ind int) DataType { + if ind >= d.count { + return nil + } + + t := &dataType{ + t: d.t, + bo: d.bo, + } + + switch d.t { + case DtVoid, DtBool, DtChar: + t.data = d.charData[ind] + case DtShort: + t.data = d.shortData[ind] + case DtFloat: + t.data = d.floatData[ind] + case DtDouble: + t.data = d.doubleData[ind] + case DtDuration: + i := 2 * ind + t.data = [2]uint32{d.durationData[i], d.durationData[i+1]} + case DtInt, DtDate, DtMonth, DtTime, DtMinute, DtSecond, DtDatetime, DtDateHour, DtDateMinute: + t.data = d.intData[ind] + case DtLong, DtTimestamp, DtNanoTime, DtNanoTimestamp: + t.data = d.longData[ind] + case DtInt128, DtIP, DtUUID: + i := 2 * ind + t.data = [2]uint64{d.long2Data[i], d.long2Data[i+1]} + case DtComplex, DtPoint: + i := 2 * ind + t.data = [2]float64{d.double2Data[i], d.double2Data[i+1]} + case DtString, DtCode, DtFunction, DtHandle, DtSymbol: + t.data = d.stringData[ind] + case DtBlob: + t.data = d.blobData[ind] + case DtAny: + t.data = d.anyData[ind] + } + + return t +} + +func (d *dataTypeList) GetSubList(indexes []int) DataTypeList { + length := len(indexes) + res := &dataTypeList{ + t: d.t, + bo: d.bo, + count: length, + } + + switch d.t { + case DtVoid, DtBool, DtChar: + res.charData = make([]uint8, length) + for k, v := range indexes { + res.charData[k] = d.charData[v] + } + case DtShort: + res.shortData = make([]int16, length) + for k, v := range indexes { + res.shortData[k] = d.shortData[v] + } + case DtFloat: + res.floatData = make([]float32, length) + for k, v := range indexes { + res.floatData[k] = d.floatData[v] + } + case DtDouble: + res.doubleData = make([]float64, length) + for k, v := range indexes { + res.doubleData[k] = d.doubleData[v] + } + case DtDuration: + res.durationData = make([]uint32, 0, 2*length) + for _, v := range indexes { + ind := 2 * v + res.durationData = append(res.durationData, d.durationData[ind], d.durationData[ind+1]) + } + case DtInt, DtDate, DtMonth, DtTime, DtMinute, DtSecond, DtDatetime, DtDateHour, DtDateMinute: + res.intData = make([]int32, length) + for k, v := range indexes { + res.intData[k] = d.intData[v] + } + case DtLong, DtTimestamp, DtNanoTime, DtNanoTimestamp: + res.longData = make([]int64, length) + for k, v := range indexes { + res.longData[k] = d.longData[v] + } + case DtInt128, DtIP, DtUUID: + res.long2Data = make([]uint64, 0, 2*length) + for _, v := range indexes { + ind 
:= 2 * v + res.long2Data = append(res.long2Data, d.long2Data[ind], d.long2Data[ind+1]) + } + case DtComplex, DtPoint: + res.double2Data = make([]float64, 0, 2*length) + for _, v := range indexes { + ind := 2 * v + res.double2Data = append(res.double2Data, d.double2Data[ind], d.double2Data[ind+1]) + } + case DtString, DtCode, DtFunction, DtHandle, DtSymbol: + res.stringData = make([]string, length) + for k, v := range indexes { + res.stringData[k] = d.stringData[v] + } + case DtBlob: + res.blobData = make([][]byte, length) + for k, v := range indexes { + res.blobData[k] = d.blobData[v] + } + case DtAny: + res.anyData = make([]DataForm, length) + for k, v := range indexes { + res.anyData[k] = d.anyData[v] + } + } + + return res +} + +func (d *dataTypeList) Sub(start, end int) DataTypeList { + if start < 0 || d.Len() < end || start >= end { + return nil + } + + res := &dataTypeList{ + count: end - start, + t: d.t, + bo: d.bo, + } + + switch d.t { + case DtVoid, DtBool, DtChar: + res.charData = d.charData[start:end] + case DtShort: + res.shortData = d.shortData[start:end] + case DtFloat: + res.floatData = d.floatData[start:end] + case DtDouble: + res.doubleData = d.doubleData[start:end] + case DtDuration: + res.durationData = d.durationData[2*start : 2*end] + case DtInt, DtDate, DtMonth, DtTime, DtMinute, DtSecond, DtDatetime, DtDateHour, DtDateMinute: + res.intData = d.intData[start:end] + case DtLong, DtTimestamp, DtNanoTime, DtNanoTimestamp: + res.longData = d.longData[start:end] + case DtInt128, DtIP, DtUUID: + res.long2Data = d.long2Data[2*start : 2*end] + case DtComplex, DtPoint: + res.double2Data = d.double2Data[2*start : 2*end] + case DtString, DtCode, DtFunction, DtHandle, DtSymbol: + res.stringData = d.stringData[start:end] + case DtBlob: + res.blobData = d.blobData[start:end] + case DtAny: + res.anyData = d.anyData[start:end] + } + + return res +} + +func (d *dataTypeList) Render(w *protocol.Writer, bo protocol.ByteOrder) error { + if d.Len() == 0 { + return nil + } + + var err error + switch d.t { + case DtString, DtCode, DtFunction, DtHandle, DtDictionary, DtSymbol: + err = writeStrings(w, d.stringData) + case DtBlob: + err = writeBlobs(w, d.blobData) + case DtAny: + for _, v := range d.anyData { + err := v.Render(w, bo) + if err != nil { + return err + } + } + case DtBool, DtChar, DtCompress: + err = w.Write(d.charData) + case DtInt, DtTime, DtDate, DtMonth, DtMinute, DtSecond, DtDatetime, DtDateHour, DtDateMinute: + err = w.Write(protocol.ByteSliceFromInt32Slice(d.intData)) + case DtShort: + err = w.Write(protocol.ByteSliceFromInt16Slice(d.shortData)) + case DtVoid: + err = writeVoids(w, d.count) + case DtDouble: + err = w.Write(protocol.ByteSliceFromFloat64Slice(d.doubleData)) + case DtFloat: + err = w.Write(protocol.ByteSliceFromFloat32Slice(d.floatData)) + case DtLong, DtTimestamp, DtNanoTime, DtNanoTimestamp: + err = w.Write(protocol.ByteSliceFromInt64Slice(d.longData)) + case DtDuration: + err = writeDurations(w, d.durationData) + case DtPoint, DtComplex: + err = writeDouble2s(w, d.double2Data) + case DtInt128, DtUUID, DtIP: + err = writeLong2s(w, d.long2Data) + } + + return err +} + +func (d *dataTypeList) StringList() []string { + tmp := d.Value() + res := make([]string, len(tmp)) + if d.t == DtDate || d.t == DtDateHour || d.t == DtDatetime || d.t == DtMinute || d.t == DtMonth || + d.t == DtNanoTime || d.t == DtNanoTimestamp || d.t == DtTime || d.t == DtTimestamp || + d.t == DtSecond { + times := make([]time.Time, len(tmp)) + for k, v := range tmp { + times[k] = v.(time.Time) + } + res
= formatTime(d.t, times) + } else { + switch { + case d.t == DtBlob: + for k, v := range tmp { + res[k] = fmt.Sprintf("%s", v) + } + case d.t != DtUUID && d.t != DtIP && d.t != DtPoint && d.t != DtInt128: + for k, v := range tmp { + if d.IsNull(k) { + res[k] = "" + } else { + res[k] = fmt.Sprintf("%v", v) + } + } + default: + for k, v := range tmp { + res[k] = fmt.Sprintf("%v", v) + } + } + } + + return res +} + +func formatTime(dt DataTypeByte, times []time.Time) []string { + res := make([]string, len(times)) + layout := "" + + switch dt { + case DtDate: + layout = "2006.01.02" + case DtDateHour: + layout = "2006.01.02T15" + case DtDatetime: + layout = "2006.01.02T15:04:05" + case DtMinute: + layout = "15:04m" + case DtMonth: + layout = "2006.01M" + case DtNanoTime: + layout = "15:04:05.000000000" + case DtNanoTimestamp: + layout = "2006.01.02T15:04:05.000000000" + case DtSecond: + layout = "15:04:05" + case DtTime: + layout = "15:04:05.000" + case DtTimestamp: + layout = "2006.01.02T15:04:05.000" + } + + for k, v := range times { + if v != emptyTime { + res[k] = v.Format(layout) + } else { + res[k] = "" + } + } + + return res +} + +func (d *dataTypeList) shortAsOf(t DataType) int { + s2 := t.Value().(int16) + val := d.shortData + end := d.Len() - 1 + st := 0 + for st <= end { + mid := (st + end) / 2 + + s1 := val[mid] + if s1 <= s2 { + st = mid + 1 + } else { + end = mid - 1 + } + } + + return end +} + +func (d *dataTypeList) intAsOf(t DataType) int { + s2 := t.raw().(int32) + val := d.intData + end := d.Len() - 1 + st := 0 + for st <= end { + mid := (st + end) / 2 + + s1 := val[mid] + if s1 <= s2 { + st = mid + 1 + } else { + end = mid - 1 + } + } + + return end +} + +func (d *dataTypeList) longAsOf(t DataType) int { + s2 := t.raw().(int64) + val := d.longData + end := d.Len() - 1 + st := 0 + for st <= end { + mid := (st + end) / 2 + + s1 := val[mid] + if s1 <= s2 { + st = mid + 1 + } else { + end = mid - 1 + } + } + + return end +} + +func (d *dataTypeList) charAsOf(t DataType) int { + s2 := t.raw().(uint8) + val := d.charData + + end := d.Len() - 1 + st := 0 + for st <= end { + mid := (st + end) / 2 + + s1 := val[mid] + if s1 <= s2 { + st = mid + 1 + } else { + end = mid - 1 + } + } + + return end +} + +func (d *dataTypeList) doubleAsOf(t DataType) int { + s2 := t.raw().(float64) + val := d.doubleData + + end := d.Len() - 1 + st := 0 + for st <= end { + mid := (st + end) / 2 + + s1 := val[mid] + if s1 <= s2 { + st = mid + 1 + } else { + end = mid - 1 + } + } + + return end +} + +func (d *dataTypeList) floatAsOf(t DataType) int { + s2 := t.raw().(float32) + val := d.floatData + end := d.Len() - 1 + st := 0 + for st <= end { + mid := (st + end) / 2 + + s1 := val[mid] + if s1 <= s2 { + st = mid + 1 + } else { + end = mid - 1 + } + } + + return end +} + +func (d *dataTypeList) stringAsOf(t DataType) int { + s2 := t.raw().(string) + val := d.stringData + end := d.Len() - 1 + st := 0 + for st <= end { + mid := (st + end) / 2 + + s1 := val[mid] + if strings.Compare(s1, s2) <= 0 { + st = mid + 1 + } else { + end = mid - 1 + } + } + + return end +} + +func (d *dataTypeList) renderDuration(val interface{}) error { + str, ok := val.([]string) + if !ok { + return errors.New("the type of input must be []string when datatype is DtDuration") + } + + length := len(str) + d.count = length + d.durationData = make([]uint32, 0, 2*length) + for _, v := range str { + if v == "" { + d.durationData = append(d.durationData, emptyDuration[0], emptyDuration[1]) + } else { + tmp, err := 
renderDurationFromString(v) + if err != nil { + return err + } + + d.durationData = append(d.durationData, tmp[0], tmp[1]) + } + } + + return nil +} + +func (d *dataTypeList) renderDouble2(val interface{}) error { + f64s, ok := val.([][2]float64) + if !ok { + return errors.New("the type of input must be [][2]float64 when datatype is DtComplex or DtPoint") + } + + length := len(f64s) + d.count = length + d.double2Data = make([]float64, 0, 2*length) + for _, v := range f64s { + d.double2Data = append(d.double2Data, v[0], v[1]) + } + + return nil +} + +func (d *dataTypeList) renderBool(val interface{}) error { + bs, ok := val.([]byte) + if !ok { + return errors.New("the type of input must be []byte when datatype is DtBool") + } + + length := len(bs) + d.count = length + d.charData = make([]uint8, length) + for k, v := range bs { + if v == 0 || v == MinInt8 { + d.charData[k] = v + } else { + d.charData[k] = 1 + } + } + return nil +} + +func (d *dataTypeList) renderBlob(val interface{}) error { + byt, ok := val.([][]byte) + if !ok { + return errors.New("the type of input must be [][]byte when datatype is DtBlob") + } + + d.count = len(byt) + d.blobData = byt + return nil +} + +func (d *dataTypeList) renderByte(val interface{}) error { + bs, ok := val.([]byte) + if !ok { + return errors.New("the type of input must be []byte when datatype is DtChar or DtCompress") + } + + length := len(bs) + d.count = length + d.charData = bs + + return nil +} + +func (d *dataTypeList) renderDate(val interface{}) error { + ts, ok := val.([]time.Time) + if !ok { + return errors.New("the type of input must be []time.Time when datatype is DtDate") + } + + length := len(ts) + d.count = length + d.intData = make([]int32, length) + for k, v := range ts { + d.intData[k] = renderDateFromTime(v) + } + return nil +} + +func (d *dataTypeList) renderDateHour(val interface{}) error { + ts, ok := val.([]time.Time) + if !ok { + return errors.New("the type of input must be []time.Time when datatype is DtDateHour") + } + + length := len(ts) + d.count = length + d.intData = make([]int32, length) + for k, v := range ts { + d.intData[k] = renderDateHourFromTime(v) + } + + return nil +} + +func (d *dataTypeList) renderDateTime(val interface{}) error { + ts, ok := val.([]time.Time) + if !ok { + return errors.New("the type of input must be []time.Time when datatype is DtDatetime") + } + + length := len(ts) + d.count = length + d.intData = make([]int32, length) + for k, v := range ts { + d.intData[k] = renderDateTimeFromTime(v) + } + return nil +} + +func (d *dataTypeList) renderDouble(val interface{}) error { + f64s, ok := val.([]float64) + if !ok { + return errors.New("the type of input must be []float64 when datatype is DtDouble") + } + + d.count = len(f64s) + d.doubleData = f64s + return nil +} + +func (d *dataTypeList) renderFloat(val interface{}) error { + f32s, ok := val.([]float32) + if !ok { + return errors.New("the type of input must be []float32 when datatype is DtFloat") + } + + d.count = len(f32s) + d.floatData = f32s + return nil +} + +func (d *dataTypeList) renderInt(val interface{}) error { + i32s, ok := val.([]int32) + if !ok { + return errors.New("the type of input must be []int32 when datatype is DtInt") + } + + d.count = len(i32s) + d.intData = i32s + return nil +} + +func (d *dataTypeList) renderInt128(val interface{}) error { + str, ok := val.([]string) + if !ok { + return errors.New("the type of input must be []string when datatype is DtInt128") + } + + length := len(str) + d.count = length + d.long2Data = 
make([]uint64, 0, 2*length) + for _, v := range str { + tmp := renderInt128FromString(v) + d.long2Data = append(d.long2Data, tmp[0], tmp[1]) + } + + return nil +} + +func (d *dataTypeList) renderIP(val interface{}, bo protocol.ByteOrder) error { + str, ok := val.([]string) + if !ok { + return errors.New("the type of input must be []string when datatype is DtIP") + } + + length := len(str) + d.count = length + d.long2Data = make([]uint64, 0, 2*length) + for _, v := range str { + tmp := renderIPFromString(v, bo) + d.long2Data = append(d.long2Data, tmp[0], tmp[1]) + } + + return nil +} + +func (d *dataTypeList) renderLong(val interface{}) error { + is, ok := val.([]int64) + if !ok { + return errors.New("the type of input must be []int64 when datatype is DtLong") + } + + d.count = len(is) + d.longData = is + + return nil +} + +func (d *dataTypeList) renderMinute(val interface{}) error { + tis, ok := val.([]time.Time) + if !ok { + return errors.New("the type of input must be []time.Time when datatype is DtMinute") + } + + length := len(tis) + d.count = length + d.intData = make([]int32, length) + for k, v := range tis { + d.intData[k] = renderMinuteFromTime(v) + } + + return nil +} + +func (d *dataTypeList) renderMonth(val interface{}) error { + tis, ok := val.([]time.Time) + if !ok { + return errors.New("the type of input must be []time.Time when datatype is DtMonth") + } + + length := len(tis) + d.count = length + d.intData = make([]int32, length) + for k, v := range tis { + d.intData[k] = renderMonthFromTime(v) + } + + return nil +} + +func (d *dataTypeList) renderNanoTime(val interface{}) error { + tis, ok := val.([]time.Time) + if !ok { + return errors.New("the type of input must be []time.Time when datatype is DtNanoTime") + } + + length := len(tis) + d.count = length + d.longData = make([]int64, length) + for k, v := range tis { + d.longData[k] = renderNanoTimeFromTime(v) + } + + return nil +} + +func (d *dataTypeList) renderNanoTimestamp(val interface{}) error { + tis, ok := val.([]time.Time) + if !ok { + return errors.New("the type of input must be []time.Time when datatype is DtNanoTimestamp") + } + + length := len(tis) + d.count = length + d.longData = make([]int64, length) + for k, v := range tis { + d.longData[k] = renderNanoTimestampFromTime(v) + } + + return nil +} + +func (d *dataTypeList) renderSecond(val interface{}) error { + tis, ok := val.([]time.Time) + if !ok { + return errors.New("the type of input must be []time.Time when datatype is DtSecond") + } + + length := len(tis) + d.count = length + d.intData = make([]int32, length) + for k, v := range tis { + d.intData[k] = renderSecondFromTime(v) + } + + return nil +} + +func (d *dataTypeList) renderShort(val interface{}) error { + is, ok := val.([]int16) + if !ok { + return errors.New("the type of input must be []int16 when datatype is DtShort") + } + + length := len(is) + d.count = length + d.shortData = is + + return nil +} + +func (d *dataTypeList) renderTime(val interface{}) error { + tis, ok := val.([]time.Time) + if !ok { + return errors.New("the type of input must be []time.Time when datatype is DtTime") + } + + length := len(tis) + d.count = length + d.intData = make([]int32, length) + for k, v := range tis { + d.intData[k] = renderTimeFromTime(v) + } + + return nil +} + +func (d *dataTypeList) renderTimestamp(val interface{}) error { + tis, ok := val.([]time.Time) + if !ok { + return errors.New("the type of input must be []time.Time when datatype is DtTimestamp") + } + + length := len(tis) + d.count = length + 
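// Assumption: renderTimestampFromTime converts each time.Time to the millisecond epoch offset implied by the DtTimestamp layout used in formatTime. +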
d.longData = make([]int64, length) + for k, v := range tis { + d.longData[k] = renderTimestampFromTime(v) + } + + return nil +} + +func (d *dataTypeList) renderUUID(val interface{}) error { + str, ok := val.([]string) + if !ok { + return errors.New("the type of input must be []string when datatype is DtUUID") + } + + length := len(str) + d.count = length + d.long2Data = make([]uint64, 0, 2*length) + for _, v := range str { + tmp := renderUUIDFromString(v) + d.long2Data = append(d.long2Data, tmp[0], tmp[1]) + } + + return nil +} + +func (d *dataTypeList) renderAny(val interface{}) error { + dataForms, ok := val.([]DataForm) + if !ok { + return errors.New("the type of input must be []DataForm when datatype is DtAny") + } + + d.count = len(dataForms) + d.anyData = dataForms + + return nil +} + +func (d *dataTypeList) renderString(val interface{}) error { + str, ok := val.([]string) + if !ok { + return errors.New("the type of input must be []string when datatype is DtString, DtCode, DtFunction, DtHandle or DtSymbol") + } + + d.count = len(str) + d.stringData = str + + return nil +} + +func parseStrings(raw []string, res []interface{}) { + for k, v := range raw { + res[k] = v + } +} + +func parseUUIDs(count int, raw []uint64, res []interface{}, bo protocol.ByteOrder) { + for i := 0; i < count; i++ { + ind := 2 * i + if raw[ind] == 0 && raw[ind+1] == 0 { + res[i] = "00000000-0000-0000-0000-000000000000" + continue + } + + high, low := make([]byte, protocol.Uint64Size), make([]byte, protocol.Uint64Size) + bo.PutUint64(high, raw[ind+1]) + bo.PutUint64(low, raw[ind]) + + res[i] = fmt.Sprintf("%08x-%04x-%04x-%04x-%012x", bo.Uint32(high[4:]), bo.Uint16(high[2:4]), + bo.Uint16(high[0:2]), bo.Uint16(low[6:8]), bo.Uint64(append(low[0:6], 0, 0))) + } +} + +func parseTimeStamps(raw []int64, res []interface{}) { + for k, v := range raw { + res[k] = parseTimeStamp(v) + } +} + +func parseTimes(raw []int32, res []interface{}) { + for k, v := range raw { + res[k] = parseTime(v) + } +} + +func parseShorts(raw []int16, res []interface{}) { + for k, v := range raw { + res[k] = v + } +} + +func parseComplexes(count int, raw []float64, res []interface{}) { + for i := 0; i < count; i++ { + ind := 2 * i + if raw[ind] == -math.MaxFloat64 || raw[ind+1] == -math.MaxFloat64 { + res[i] = "" + continue + } + + res[i] = fmt.Sprintf("%.5f+%.5fi", raw[ind], raw[ind+1]) + } +} + +func parsePoints(count int, raw []float64, res []interface{}) { + for i := 0; i < count; i++ { + ind := 2 * i + if raw[ind] == -math.MaxFloat64 || raw[ind+1] == -math.MaxFloat64 { + res[i] = emptyPoint + continue + } + + res[i] = fmt.Sprintf("(%.5f, %.5f)", raw[ind], raw[ind+1]) + } +} + +func parseNanoTimeStamps(raw []int64, res []interface{}) { + for k, v := range raw { + res[k] = parseNanoTimeStamp(v) + } +} + +func parseNanoTimes(raw []int64, res []interface{}) { + for k, v := range raw { + res[k] = parseNanoTime(v) + } +} + +func parseMonths(raw []int32, res []interface{}) { + for k, v := range raw { + res[k] = parseMonth(v) + } +} + +func parseMinutes(raw []int32, res []interface{}) { + for k, v := range raw { + res[k] = parseMinute(v) + } +} + +func parseSeconds(raw []int32, res []interface{}) { + for k, v := range raw { + res[k] = parseSecond(v) + } +} + +func parseLongs(raw []int64, res []interface{}) { + for k, v := range raw { + res[k] = v + } +} + +func parseIPs(count int, raw []uint64, res []interface{}, bo protocol.ByteOrder) { + for i := 0; i < count; i++ { + ind := 2 * i + if raw[ind] == 0 && raw[ind+1] == 0 { + res[i] = "0.0.0.0" + 
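// The rest of this loop assumes a zero high word marks an IPv4 address held in the low 8 bytes; any other value is rendered as eight IPv6 groups. +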
continue + } + + low := make([]byte, protocol.Uint64Size) + bo.PutUint64(low, raw[ind]) + if raw[ind+1] == 0 { + res[i] = fmt.Sprintf("%d.%d.%d.%d", low[3], low[2], low[1], low[0]) + continue + } + + high := make([]byte, protocol.Uint64Size) + bo.PutUint64(high, raw[ind+1]) + res[i] = fmt.Sprintf("%x:%x:%x:%x:%x:%x:%x:%x", bo.Uint16(high[6:8]), bo.Uint16(high[4:6]), bo.Uint16(high[2:4]), + bo.Uint16(high[0:2]), bo.Uint16(low[6:8]), bo.Uint16(low[4:6]), bo.Uint16(low[2:4]), bo.Uint16(low[0:2])) + } +} + +func parseAny(raw []DataForm, res []interface{}) { + for k, v := range raw { + if v == nil { + res[k] = nil + continue + } + + res[k] = v + } +} + +func parseInt128s(count int, raw []uint64, res []interface{}) { + for i := 0; i < count; i++ { + ind := 2 * i + if raw[ind] == 0 && raw[ind+1] == 0 { + res[i] = "00000000000000000000000000000000" + continue + } + + res[i] = fmt.Sprintf("%016x%016x", raw[ind+1], raw[ind]) + } +} + +func parseInt(raw []int32, res []interface{}) { + for k, v := range raw { + res[k] = v + } +} + +func parseDurations(count int, raw []uint32, res []interface{}) { + for i := 0; i < count; i++ { + ind := 2 * i + if raw[ind] == MinInt32 || raw[ind] == 0 { + res[i] = "" + continue + } + + unit := durationUnit[raw[ind+1]] + res[i] = fmt.Sprintf("%d%s", raw[ind], unit) + } +} + +func parseFloats(raw []float32, res []interface{}) { + for k, v := range raw { + res[k] = v + } +} + +func parseDoubles(raw []float64, res []interface{}) { + for k, v := range raw { + res[k] = v + } +} + +func parseDateTimes(raw []int32, res []interface{}) { + for k, v := range raw { + res[k] = parseDateTime(v) + } +} + +func parseDateHours(raw []int32, res []interface{}) { + for k, v := range raw { + res[k] = parseDateHour(v) + } +} + +func parseDates(raw []int32, res []interface{}) { + for k, v := range raw { + res[k] = parseDate(v) + } +} + +func parseBytes(raw []byte, res []interface{}) { + for k, v := range raw { + res[k] = int8(v) + } +} + +func parseBlobs(raw [][]byte, res []interface{}) { + for k, v := range raw { + res[k] = v + } +} + +func parseBools(raw []uint8, res []interface{}) { + for k, v := range raw { + if v == MinInt8 { + res[k] = int8(math.MinInt8) + } else { + res[k] = v == 1 + } + } +} diff --git a/model/datatype_list_test.go b/model/datatype_list_test.go new file mode 100644 index 0000000..6b17ee3 --- /dev/null +++ b/model/datatype_list_test.go @@ -0,0 +1,676 @@ +package model + +import ( + "bytes" + "testing" + "time" + + "github.com/dolphindb/api-go/dialer/protocol" + + "github.com/stretchr/testify/assert" +) + +func TestDataTypeList(t *testing.T) { + dt, err := NewDataType(DtInt, int32(10)) + assert.Nil(t, err) + + dtl := NewDataTypeList(DtInt, []DataType{dt}) + assert.Equal(t, dtl.DataType(), DtInt) + assert.Equal(t, dtl.Len(), 1) + assert.Equal(t, dtl.AsOf(dt), 0) + + sl := dtl.StringList() + assert.Equal(t, sl, []string{"10"}) + + d := dtl.Get(0) + assert.Equal(t, d.DataType(), DtInt) + + dtl = dtl.Append(d) + assert.Equal(t, dtl.Len(), 2) + + sl = dtl.StringList() + assert.Equal(t, sl, []string{"10", "10"}) + + dtl = dtl.Sub(0, 1) + assert.Equal(t, dtl.Len(), 1) + + sl = dtl.StringList() + assert.Equal(t, sl, []string{"10"}) + assert.False(t, dtl.IsNull(0)) + + dtl.SetNull(0) + sl = dtl.StringList() + assert.Equal(t, sl[0], "") + assert.True(t, dtl.IsNull(0)) + + dt1, err := NewDataType(DtInt, int32(20)) + assert.Nil(t, err) + + err = dtl.Set(0, dt1) + assert.Nil(t, err) + + sl = dtl.StringList() + assert.Equal(t, sl, []string{"20"}) + + err = dtl.Set(1, dt1) + 
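// Sub(0, 1) above shrank the list to a single element, so writing at index 1 must fail with the out-of-range error asserted below. +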
assert.Equal(t, err.Error(), "index 1 exceeds the number of data 1") + + by := bytes.NewBufferString("") + w := protocol.NewWriter(by) + err = dtl.Render(w, protocol.LittleEndian) + w.Flush() + assert.Nil(t, err) + assert.Equal(t, by.String(), "\x14\x00\x00\x00") + + vct := NewVector(dtl) + dt, err = NewDataType(DtAny, vct) + assert.Nil(t, err) + + dtl = NewDataTypeList(DtAny, []DataType{dt}) + + sl = dtl.StringList() + assert.Equal(t, sl, []string{"vector([20])"}) + assert.False(t, dtl.IsNull(0)) + + dtl.SetNull(0) + sl = dtl.StringList() + assert.Equal(t, sl[0], "") + assert.True(t, dtl.IsNull(0)) + + dtl = NewEmptyDataTypeList(DtAny, 1) + sl = dtl.StringList() + assert.Equal(t, sl, []string{""}) + + dtl = NewEmptyDataTypeList(DtString, 1) + sl = dtl.StringList() + assert.Equal(t, sl, []string{""}) + + dt, err = NewDataType(DtString, "10") + assert.Nil(t, err) + err = dtl.Set(0, dt) + assert.Nil(t, err) + + str := dtl.ElementString(0) + assert.Equal(t, str, "10") + + dt1, err = NewDataType(DtString, "20") + assert.Nil(t, err) + + dtl.Append(dt1) + assert.Equal(t, dtl.AsOf(dt), 0) + assert.Equal(t, dtl.AsOf(dt1), 1) + + sl = dtl.StringList() + assert.Equal(t, sl[0], "10") + assert.False(t, dtl.IsNull(0)) + + dtl.SetNull(0) + sl = dtl.StringList() + assert.Equal(t, sl[0], "") + assert.True(t, dtl.IsNull(0)) + + dtl, err = NewDataTypeListWithRaw(DtChar, []byte{0, 1}) + assert.Nil(t, err) + assert.Equal(t, dtl.Len(), 2) + assert.Equal(t, dtl.DataType(), DtChar) + + dt, err = NewDataType(DtChar, byte(1)) + assert.Nil(t, err) + assert.Equal(t, dtl.AsOf(dt), 1) + + sl = dtl.StringList() + assert.Equal(t, sl[0], "0") + assert.False(t, dtl.IsNull(0)) + + dtl.SetNull(0) + sl = dtl.StringList() + assert.Equal(t, sl[0], "") + assert.True(t, dtl.IsNull(0)) + + dtl, err = NewDataTypeListWithRaw(DtInt, []int32{1, 2, 3, 4}) + assert.Nil(t, err) + assert.Equal(t, dtl.Len(), 4) + + sl = dtl.StringList() + assert.Equal(t, sl, []string{"1", "2", "3", "4"}) + + dtl = dtl.GetSubList([]int{1, 3}) + + sl = dtl.StringList() + assert.Equal(t, sl, []string{"2", "4"}) + + dtl, err = NewDataTypeListWithRaw(DtBool, []byte{1}) + assert.Nil(t, err) + assert.Equal(t, dtl.Len(), 1) + assert.Equal(t, dtl.DataType(), DtBool) + + sl = dtl.StringList() + assert.Equal(t, sl[0], "true") + assert.False(t, dtl.IsNull(0)) + + dtl.SetNull(0) + sl = dtl.StringList() + assert.Equal(t, sl[0], "") + assert.True(t, dtl.IsNull(0)) + + dtl, err = NewDataTypeListWithRaw(DtBlob, [][]byte{{0, 1, 1}}) + assert.Nil(t, err) + assert.Equal(t, dtl.Len(), 1) + assert.Equal(t, dtl.DataType(), DtBlob) + + by.Reset() + err = dtl.Render(w, protocol.LittleEndian) + assert.Nil(t, err) + + w.Flush() + assert.Equal(t, by.Bytes(), []byte{0x3, 0x0, 0x0, 0x0, 0x0, 0x1, 0x1}) + + sl = dtl.StringList() + assert.Equal(t, sl[0], "\x00\x01\x01") + assert.False(t, dtl.IsNull(0)) + + dtl.SetNull(0) + sl = dtl.StringList() + assert.Equal(t, sl[0], "") + assert.True(t, dtl.IsNull(0)) + + dt, err = NewDataType(DtVoid, nil) + assert.Nil(t, err) + + dtl = NewDataTypeList(DtVoid, []DataType{dt}) + by.Reset() + err = dtl.Render(w, protocol.LittleEndian) + assert.Nil(t, err) + + w.Flush() + assert.Equal(t, by.Bytes(), []byte{0x0}) + + dtl, err = NewDataTypeListWithRaw(DtComplex, [][2]float64{{1, 1}}) + assert.Nil(t, err) + assert.Equal(t, dtl.Len(), 1) + assert.Equal(t, dtl.DataType(), DtComplex) + + by.Reset() + err = dtl.Render(w, protocol.LittleEndian) + assert.Nil(t, err) + + w.Flush() + assert.Equal(t, by.Bytes(), []byte{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xf0, 0x3f, 0x0, 
0x0, 0x0, 0x0, 0x0, 0x0, 0xf0, 0x3f}) + + sl = dtl.StringList() + assert.Equal(t, sl[0], "1.00000+1.00000i") + assert.False(t, dtl.IsNull(0)) + + dtl.SetNull(0) + sl = dtl.StringList() + assert.Equal(t, sl[0], "") + assert.True(t, dtl.IsNull(0)) + + dtl, err = NewDataTypeListWithRaw(DtDouble+64, []float64{1}) + assert.Nil(t, err) + assert.Equal(t, dtl.Len(), 1) + assert.Equal(t, dtl.DataType(), DtDouble) + + by.Reset() + err = dtl.Render(w, protocol.LittleEndian) + assert.Nil(t, err) + + w.Flush() + assert.Equal(t, by.Bytes(), []byte{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xf0, 0x3f}) + + dt, err = NewDataType(DtDouble, float64(1)) + assert.Nil(t, err) + assert.Equal(t, dtl.AsOf(dt), 0) + + sl = dtl.StringList() + assert.Equal(t, sl[0], "1") + assert.False(t, dtl.IsNull(0)) + + dtl.SetNull(0) + sl = dtl.StringList() + assert.Equal(t, sl[0], "") + assert.True(t, dtl.IsNull(0)) + + dtl, err = NewDataTypeListWithRaw(DtTime, []time.Time{time.Date(2022, 5, 1, 2, 2, 2, 20, time.UTC)}) + assert.Nil(t, err) + assert.Equal(t, dtl.Len(), 1) + assert.Equal(t, dtl.DataType(), DtTime) + + sl = dtl.StringList() + assert.Equal(t, sl[0], "02:02:02.000") + assert.False(t, dtl.IsNull(0)) + + dtl.SetNull(0) + sl = dtl.StringList() + assert.Equal(t, sl[0], "") + assert.True(t, dtl.IsNull(0)) + + dtl, err = NewDataTypeListWithRaw(DtDateHour, []time.Time{time.Date(2022, 5, 1, 2, 2, 2, 20, time.UTC)}) + assert.Nil(t, err) + assert.Equal(t, dtl.Len(), 1) + assert.Equal(t, dtl.DataType(), DtDateHour) + + sl = dtl.StringList() + assert.Equal(t, sl[0], "2022.05.01T02") + assert.False(t, dtl.IsNull(0)) + + dtl.SetNull(0) + sl = dtl.StringList() + assert.Equal(t, sl[0], "") + assert.True(t, dtl.IsNull(0)) + + dtl, err = NewDataTypeListWithRaw(DtDate, []time.Time{time.Date(2022, 5, 1, 2, 2, 2, 20, time.UTC)}) + assert.Nil(t, err) + assert.Equal(t, dtl.Len(), 1) + assert.Equal(t, dtl.DataType(), DtDate) + + sl = dtl.StringList() + assert.Equal(t, sl[0], "2022.05.01") + assert.False(t, dtl.IsNull(0)) + + dtl.SetNull(0) + sl = dtl.StringList() + assert.Equal(t, sl[0], "") + assert.True(t, dtl.IsNull(0)) + + dtl, err = NewDataTypeListWithRaw(DtDatetime, []time.Time{time.Date(2022, 5, 1, 2, 2, 2, 20, time.UTC)}) + assert.Nil(t, err) + assert.Equal(t, dtl.Len(), 1) + assert.Equal(t, dtl.DataType(), DtDatetime) + + sl = dtl.StringList() + assert.Equal(t, sl[0], "2022.05.01T02:02:02") + assert.False(t, dtl.IsNull(0)) + + dtl.SetNull(0) + sl = dtl.StringList() + assert.Equal(t, sl[0], "") + assert.True(t, dtl.IsNull(0)) + + dtl, err = NewDataTypeListWithRaw(DtMinute, []time.Time{time.Date(2022, 5, 1, 2, 2, 2, 20, time.UTC)}) + assert.Nil(t, err) + assert.Equal(t, dtl.Len(), 1) + assert.Equal(t, dtl.DataType(), DtMinute) + + sl = dtl.StringList() + assert.Equal(t, sl[0], "02:02m") + assert.False(t, dtl.IsNull(0)) + + dtl.SetNull(0) + sl = dtl.StringList() + assert.Equal(t, sl[0], "") + assert.True(t, dtl.IsNull(0)) + + dtl, err = NewDataTypeListWithRaw(DtMonth, []time.Time{time.Date(2022, 5, 1, 2, 2, 2, 20, time.UTC)}) + assert.Nil(t, err) + assert.Equal(t, dtl.Len(), 1) + assert.Equal(t, dtl.DataType(), DtMonth) + + sl = dtl.StringList() + assert.Equal(t, sl[0], "2022.05M") + assert.False(t, dtl.IsNull(0)) + + dtl.SetNull(0) + sl = dtl.StringList() + assert.Equal(t, sl[0], "") + assert.True(t, dtl.IsNull(0)) + + dtl, err = NewDataTypeListWithRaw(DtNanoTime, []time.Time{time.Date(2022, 5, 1, 2, 2, 2, 20, time.UTC)}) + assert.Nil(t, err) + assert.Equal(t, dtl.Len(), 1) + assert.Equal(t, dtl.DataType(), DtNanoTime) + + sl = 
dtl.StringList() + assert.Equal(t, sl[0], "02:02:02.000000020") + assert.False(t, dtl.IsNull(0)) + + dtl.SetNull(0) + sl = dtl.StringList() + assert.Equal(t, sl[0], "") + assert.True(t, dtl.IsNull(0)) + + dtl, err = NewDataTypeListWithRaw(DtSecond, []time.Time{time.Date(2022, 5, 1, 2, 2, 2, 20, time.UTC)}) + assert.Nil(t, err) + assert.Equal(t, dtl.Len(), 1) + assert.Equal(t, dtl.DataType(), DtSecond) + + sl = dtl.StringList() + assert.Equal(t, sl[0], "02:02:02") + assert.False(t, dtl.IsNull(0)) + + dtl.SetNull(0) + sl = dtl.StringList() + assert.Equal(t, sl[0], "") + assert.True(t, dtl.IsNull(0)) + + dtl, err = NewDataTypeListWithRaw(DtTimestamp, []time.Time{time.Date(2022, 5, 1, 2, 2, 2, 20, time.UTC)}) + assert.Nil(t, err) + assert.Equal(t, dtl.Len(), 1) + assert.Equal(t, dtl.DataType(), DtTimestamp) + + sl = dtl.StringList() + assert.Equal(t, sl[0], "2022.05.01T02:02:02.000") + assert.False(t, dtl.IsNull(0)) + + dtl.SetNull(0) + sl = dtl.StringList() + assert.Equal(t, sl[0], "") + assert.True(t, dtl.IsNull(0)) + + dtl, err = NewDataTypeListWithRaw(DtNanoTimestamp, []time.Time{time.Date(2022, 5, 1, 2, 2, 2, 20, time.UTC)}) + assert.Nil(t, err) + assert.Equal(t, dtl.Len(), 1) + assert.Equal(t, dtl.DataType(), DtNanoTimestamp) + + sl = dtl.StringList() + assert.Equal(t, sl[0], "2022.05.01T02:02:02.000000020") + assert.False(t, dtl.IsNull(0)) + + dtl.SetNull(0) + sl = dtl.StringList() + assert.Equal(t, sl[0], "") + assert.True(t, dtl.IsNull(0)) + + dtl, err = NewDataTypeListWithRaw(DtUUID, []string{"e5eca940-5b99-45d0-bf1c-620f6b1b9d5b"}) + assert.Nil(t, err) + assert.Equal(t, dtl.Len(), 1) + assert.Equal(t, dtl.DataType(), DtUUID) + + sl = dtl.StringList() + assert.Equal(t, sl[0], "e5eca940-5b99-45d0-bf1c-620f6b1b9d5b") + assert.False(t, dtl.IsNull(0)) + + dtl.SetNull(0) + sl = dtl.StringList() + assert.Equal(t, sl[0], "00000000-0000-0000-0000-000000000000") + assert.True(t, dtl.IsNull(0)) + + dtl, err = NewDataTypeListWithRaw(DtInt128, []string{"e1671797c52e15f763380b45e841ec32", "e1671797c52e15f763380b45e841ec33"}) + assert.Nil(t, err) + assert.Equal(t, dtl.Len(), 2) + assert.Equal(t, dtl.DataType(), DtInt128) + + sl = dtl.StringList() + assert.Equal(t, sl[0], "e1671797c52e15f763380b45e841ec32") + assert.Equal(t, sl[1], "e1671797c52e15f763380b45e841ec33") + assert.False(t, dtl.IsNull(0)) + + dtl.SetNull(0) + sl = dtl.StringList() + assert.Equal(t, sl[0], "00000000000000000000000000000000") + assert.True(t, dtl.IsNull(0)) + + dtl, err = NewDataTypeListWithRaw(DtPoint, [][2]float64{{1, 1}}) + assert.Nil(t, err) + assert.Equal(t, dtl.Len(), 1) + assert.Equal(t, dtl.DataType(), DtPoint) + + sl = dtl.StringList() + assert.Equal(t, sl[0], "(1.00000, 1.00000)") + assert.False(t, dtl.IsNull(0)) + + dtl.SetNull(0) + sl = dtl.StringList() + assert.Equal(t, sl[0], emptyPoint) + assert.True(t, dtl.IsNull(0)) + + dtl, err = NewDataTypeListWithRaw(DtDuration, []string{"10H"}) + assert.Nil(t, err) + assert.Equal(t, dtl.Len(), 1) + assert.Equal(t, dtl.DataType(), DtDuration) + + by.Reset() + err = dtl.Render(w, protocol.LittleEndian) + assert.Nil(t, err) + + w.Flush() + assert.Equal(t, by.Bytes(), []byte{0xa, 0x0, 0x0, 0x0, 0x5, 0x0, 0x0, 0x0}) + + sl = dtl.StringList() + assert.Equal(t, sl[0], "10H") + assert.False(t, dtl.IsNull(0)) + + dtl.SetNull(0) + sl = dtl.StringList() + assert.Equal(t, sl[0], "") + assert.True(t, dtl.IsNull(0)) + + dtl, err = NewDataTypeListWithRaw(DtIP, []string{"346b:6c2a:3347:d244:7654:5d5a:bcbb:5dc7"}) + assert.Nil(t, err) + assert.Equal(t, dtl.Len(), 1) + assert.Equal(t, 
dtl.DataType(), DtIP) + + sl = dtl.StringList() + assert.Equal(t, sl[0], "346b:6c2a:3347:d244:7654:5d5a:bcbb:5dc7") + assert.False(t, dtl.IsNull(0)) + + dtl.SetNull(0) + sl = dtl.StringList() + assert.Equal(t, sl[0], "0.0.0.0") + assert.True(t, dtl.IsNull(0)) + + dtl, err = NewDataTypeListWithRaw(DtIP, []string{"127.0.0.1", "127.0.0.2"}) + assert.Nil(t, err) + assert.Equal(t, dtl.Len(), 2) + assert.Equal(t, dtl.DataType(), DtIP) + + by.Reset() + err = dtl.Render(w, protocol.LittleEndian) + assert.Nil(t, err) + + w.Flush() + assert.Equal(t, by.Bytes(), []byte{0x1, 0x0, 0x0, 0x7f, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, + 0x0, 0x0, 0x0, 0x2, 0x0, 0x0, 0x7f, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0}) + + sl = dtl.StringList() + assert.Equal(t, sl[0], "127.0.0.1") + assert.Equal(t, sl[1], "127.0.0.2") + assert.False(t, dtl.IsNull(0)) + + dtl.SetNull(0) + sl = dtl.StringList() + assert.Equal(t, sl[0], "0.0.0.0") + assert.True(t, dtl.IsNull(0)) + + dtl, err = NewDataTypeListWithRaw(DtFloat, []float32{1, 2}) + assert.Nil(t, err) + assert.Equal(t, dtl.Len(), 2) + assert.Equal(t, dtl.DataType(), DtFloat) + + by.Reset() + err = dtl.Render(w, protocol.LittleEndian) + assert.Nil(t, err) + + w.Flush() + assert.Equal(t, by.Bytes(), []byte{0x0, 0x0, 0x80, 0x3f, 0x0, 0x0, 0x0, 0x40}) + + dt, err = NewDataType(DtFloat, float32(2.0)) + assert.Nil(t, err) + assert.Equal(t, dtl.AsOf(dt), 1) + + sl = dtl.StringList() + assert.Equal(t, sl[0], "1") + assert.False(t, dtl.IsNull(0)) + + dtl.SetNull(0) + sl = dtl.StringList() + assert.Equal(t, sl[0], "") + assert.True(t, dtl.IsNull(0)) + + dtl, err = NewDataTypeListWithRaw(DtLong, []int64{1, 2}) + assert.Nil(t, err) + assert.Equal(t, dtl.Len(), 2) + assert.Equal(t, dtl.DataType(), DtLong) + + dt, err = NewDataType(DtLong, int64(2)) + assert.Nil(t, err) + assert.Equal(t, dtl.AsOf(dt), 1) + + sl = dtl.StringList() + assert.Equal(t, sl[0], "1") + assert.False(t, dtl.IsNull(0)) + + dtl.SetNull(0) + sl = dtl.StringList() + assert.Equal(t, sl[0], "") + assert.True(t, dtl.IsNull(0)) + + dtl, err = NewDataTypeListWithRaw(DtShort, []int16{1, 2}) + assert.Nil(t, err) + assert.Equal(t, dtl.Len(), 2) + assert.Equal(t, dtl.DataType(), DtShort) + + dt, err = NewDataType(DtShort, int16(2.0)) + assert.Nil(t, err) + assert.Equal(t, dtl.AsOf(dt), 1) + + sl = dtl.StringList() + assert.Equal(t, sl[0], "1") + assert.False(t, dtl.IsNull(0)) + + dtl.SetNull(0) + sl = dtl.StringList() + assert.Equal(t, sl[0], "") + assert.True(t, dtl.IsNull(0)) + + _, err = NewDataTypeListWithRaw(DtLong, []int32{1, 2}) + assert.NotNil(t, err) + assert.Equal(t, err.Error(), "the type of input must be []int64 when datatype is DtLong") +} + +func TestNewDataTypeListWithRawWithNullValue(t *testing.T) { + dt, err := NewDataTypeListWithRaw(DtBool, []byte{1, NullBool}) + assert.Nil(t, err) + assert.True(t, dt.IsNull(1)) + assert.Equal(t, dt.ElementString(1), "") + + dt, err = NewDataTypeListWithRaw(DtChar, []byte{97, NullChar}) + assert.Nil(t, err) + assert.True(t, dt.IsNull(1)) + assert.Equal(t, dt.ElementString(1), "") + + dt, err = NewDataTypeListWithRaw(DtShort, []int16{1, NullShort}) + assert.Nil(t, err) + assert.True(t, dt.IsNull(1)) + assert.Equal(t, dt.ElementString(1), "") + + dt, err = NewDataTypeListWithRaw(DtLong, []int64{1, NullLong}) + assert.Nil(t, err) + assert.True(t, dt.IsNull(1)) + assert.Equal(t, dt.ElementString(1), "") + + dt, err = NewDataTypeListWithRaw(DtDate, []time.Time{originalTime, NullTime}) + assert.Nil(t, err) + assert.True(t, dt.IsNull(1)) + assert.Equal(t, 
dt.ElementString(1), "") + + dt, err = NewDataTypeListWithRaw(DtMonth, []time.Time{originalTime, NullTime}) + assert.Nil(t, err) + assert.True(t, dt.IsNull(1)) + assert.Equal(t, dt.ElementString(1), "") + + dt, err = NewDataTypeListWithRaw(DtTime, []time.Time{originalTime, NullTime}) + assert.Nil(t, err) + assert.True(t, dt.IsNull(1)) + assert.Equal(t, dt.ElementString(1), "") + + dt, err = NewDataTypeListWithRaw(DtMinute, []time.Time{originalTime, NullTime}) + assert.Nil(t, err) + assert.True(t, dt.IsNull(1)) + assert.Equal(t, dt.ElementString(1), "") + + dt, err = NewDataTypeListWithRaw(DtSecond, []time.Time{originalTime, NullTime}) + assert.Nil(t, err) + assert.True(t, dt.IsNull(1)) + assert.Equal(t, dt.ElementString(1), "") + + dt, err = NewDataTypeListWithRaw(DtDatetime, []time.Time{originalTime, NullTime}) + assert.Nil(t, err) + assert.True(t, dt.IsNull(1)) + assert.Equal(t, dt.ElementString(1), "") + + dt, err = NewDataTypeListWithRaw(DtTimestamp, []time.Time{originalTime, NullTime}) + assert.Nil(t, err) + assert.True(t, dt.IsNull(1)) + assert.Equal(t, dt.ElementString(1), "") + + dt, err = NewDataTypeListWithRaw(DtNanoTime, []time.Time{originalTime, NullTime}) + assert.Nil(t, err) + assert.True(t, dt.IsNull(1)) + assert.Equal(t, dt.ElementString(1), "") + + dt, err = NewDataTypeListWithRaw(DtNanoTimestamp, []time.Time{originalTime, NullTime}) + assert.Nil(t, err) + assert.True(t, dt.IsNull(1)) + assert.Equal(t, dt.ElementString(1), "") + + dt, err = NewDataTypeListWithRaw(DtFloat, []float32{1.0, NullFloat}) + assert.Nil(t, err) + assert.True(t, dt.IsNull(1)) + assert.Equal(t, dt.ElementString(1), "") + + dt, err = NewDataTypeListWithRaw(DtDouble, []float64{1.0, NullDouble}) + assert.Nil(t, err) + assert.True(t, dt.IsNull(1)) + assert.Equal(t, dt.ElementString(1), "") + + dt, err = NewDataTypeListWithRaw(DtSymbol, []string{"sym", NullString}) + assert.Nil(t, err) + assert.True(t, dt.IsNull(1)) + assert.Equal(t, dt.ElementString(1), "") + + dt, err = NewDataTypeListWithRaw(DtString, []string{"str", NullString}) + assert.Nil(t, err) + assert.True(t, dt.IsNull(1)) + assert.Equal(t, dt.ElementString(1), "") + + dt, err = NewDataTypeListWithRaw(DtUUID, []string{"e5eca940-5b99-45d0-bf1c-620f6b1b9d5b", NullUUID}) + assert.Nil(t, err) + assert.True(t, dt.IsNull(1)) + assert.Equal(t, dt.ElementString(1), "00000000-0000-0000-0000-000000000000") + + dt, err = NewDataTypeListWithRaw(DtAny, []DataForm{nil, NullAny}) + assert.Nil(t, err) + assert.True(t, dt.IsNull(1)) + assert.Equal(t, dt.ElementString(1), "") + + dt, err = NewDataTypeListWithRaw(DtCompress, []byte{0, NullCompress}) + assert.Nil(t, err) + assert.True(t, dt.IsNull(1)) + assert.Equal(t, dt.ElementString(1), "") + + dt, err = NewDataTypeListWithRaw(DtDateHour, []time.Time{originalTime, NullTime}) + assert.Nil(t, err) + assert.True(t, dt.IsNull(1)) + assert.Equal(t, dt.ElementString(1), "") + + dt, err = NewDataTypeListWithRaw(DtIP, []string{"127.0.0.1", NullIP}) + assert.Nil(t, err) + assert.True(t, dt.IsNull(1)) + assert.Equal(t, dt.ElementString(1), "0.0.0.0") + + dt, err = NewDataTypeListWithRaw(DtInt128, []string{"e1671797c52e15f763380b45e841ec32", NullInt128}) + assert.Nil(t, err) + assert.True(t, dt.IsNull(1)) + assert.Equal(t, dt.ElementString(1), "00000000000000000000000000000000") + + dt, err = NewDataTypeListWithRaw(DtBlob, [][]byte{{0, 1}, NullBlob}) + assert.Nil(t, err) + assert.True(t, dt.IsNull(1)) + assert.Equal(t, dt.ElementString(1), "") + + dt, err = NewDataTypeListWithRaw(DtComplex, [][2]float64{{1, 1}, 
NullComplex}) + assert.Nil(t, err) + assert.True(t, dt.IsNull(1)) + assert.Equal(t, dt.ElementString(1), "") + + dt, err = NewDataTypeListWithRaw(DtPoint, [][2]float64{{1, 1}, NullPoint}) + assert.Nil(t, err) + assert.True(t, dt.IsNull(1)) + assert.Equal(t, dt.ElementString(1), emptyPoint) + + dt, err = NewDataTypeListWithRaw(DtDuration, []string{"10m", NullDuration}) + assert.Nil(t, err) + assert.True(t, dt.IsNull(1)) + assert.Equal(t, dt.ElementString(1), "") +} diff --git a/model/datatype_test.go b/model/datatype_test.go new file mode 100644 index 0000000..8656dd2 --- /dev/null +++ b/model/datatype_test.go @@ -0,0 +1,511 @@ +package model + +import ( + "testing" + "time" + + "github.com/stretchr/testify/assert" +) + +func TestDatatype(t *testing.T) { + dt, err := NewDataType(DtVoid, nil) + assert.Nil(t, err) + assert.Equal(t, dt.DataType(), DtVoid) + + str := dt.String() + assert.Equal(t, str, "") + + dt, err = NewDataType(DtBool, byte(1)) + assert.Nil(t, err) + assert.Equal(t, dt.DataType(), DtBool) + + str = dt.String() + assert.Equal(t, str, "true") + + dt.SetNull() + str = dt.String() + assert.Equal(t, str, "") + + dt, err = NewDataType(DtBlob, []byte{1, 2, 3, 4, 5}) + assert.Nil(t, err) + assert.Equal(t, dt.DataType(), DtBlob) + assert.Equal(t, dt.Value(), []byte{1, 2, 3, 4, 5}) + + str = dt.String() + assert.Equal(t, str, "\x01\x02\x03\x04\x05") + + dt.SetNull() + str = dt.String() + assert.Equal(t, str, "") + + dt, err = NewDataType(DtChar, byte(97)) + assert.Nil(t, err) + assert.Equal(t, dt.DataType(), DtChar) + + str = dt.String() + assert.Equal(t, str, "97") + + dt.SetNull() + str = dt.String() + assert.Equal(t, str, "") + + buckets := []int{13, 43, 71, 97, 4097} + v, b := -127, -128 + dl, _ := NewDataTypeListWithRaw(DtChar, []uint8{127, uint8(v), 12, 0, uint8(b)}) + expectCharHashBuckets := []int{10, 12, 12, 0, -1, 41, 18, 12, 0, -1, 56, 24, 12, 0, -1, 30, 5, 12, 0, -1, 127, 129, 12, 0, -1} + count := 0 + for i := 0; i < 5; i++ { + for j := 0; j < 5; j++ { + assert.Equal(t, dl.Get(j).HashBucket(buckets[i]), expectCharHashBuckets[count]) + count++ + } + } + + dt, err = NewDataType(DtComplex, [2]float64{1, 1}) + assert.Nil(t, err) + assert.Equal(t, dt.DataType(), DtComplex) + + str = dt.String() + assert.Equal(t, str, "1.00000+1.00000i") + + dt.SetNull() + str = dt.String() + assert.Equal(t, str, "") + + ti := time.Date(1968, 11, 1, 23, 59, 59, 154140487, time.UTC) + dt, err = NewDataType(DtDate, ti) + assert.Nil(t, err) + assert.Equal(t, dt.DataType(), DtDate) + + str = dt.String() + assert.Equal(t, str, "1968.11.01") + + dt.SetNull() + str = dt.String() + assert.Equal(t, str, "") + + ti = time.Date(1968, 11, 1, 23, 59, 59, 154140487, time.UTC) + dt, err = NewDataType(DtDateHour, ti) + assert.Nil(t, err) + assert.Equal(t, dt.DataType(), DtDateHour) + + str = dt.String() + assert.Equal(t, str, "1968.11.01T23") + + dt.SetNull() + str = dt.String() + assert.Equal(t, str, "") + + ti = time.Date(1968, 11, 1, 23, 59, 59, 154140487, time.UTC) + dt, err = NewDataType(DtDatetime, ti) + assert.Nil(t, err) + assert.Equal(t, dt.DataType(), DtDatetime) + + str = dt.String() + assert.Equal(t, str, "1968.11.01T23:59:59") + + dt.SetNull() + str = dt.String() + assert.Equal(t, str, "") + + dt, err = NewDataType(DtDouble+64, float64(1)) + assert.Nil(t, err) + assert.Equal(t, dt.DataType(), DtDouble) + + str = dt.String() + assert.Equal(t, str, "1") + + dt.SetNull() + str = dt.String() + assert.Equal(t, str, "") + + dt, err = NewDataType(DtFloat, float32(1.0)) + assert.Nil(t, err) + 
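// Scalar formatting presumably goes through %v, so float32(1.0) is expected to print as "1" rather than "1.000000". +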
assert.Equal(t, dt.DataType(), DtFloat) + + str = dt.String() + assert.Equal(t, str, "1") + + dt.SetNull() + str = dt.String() + assert.Equal(t, str, "") + + dt, err = NewDataType(DtDuration, "10H") + assert.Nil(t, err) + assert.Equal(t, dt.DataType(), DtDuration) + + str = dt.String() + assert.Equal(t, str, "10H") + + dt.SetNull() + str = dt.String() + assert.Equal(t, str, "") + + dt, err = NewDataType(DtInt, int32(10)) + assert.Nil(t, err) + assert.Equal(t, dt.DataType(), DtInt) + + str = dt.String() + assert.Equal(t, str, "10") + + dt.SetNull() + str = dt.String() + assert.Equal(t, str, "") + + dl, _ = NewDataTypeListWithRaw(DtInt, []int32{2147483647, -2147483647, 99, 0, -12}) + expectIntHashBuckets := []int{10, 12, 8, 0, 10, 7, 9, 13, 0, 4, 39, 41, 28, 0, 68, 65, 67, 2, 0, 23, 127, 129, 99, 0, 244} + count = 0 + for i := 0; i < 5; i++ { + for j := 0; j < 5; j++ { + assert.Equal(t, dl.Get(j).HashBucket(buckets[i]), expectIntHashBuckets[count]) + count++ + } + } + + dt, err = NewDataType(DtInt128, "e1671797c52e15f763380b45e841ec32") + assert.Nil(t, err) + assert.Equal(t, dt.DataType(), DtInt128) + + str = dt.String() + assert.Equal(t, str, "e1671797c52e15f763380b45e841ec32") + + dt.SetNull() + str = dt.String() + assert.Equal(t, str, "00000000000000000000000000000000") + + dl, _ = NewDataTypeListWithRaw(DtInt128, []string{"4b7545dc735379254fbf804dec34977f", "6f29ffbf80722c9fd386c6e48ca96340", "dd92685907f08a99ec5f8235c15a1588", + "4f5387611b41d1385e272e6e866f862d", "130d6d5a0536c99ac7f9a01363b107c0"}) + expectInt128HashBuckets := []int{11, 6, 2, 3, 6, 42, 6, 30, 10, 32, 7, 47, 48, 31, 44, 15, 45, 75, 49, 44, 1116, 3479, 4032, 2053, 3150} + count = 0 + for i := 0; i < 5; i++ { + for j := 0; j < 5; j++ { + assert.Equal(t, dl.Get(j).HashBucket(buckets[i]), expectInt128HashBuckets[count]) + count++ + } + } + + dt, err = NewDataType(DtIP, "346b:6c2a:3347:d244:7654:5d5a:bcbb:5dc7") + assert.Nil(t, err) + assert.Equal(t, dt.DataType(), DtIP) + + str = dt.String() + assert.Equal(t, str, "346b:6c2a:3347:d244:7654:5d5a:bcbb:5dc7") + + dt.SetNull() + str = dt.String() + assert.Equal(t, str, "0.0.0.0") + + dt, err = NewDataType(DtLong, int64(1)) + assert.Nil(t, err) + assert.Equal(t, dt.DataType(), DtLong) + + str = dt.String() + assert.Equal(t, str, "1") + + dt.SetNull() + str = dt.String() + assert.Equal(t, str, "") + + dl, _ = NewDataTypeListWithRaw(DtLong, []int64{9223372036854775807, -9223372036854775807, 12, 0, -12}) + expectLongHashBuckets := []int{7, 9, 12, 0, 4, 41, 0, 12, 0, 29, 4, 6, 12, 0, 69, 78, 80, 12, 0, 49, 4088, 4090, 12, 0, 4069} + count = 0 + for i := 0; i < 5; i++ { + for j := 0; j < 5; j++ { + assert.Equal(t, dl.Get(j).HashBucket(buckets[i]), expectLongHashBuckets[count]) + count++ + } + } + + ti = time.Date(1970, time.Month(1), 1, 0, 0, 0, 0, time.UTC).Add(100 * time.Hour) + dt, err = NewDataType(DtMinute, ti) + assert.Nil(t, err) + assert.Equal(t, dt.DataType(), DtMinute) + + str = dt.String() + assert.Equal(t, str, "04:00m") + + dt.SetNull() + str = dt.String() + assert.Equal(t, str, "") + + dt, err = NewDataType(DtMonth, ti) + assert.Nil(t, err) + assert.Equal(t, dt.DataType(), DtMonth) + + str = dt.String() + assert.Equal(t, str, "1970.01M") + + dt.SetNull() + str = dt.String() + assert.Equal(t, str, "") + + ti = time.Date(1970, time.Month(1), 1, 0, 0, 0, 0, time.UTC).Add(100 * time.Hour) + dt, err = NewDataType(DtNanoTime, ti) + assert.Nil(t, err) + assert.Equal(t, dt.DataType(), DtNanoTime) + + str = dt.String() + assert.Equal(t, str, "04:00:00.000000000") + + 
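// SetNull marks the scalar null; for temporal types String() is then expected to return the empty string, as the assertions below verify. +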
dt.SetNull() + str = dt.String() + assert.Equal(t, str, "") + + ti = time.Date(1970, time.Month(1), 1, 0, 0, 0, 0, time.UTC).Add(100 * time.Hour) + dt, err = NewDataType(DtNanoTimestamp, ti) + assert.Nil(t, err) + assert.Equal(t, dt.DataType(), DtNanoTimestamp) + + str = dt.String() + assert.Equal(t, str, "1970.01.05T04:00:00.000000000") + + dt.SetNull() + str = dt.String() + assert.Equal(t, str, "") + + dt, err = NewDataType(DtPoint, [2]float64{1, 1}) + assert.Nil(t, err) + assert.Equal(t, dt.DataType(), DtPoint) + + str = dt.String() + assert.Equal(t, str, "(1.00000, 1.00000)") + + dt.SetNull() + str = dt.String() + assert.Equal(t, str, emptyPoint) + + ti = time.Date(1970, time.Month(1), 1, 0, 0, 0, 0, time.UTC).Add(100 * time.Hour) + dt, err = NewDataType(DtSecond, ti) + assert.Nil(t, err) + assert.Equal(t, dt.DataType(), DtSecond) + + str = dt.String() + assert.Equal(t, str, "04:00:00") + + dt.SetNull() + str = dt.String() + assert.Equal(t, str, "") + + dt, err = NewDataType(DtShort, int16(10)) + assert.Nil(t, err) + assert.Equal(t, dt.DataType(), DtShort) + + str = dt.String() + assert.Equal(t, str, "10") + + dt.SetNull() + str = dt.String() + assert.Equal(t, str, "") + + dl, _ = NewDataTypeListWithRaw(DtShort, []int16{32767, -32767, 12, 0, -12}) + expectShortHashBuckets := []int{7, 2, 12, 0, 10, 1, 15, 12, 0, 4, 36, 44, 12, 0, 68, 78, 54, 12, 0, 23, 4088, 265, 12, 0, 244} + count = 0 + for i := 0; i < 5; i++ { + for j := 0; j < 5; j++ { + assert.Equal(t, dl.Get(j).HashBucket(buckets[i]), expectShortHashBuckets[count]) + count++ + } + } + + ti = time.Date(1970, time.Month(1), 1, 0, 0, 0, 0, time.UTC).Add(100 * time.Hour) + dt, err = NewDataType(DtTime, ti) + assert.Nil(t, err) + assert.Equal(t, dt.DataType(), DtTime) + + str = dt.String() + assert.Equal(t, str, "04:00:00.000") + + dt.SetNull() + str = dt.String() + assert.Equal(t, str, "") + + ti = time.Date(1970, time.Month(1), 1, 0, 0, 0, 0, time.UTC).Add(100 * time.Hour) + dt, err = NewDataType(DtTimestamp, ti) + assert.Nil(t, err) + assert.Equal(t, dt.DataType(), DtTimestamp) + + str = dt.String() + assert.Equal(t, str, "1970.01.05T04:00:00.000") + + s := NewScalar(dt) + dt, err = NewDataType(DtAny, s) + assert.Nil(t, err) + assert.Equal(t, dt.DataType(), DtAny) + + str = dt.String() + assert.Equal(t, str, "timestamp(1970.01.05T04:00:00.000)") + assert.Equal(t, str, dt.String()) + + dt.SetNull() + str = dt.String() + assert.Equal(t, str, "") + + dt, err = NewDataType(DtSymbol, "datatype") + assert.Nil(t, err) + assert.Equal(t, dt.DataType(), DtSymbol) + + str = dt.String() + assert.Equal(t, str, "datatype") + + dt.SetNull() + str = dt.String() + assert.Equal(t, str, "") + + dl, _ = NewDataTypeListWithRaw(DtString, []string{"!@#$%^&*()", "我是中文测试内容", "我是!@#$%^中文&*()", "e1281ls.zxl.d.,cxnv./';'sla", "abckdlskdful", ""}) + expectStringHashBuckets := []int{8, 11, 9, 12, 1, 0, 25, 3, 40, 28, 18, 0, 31, 14, 49, 8, 48, 0, 52, 92, 54, 4, 47, 0, 3892, 1574, 148, 3118, 1732, 0} + count = 0 + for i := 0; i < 5; i++ { + for j := 0; j < 6; j++ { + assert.Equal(t, dl.Get(j).HashBucket(buckets[i]), expectStringHashBuckets[count]) + count++ + } + } + + dt, err = NewDataType(DtUUID, "e5eca940-5b99-45d0-bf1c-620f6b1b9d5b") + assert.Nil(t, err) + assert.Equal(t, dt.DataType(), DtUUID) + + str = dt.String() + assert.Equal(t, str, "e5eca940-5b99-45d0-bf1c-620f6b1b9d5b") + + dt.SetNull() + str = dt.String() + assert.Equal(t, str, "00000000-0000-0000-0000-000000000000") +} + +func TestNewDataTypeWithNullValue(t *testing.T) { + dt, err := 
NewDataType(DtBool, NullBool) + assert.Nil(t, err) + assert.True(t, dt.IsNull()) + assert.Equal(t, dt.String(), "") + + dt, err = NewDataType(DtChar, NullChar) + assert.Nil(t, err) + assert.True(t, dt.IsNull()) + assert.Equal(t, dt.String(), "") + + dt, err = NewDataType(DtShort, NullShort) + assert.Nil(t, err) + assert.True(t, dt.IsNull()) + assert.Equal(t, dt.String(), "") + + dt, err = NewDataType(DtLong, NullLong) + assert.Nil(t, err) + assert.True(t, dt.IsNull()) + assert.Equal(t, dt.String(), "") + + dt, err = NewDataType(DtDate, NullTime) + assert.Nil(t, err) + assert.True(t, dt.IsNull()) + assert.Equal(t, dt.String(), "") + + dt, err = NewDataType(DtMonth, NullTime) + assert.Nil(t, err) + assert.True(t, dt.IsNull()) + assert.Equal(t, dt.String(), "") + + dt, err = NewDataType(DtTime, NullTime) + assert.Nil(t, err) + assert.True(t, dt.IsNull()) + assert.Equal(t, dt.String(), "") + + dt, err = NewDataType(DtMinute, NullTime) + assert.Nil(t, err) + assert.True(t, dt.IsNull()) + assert.Equal(t, dt.String(), "") + + dt, err = NewDataType(DtSecond, NullTime) + assert.Nil(t, err) + assert.True(t, dt.IsNull()) + assert.Equal(t, dt.String(), "") + + dt, err = NewDataType(DtDatetime, NullTime) + assert.Nil(t, err) + assert.True(t, dt.IsNull()) + assert.Equal(t, dt.String(), "") + + dt, err = NewDataType(DtTimestamp, NullTime) + assert.Nil(t, err) + assert.True(t, dt.IsNull()) + assert.Equal(t, dt.String(), "") + + dt, err = NewDataType(DtNanoTime, NullTime) + assert.Nil(t, err) + assert.True(t, dt.IsNull()) + assert.Equal(t, dt.String(), "") + + dt, err = NewDataType(DtNanoTimestamp, NullTime) + assert.Nil(t, err) + assert.True(t, dt.IsNull()) + assert.Equal(t, dt.String(), "") + + dt, err = NewDataType(DtFloat, NullFloat) + assert.Nil(t, err) + assert.True(t, dt.IsNull()) + assert.Equal(t, dt.String(), "") + + dt, err = NewDataType(DtDouble, NullDouble) + assert.Nil(t, err) + assert.True(t, dt.IsNull()) + assert.Equal(t, dt.String(), "") + + dt, err = NewDataType(DtSymbol, NullString) + assert.Nil(t, err) + assert.True(t, dt.IsNull()) + assert.Equal(t, dt.String(), "") + + dt, err = NewDataType(DtString, NullString) + assert.Nil(t, err) + assert.True(t, dt.IsNull()) + assert.Equal(t, dt.String(), "") + + dt, err = NewDataType(DtUUID, NullUUID) + assert.Nil(t, err) + assert.True(t, dt.IsNull()) + assert.Equal(t, dt.String(), "00000000-0000-0000-0000-000000000000") + + dt, err = NewDataType(DtAny, NullAny) + assert.Nil(t, err) + assert.True(t, dt.IsNull()) + assert.Equal(t, dt.String(), "") + + dt, err = NewDataType(DtCompress, NullCompress) + assert.Nil(t, err) + assert.True(t, dt.IsNull()) + assert.Equal(t, dt.String(), "") + + dt, err = NewDataType(DtDateHour, NullTime) + assert.Nil(t, err) + assert.True(t, dt.IsNull()) + assert.Equal(t, dt.String(), "") + + dt, err = NewDataType(DtIP, NullIP) + assert.Nil(t, err) + assert.True(t, dt.IsNull()) + assert.Equal(t, dt.String(), "0.0.0.0") + + dt, err = NewDataType(DtInt128, NullInt128) + assert.Nil(t, err) + assert.True(t, dt.IsNull()) + assert.Equal(t, dt.String(), "00000000000000000000000000000000") + + dt, err = NewDataType(DtBlob, NullBlob) + assert.Nil(t, err) + assert.True(t, dt.IsNull()) + assert.Equal(t, dt.String(), "") + + dt, err = NewDataType(DtComplex, NullComplex) + assert.Nil(t, err) + assert.True(t, dt.IsNull()) + assert.Equal(t, dt.String(), "") + + dt, err = NewDataType(DtPoint, NullPoint) + assert.Nil(t, err) + assert.True(t, dt.IsNull()) + assert.Equal(t, dt.String(), emptyPoint) + + dt, err = NewDataType(DtDuration, 
NullDuration) + assert.Nil(t, err) + assert.True(t, dt.IsNull()) + assert.Equal(t, dt.String(), "") +} diff --git a/model/dictionary.go b/model/dictionary.go new file mode 100644 index 0000000..90f24de --- /dev/null +++ b/model/dictionary.go @@ -0,0 +1,160 @@ +package model + +import ( + "errors" + "fmt" + "strings" + + "github.com/dolphindb/api-go/dialer/protocol" +) + +// Dictionary is a DataForm. +// Refer to https://www.dolphindb.com/help/DataTypesandStructures/DataForms/Dictionary.html for more details. +type Dictionary struct { + category *Category + + Keys *Vector + Values *Vector +} + +// NewDictionary returns an object of Dictionary according to keys and values. +// You can instantiate the Vector object by NewVector. +func NewDictionary(keys, val *Vector) *Dictionary { + return &Dictionary{ + category: &Category{ + DataForm: DfDictionary, + DataType: val.GetDataType(), + }, + Keys: keys, + Values: val, + } +} + +// Rows returns the row num of the DataForm. +func (dict *Dictionary) Rows() int { + return int(dict.Keys.RowCount) +} + +// GetDataForm returns the byte type of the DataForm. +func (dict *Dictionary) GetDataForm() DataFormByte { + return DfDictionary +} + +// Render serializes the DataForm with bo and input it into w. +func (dict *Dictionary) Render(w *protocol.Writer, bo protocol.ByteOrder) error { + err := dict.category.render(w) + if err != nil { + return err + } + + if dict.Keys != nil { + err = dict.Keys.Render(w, bo) + if err != nil { + return err + } + } + + if dict.Values != nil { + err = dict.Values.Render(w, bo) + } + + return err +} + +// GetDataType returns the byte type of the DataType. +func (dict *Dictionary) GetDataType() DataTypeByte { + return dict.category.DataType +} + +// GetDataTypeString returns the string format of the DataType. +func (dict *Dictionary) GetDataTypeString() string { + return GetDataTypeString(dict.category.DataType) +} + +// Get returns the value in dictionary based on the specified key. +func (dict *Dictionary) Get(key string) (DataType, error) { + if dict.Keys == nil || dict.Keys.Data == nil || + dict.Values == nil || dict.Values.Data == nil { + return nil, errors.New("empty dictionary") + } + + keys := dict.Keys.Data.StringList() + + ind := -1 + for k, v := range keys { + if v == key { + ind = k + break + } + } + + if ind < 0 { + return nil, fmt.Errorf("invalid key: %s", key) + } + + d := dict.Values.Data.Get(ind) + if d == nil { + return nil, fmt.Errorf("invalid key: %s", key) + } + + return d, nil +} + +// Set sets the key and value of a dictionary. +// If a key already exists, update the value, otherwise append the key-value pair. +func (dict *Dictionary) Set(key, value DataType) { + if dict.Keys == nil || dict.Keys.Data == nil || + dict.Values == nil || dict.Values.Data == nil { + return + } + + keyStr := key.String() + if _, err := dict.Get(keyStr); err == nil { + keys := dict.Keys.Data.StringList() + ind := -1 + for k, v := range keys { + if v == keyStr { + ind = k + break + } + } + + _ = dict.Values.Data.Set(ind, value) + return + } + + dict.Keys.Data.Append(key) + dict.Values.Data.Append(value) +} + +// KeyStrings returns the string list of dictionary keys. 
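+// It assumes a populated dictionary: Keys.Data is dereferenced without a nil check, so calling it on an empty Dictionary panics.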
+func (dict *Dictionary) KeyStrings() []string { + return dict.Keys.Data.StringList() +} + +func (dict *Dictionary) String() string { + if dict.Keys == nil || dict.Keys.Data == nil || + dict.Values == nil || dict.Values.Data == nil { + return "" + } + keyType := GetDataTypeString(dict.Keys.Data.DataType()) + valType := GetDataTypeString(dict.Values.Data.DataType()) + + by := strings.Builder{} + by.WriteString(fmt.Sprintf("dict<%s, %s>([\n", keyType, valType)) + + var val []string + if dict.Keys != nil && dict.Keys.Data != nil { + val = dict.Keys.formatString() + by.WriteString(fmt.Sprintf(" %s[%d]([%s]),\n", keyType, dict.Keys.RowCount, strings.Join(val, ", "))) + } + + if dict.Values != nil && dict.Values.Data != nil { + val = dict.Values.formatString() + by.WriteString(fmt.Sprintf(" %s[%d]([%s]),\n", valType, dict.Values.RowCount, strings.Join(val, ", "))) + } + + by.WriteString("])") + + return by.String() +} diff --git a/model/dictionary_test.go b/model/dictionary_test.go new file mode 100644 index 0000000..21739c2 --- /dev/null +++ b/model/dictionary_test.go @@ -0,0 +1,56 @@ +package model + +import ( + "bytes" + "testing" + + "github.com/dolphindb/api-go/dialer/protocol" + "github.com/stretchr/testify/assert" +) + +const dictExpect = "dict<string, string>([\n string[3]([key1, key2, key3]),\n string[3]([value1, value2, value3]),\n])" + +func TestDictionary(t *testing.T) { + keys, err := NewDataTypeListWithRaw(DtString, []string{"key1", "key2", "key3"}) + assert.Nil(t, err) + + values, err := NewDataTypeListWithRaw(DtString, []string{"value1", "value2", "value3"}) + assert.Nil(t, err) + + dict := NewDictionary(NewVector(keys), NewVector(values)) + assert.Equal(t, dict.GetDataForm(), DfDictionary) + assert.Equal(t, dict.GetDataType(), DtString) + assert.Equal(t, dict.GetDataTypeString(), "string") + assert.Equal(t, dict.Rows(), 3) + assert.Equal(t, dict.KeyStrings(), []string{"key1", "key2", "key3"}) + + v, err := dict.Get("key1") + assert.Nil(t, err) + + s := v.String() + assert.Equal(t, s, "value1") + + by := bytes.NewBufferString("") + w := protocol.NewWriter(by) + err = dict.Render(w, protocol.LittleEndian) + w.Flush() + assert.Nil(t, err) + assert.Equal(t, by.String(), "\x12\x05\x12\x01\x03\x00\x00\x00\x01\x00\x00\x00key1\x00key2\x00key3\x00\x12\x01\x03\x00\x00\x00\x01\x00\x00\x00value1\x00value2\x00value3\x00") + assert.Equal(t, dict.String(), dictExpect) + + k, err := NewDataType(DtString, "key4") + assert.Nil(t, err) + + v, err = NewDataType(DtString, "value4") + assert.Nil(t, err) + + dict.Set(k, v) + v, err = dict.Get("key4") + assert.Nil(t, err) + assert.Equal(t, v.String(), "value4") + + dict.Set(k, values.Get(2)) + v, err = dict.Get("key4") + assert.Nil(t, err) + assert.Equal(t, v.String(), "value3") +} diff --git a/model/io.go b/model/io.go new file mode 100644 index 0000000..d124e70 --- /dev/null +++ b/model/io.go @@ -0,0 +1,626 @@ +package model + +import ( + "math" + + "github.com/dolphindb/api-go/dialer/protocol" +) + +func (d *dataType) Render(w *protocol.Writer, bo protocol.ByteOrder) error { + var err error + + switch d.t { + case DtString, DtCode, DtFunction, DtHandle, DtDictionary, DtSymbol: + err = writeString(w, d.data.(string)) + case DtBlob: + err = writeBlob(w, bo, d.data.([]byte)) + case DtAny: + err = d.data.(DataForm).Render(w, bo) + case DtBool, DtChar, DtCompress: + err = w.WriteByte(d.data.(byte)) + case DtInt, DtTime, DtDate, DtMonth, DtMinute, DtSecond, DtDatetime, DtDateHour: + err = writeInt(w, bo, d.data.(int32)) + case DtShort: + err = writeShort(w, bo,
d.data.(int16)) + case DtVoid: + err = w.WriteByte(0) + case DtDouble: + err = writeDouble(w, bo, d.data.(float64)) + case DtFloat: + err = writeFloat(w, bo, d.data.(float32)) + case DtLong, DtTimestamp, DtNanoTime, DtNanoTimestamp: + err = writeLong(w, bo, d.data.(int64)) + case DtDuration: + err = writeDuration(w, bo, d.data.([2]uint32)) + case DtPoint, DtComplex: + err = writeDouble2(w, bo, d.data.([2]float64)) + case DtInt128, DtUUID, DtIP: + err = writeLong2(w, bo, d.data.([2]uint64)) + } + + return err +} + +func writeInt(w *protocol.Writer, bo protocol.ByteOrder, data int32) error { + buf := make([]byte, protocol.Uint32Size) + bo.PutUint32(buf, uint32(data)) + return w.Write(buf) +} + +func writeShort(w *protocol.Writer, bo protocol.ByteOrder, data int16) error { + buf := make([]byte, protocol.Uint16Size) + bo.PutUint16(buf, uint16(data)) + return w.Write(buf) +} + +func writeLong(w *protocol.Writer, bo protocol.ByteOrder, data int64) error { + buf := make([]byte, protocol.Uint64Size) + bo.PutUint64(buf, uint64(data)) + return w.Write(buf) +} + +func writeVoids(w *protocol.Writer, count int) error { + buf := make([]byte, count) + for i := 0; i < count; i++ { + buf[i] = byte(0) + } + return w.Write(buf) +} + +func writeFloat(w *protocol.Writer, bo protocol.ByteOrder, data float32) error { + buf := make([]byte, protocol.Uint32Size) + bo.PutUint32(buf, math.Float32bits(data)) + return w.Write(buf) +} + +func writeDouble(w *protocol.Writer, bo protocol.ByteOrder, data float64) error { + buf := make([]byte, protocol.Uint64Size) + bo.PutUint64(buf, math.Float64bits(data)) + return w.Write(buf) +} + +func writeDuration(w *protocol.Writer, bo protocol.ByteOrder, du [2]uint32) error { + buf := make([]byte, protocol.Uint64Size) + bo.PutUint32(buf, du[0]) + bo.PutUint32(buf[4:], du[1]) + return w.Write(buf) +} + +func writeDurations(w *protocol.Writer, du []uint32) error { + return w.Write(protocol.ByteSliceFromUint32Slice(du)) +} + +func writeDouble2(w *protocol.Writer, bo protocol.ByteOrder, du [2]float64) error { + buf := make([]byte, protocol.TwoUint64Size) + bo.PutUint64(buf, math.Float64bits(du[0])) + bo.PutUint64(buf[8:], math.Float64bits(du[1])) + return w.Write(buf) +} + +func writeDouble2s(w *protocol.Writer, du []float64) error { + return w.Write(protocol.ByteSliceFromFloat64Slice(du)) +} + +func writeLong2(w *protocol.Writer, bo protocol.ByteOrder, du [2]uint64) error { + buf := make([]byte, protocol.TwoUint64Size) + bo.PutUint64(buf, du[0]) + bo.PutUint64(buf[8:], du[1]) + return w.Write(buf) +} + +func writeLong2s(w *protocol.Writer, du []uint64) error { + return w.Write(protocol.ByteSliceFromUint64Slice(du)) +} + +func writeString(w *protocol.Writer, str string) error { + if err := w.WriteString(str); err != nil { + return err + } + + return w.WriteByte(protocol.StringSep) +} + +func writeStrings(w *protocol.Writer, str []string) error { + for _, v := range str { + err := writeString(w, v) + if err != nil { + return err + } + } + + return nil +} + +func writeBlob(w *protocol.Writer, bo protocol.ByteOrder, byt []byte) error { + length := len(byt) + + buf := make([]byte, 4) + bo.PutUint32(buf, uint32(length)) + + if err := w.Write(buf); err != nil { + return err + } + + return w.Write(byt) +} + +func writeBlobs(w *protocol.Writer, blobData [][]byte) error { + ind := 0 + buf := make([]byte, 4) + for _, v := range blobData { + protocol.LittleEndian.PutUint32(buf, uint32(len(v))) + err := w.Write(buf) + if err != nil { + return err + } + err = w.Write(v) + if err != nil { + return 
err + } + + ind += 4 + } + + return nil +} + +func readDataType(r protocol.Reader, t DataTypeByte, bo protocol.ByteOrder) (DataType, error) { + return read(r, t, bo) +} + +func read(r protocol.Reader, t DataTypeByte, bo protocol.ByteOrder) (*dataType, error) { + var err error + dt := &dataType{ + t: t, + bo: bo, + } + + switch t { + case DtVoid, DtBool, DtChar: + dt.data, err = r.ReadByte() + case DtShort: + dt.data, err = readShort(r, bo) + case DtFloat: + dt.data, err = readFloat(r, bo) + case DtDouble: + dt.data, err = readDouble(r, bo) + case DtDuration: + dt.data, err = readDuration(r, bo) + case DtInt, DtDate, DtMonth, DtTime, DtMinute, DtSecond, DtDatetime, DtDateHour, DtDateMinute: + dt.data, err = readInt(r, bo) + case DtLong, DtTimestamp, DtNanoTime, DtNanoTimestamp: + dt.data, err = readLong(r, bo) + case DtInt128, DtIP, DtUUID: + dt.data, err = readLong2(r, bo) + case DtComplex, DtPoint: + dt.data, err = readDouble2(r, bo) + case DtString, DtCode, DtFunction, DtHandle, DtSymbol: + dt.data, err = readString(r) + case DtBlob: + dt.data, err = readBlob(r, bo) + case DtAny: + dt.data, err = ParseDataForm(r, bo) + } + + return dt, err +} + +func readShort(r protocol.Reader, bo protocol.ByteOrder) (int16, error) { + buf, err := r.ReadCertainBytes(2) + if err != nil { + return 0, err + } + + return int16(bo.Uint16(buf)), nil +} + +func readShortsWithLittleEndian(count int, r protocol.Reader) ([]int16, error) { + buf, err := r.ReadCertainBytes(2 * count) + if err != nil || len(buf) == 0 { + return nil, err + } + + return protocol.Int16SliceFromByteSlice(buf), nil +} + +func readShortsWithBigEndian(count int, r protocol.Reader, bo protocol.ByteOrder) ([]int16, error) { + buf, err := r.ReadCertainBytes(2 * count) + if err != nil || len(buf) == 0 { + return nil, err + } + + res := make([]int16, count) + ind := 0 + for i := 0; i < count; i++ { + res[i] = int16(bo.Uint16(buf[ind : ind+2])) + ind += 2 + } + + return res, nil +} + +func readInt(r protocol.Reader, bo protocol.ByteOrder) (int32, error) { + buf, err := r.ReadCertainBytes(4) + if err != nil { + return 0, err + } + + return int32(bo.Uint32(buf)), nil +} + +func readIntWithLittleEndian(count int, r protocol.Reader) ([]int32, error) { + buf, err := r.ReadCertainBytes(4 * count) + if err != nil || len(buf) == 0 { + return nil, err + } + + return protocol.Int32SliceFromByteSlice(buf), nil +} + +func readIntWithBigEndian(count int, r protocol.Reader, bo protocol.ByteOrder) ([]int32, error) { + buf, err := r.ReadCertainBytes(4 * count) + if err != nil || len(buf) == 0 { + return nil, err + } + + res := make([]int32, count) + ind := 0 + for i := 0; i < count; i++ { + res[i] = int32(bo.Uint32(buf[ind : ind+4])) + ind += 4 + } + + return res, nil +} + +func readLong(r protocol.Reader, bo protocol.ByteOrder) (int64, error) { + buf, err := r.ReadCertainBytes(8) + if err != nil { + return 0, err + } + + return int64(bo.Uint64(buf)), nil +} + +func readLongsWithLittleEndian(count int, r protocol.Reader) ([]int64, error) { + buf, err := r.ReadCertainBytes(8 * count) + if err != nil || len(buf) == 0 { + return nil, err + } + + return protocol.Int64SliceFromByteSlice(buf), nil +} + +func readLongsWithBigEndian(count int, r protocol.Reader, bo protocol.ByteOrder) ([]int64, error) { + buf, err := r.ReadCertainBytes(8 * count) + if err != nil || len(buf) == 0 { + return nil, err + } + + res := make([]int64, count) + ind := 0 + for i := 0; i < count; i++ { + res[i] = int64(bo.Uint64(buf[ind : ind+8])) + ind += 8 + } + + return res, nil +} + +func 
readFloat(r protocol.Reader, bo protocol.ByteOrder) (float32, error) { + buf, err := r.ReadCertainBytes(4) + if err != nil { + return 0, err + } + + return math.Float32frombits(bo.Uint32(buf)), nil +} + +func readFloatsWithLittleEndian(count int, r protocol.Reader) ([]float32, error) { + buf, err := r.ReadCertainBytes(4 * count) + if err != nil || len(buf) == 0 { + return nil, err + } + + return protocol.Float32SliceFromByteSlice(buf), nil +} + +func readFloatsWithBigEndian(count int, r protocol.Reader, bo protocol.ByteOrder) ([]float32, error) { + res := make([]float32, count) + buf, err := r.ReadCertainBytes(4 * count) + if err != nil || len(buf) == 0 { + return nil, err + } + + ind := 0 + for i := 0; i < count; i++ { + res[i] = math.Float32frombits(bo.Uint32(buf[ind : ind+4])) + ind += 4 + } + + return res, nil +} + +func readDouble(r protocol.Reader, bo protocol.ByteOrder) (float64, error) { + buf, err := r.ReadCertainBytes(8) + if err != nil { + return 0, err + } + + return math.Float64frombits(bo.Uint64(buf)), nil +} + +func readDoublesWithBigEndian(count int, r protocol.Reader, bo protocol.ByteOrder) ([]float64, error) { + res := make([]float64, count) + buf, err := r.ReadCertainBytes(8 * count) + if err != nil || len(buf) == 0 { + return nil, err + } + + ind := 0 + for i := 0; i < count; i++ { + res[i] = math.Float64frombits(bo.Uint64(buf[ind : ind+8])) + ind += 8 + } + + return res, nil +} + +func readDoublesWithLittleEndian(count int, r protocol.Reader) ([]float64, error) { + buf, err := r.ReadCertainBytes(8 * count) + if err != nil || len(buf) == 0 { + return nil, err + } + + return protocol.Float64SliceFromByteSlice(buf), nil +} + +func readDuration(r protocol.Reader, bo protocol.ByteOrder) ([2]uint32, error) { + buf, err := r.ReadCertainBytes(8) + if err != nil { + return [2]uint32{}, err + } + + return [2]uint32{ + bo.Uint32(buf), + bo.Uint32(buf[4:]), + }, nil +} + +func readDurationsWithLittleEndian(count int, r protocol.Reader) ([]uint32, error) { + buf, err := r.ReadCertainBytes(8 * count) + if err != nil || len(buf) == 0 { + return nil, err + } + + return protocol.Uint32SliceFromByteSlice(buf), nil +} + +func readDurationsWithBigEndian(count int, r protocol.Reader, bo protocol.ByteOrder) ([]uint32, error) { + res := make([]uint32, 0, count) + buf, err := r.ReadCertainBytes(8 * count) + if err != nil || len(buf) == 0 { + return nil, err + } + + ind := 0 + for i := 0; i < count; i++ { + res = append(res, bo.Uint32(buf[ind:ind+4]), bo.Uint32(buf[ind+4:ind+8])) + ind += 8 + } + + return res, nil +} + +func readDouble2(r protocol.Reader, bo protocol.ByteOrder) ([2]float64, error) { + buf, err := r.ReadCertainBytes(protocol.TwoUint64Size) + if err != nil { + return [2]float64{}, err + } + + return [2]float64{ + math.Float64frombits(bo.Uint64(buf)), + math.Float64frombits(bo.Uint64(buf[8:])), + }, nil +} + +func readDouble2sWithLittleEndian(count int, r protocol.Reader) ([]float64, error) { + buf, err := r.ReadCertainBytes(16 * count) + if err != nil || len(buf) == 0 { + return nil, err + } + + return protocol.Float64SliceFromByteSlice(buf), nil +} + +func readDouble2sWithBigEndian(count int, r protocol.Reader, bo protocol.ByteOrder) ([]float64, error) { + res := make([]float64, 0, count) + buf, err := r.ReadCertainBytes(16 * count) + if err != nil || len(buf) == 0 { + return nil, err + } + + ind := 0 + for i := 0; i < count; i++ { + res = append(res, math.Float64frombits(bo.Uint64(buf[ind:ind+8])), math.Float64frombits(bo.Uint64(buf[ind+8:ind+16]))) + ind += 16 + } + + return 
res, nil +} + +func readLong2(r protocol.Reader, bo protocol.ByteOrder) ([2]uint64, error) { + buf, err := r.ReadCertainBytes(16) + if err != nil { + return [2]uint64{}, err + } + + return [2]uint64{ + bo.Uint64(buf), + bo.Uint64(buf[8:]), + }, nil +} + +func readLong2sWithLittleEndian(count int, r protocol.Reader) ([]uint64, error) { + buf, err := r.ReadCertainBytes(16 * count) + if err != nil || len(buf) == 0 { + return nil, err + } + + return protocol.Uint64SliceFromByteSlice(buf), nil +} + +func readLong2sWithBigEndian(count int, r protocol.Reader, bo protocol.ByteOrder) ([]uint64, error) { + res := make([]uint64, 0, count) + buf, err := r.ReadCertainBytes(16 * count) + if err != nil || len(buf) == 0 { + return nil, err + } + + ind := 0 + for i := 0; i < count; i++ { + res = append(res, bo.Uint64(buf[ind:ind+8]), bo.Uint64(buf[ind+8:ind+16])) + ind += 16 + } + + return res, nil +} + +func readString(r protocol.Reader) (string, error) { + byt, err := r.ReadBytes(protocol.StringSep) + if err != nil || len(byt) == 0 { + return "", err + } + + return protocol.StringFromByteSlice(byt), nil +} + +func readStrings(count int, r protocol.Reader) ([]string, error) { + res := make([]string, count) + for i := 0; i < count; i++ { + byt, err := r.ReadBytes(protocol.StringSep) + if err != nil { + return nil, err + } + + if len(byt) == 0 { + res[i] = "" + } else { + res[i] = protocol.StringFromByteSlice(byt) + } + } + + return res, nil +} + +func readBlob(r protocol.Reader, bo protocol.ByteOrder) ([]byte, error) { + bs, err := r.ReadCertainBytes(4) + if err != nil { + return nil, err + } + + length := bo.Uint32(bs) + if length == 0 { + return nil, nil + } + + return r.ReadCertainBytes(int(length)) +} + +func readBlobs(count int, r protocol.Reader, bo protocol.ByteOrder) ([][]byte, error) { + buf := protocol.NewBuffer(count, r) + return buf.ReadBlobs(bo) +} + +func readAny(count int, r protocol.Reader, bo protocol.ByteOrder) ([]DataForm, error) { + var err error + res := make([]DataForm, count) + for i := 0; i < count; i++ { + res[i], err = ParseDataForm(r, bo) + if err != nil { + return nil, err + } + } + + return res, nil +} + +func readList(r protocol.Reader, t DataTypeByte, bo protocol.ByteOrder, count int) (DataTypeList, error) { + dt := &dataTypeList{ + t: t, + count: count, + bo: bo, + } + + if bo == protocol.LittleEndian { + err := dt.littleEndianRead(count, r) + return dt, err + } + + err := dt.bigEndianRead(count, r) + return dt, err +} + +func (d *dataTypeList) littleEndianRead(count int, r protocol.Reader) error { + var err error + switch d.t { + case DtVoid, DtBool, DtChar: + d.charData, err = r.ReadCertainBytes(count) + case DtShort: + d.shortData, err = readShortsWithLittleEndian(count, r) + case DtFloat: + d.floatData, err = readFloatsWithLittleEndian(count, r) + case DtDouble: + d.doubleData, err = readDoublesWithLittleEndian(count, r) + case DtDuration: + d.durationData, err = readDurationsWithLittleEndian(count, r) + case DtInt, DtDate, DtMonth, DtTime, DtMinute, DtSecond, DtDatetime, DtDateHour, DtDateMinute: + d.intData, err = readIntWithLittleEndian(count, r) + case DtLong, DtTimestamp, DtNanoTime, DtNanoTimestamp: + d.longData, err = readLongsWithLittleEndian(count, r) + case DtInt128, DtIP, DtUUID: + d.long2Data, err = readLong2sWithLittleEndian(count, r) + case DtComplex, DtPoint: + d.double2Data, err = readDouble2sWithLittleEndian(count, r) + case DtString, DtCode, DtFunction, DtHandle, DtSymbol: + d.stringData, err = readStrings(count, r) + case DtBlob: + d.blobData, err = 
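+ // blobs travel as a 4-byte length prefix plus payload, so the batch
+ // read is delegated to protocol.Buffer (see readBlobs above)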
readBlobs(count, r, d.bo) + case DtAny: + d.anyData, err = readAny(count, r, d.bo) + } + + return err +} + +func (d *dataTypeList) bigEndianRead(count int, r protocol.Reader) error { + var err error + switch d.t { + case DtVoid, DtBool, DtChar: + d.charData, err = r.ReadCertainBytes(count) + case DtShort: + d.shortData, err = readShortsWithBigEndian(count, r, d.bo) + case DtFloat: + d.floatData, err = readFloatsWithBigEndian(count, r, d.bo) + case DtDouble: + d.doubleData, err = readDoublesWithBigEndian(count, r, d.bo) + case DtDuration: + d.durationData, err = readDurationsWithBigEndian(count, r, d.bo) + case DtInt, DtDate, DtMonth, DtTime, DtMinute, DtSecond, DtDatetime, DtDateHour, DtDateMinute: + d.intData, err = readIntWithBigEndian(count, r, d.bo) + case DtLong, DtTimestamp, DtNanoTime, DtNanoTimestamp: + d.longData, err = readLongsWithBigEndian(count, r, d.bo) + case DtInt128, DtIP, DtUUID: + d.long2Data, err = readLong2sWithBigEndian(count, r, d.bo) + case DtComplex, DtPoint: + d.double2Data, err = readDouble2sWithBigEndian(count, r, d.bo) + case DtString, DtCode, DtFunction, DtHandle, DtSymbol: + d.stringData, err = readStrings(count, r) + case DtBlob: + d.blobData, err = readBlobs(count, r, d.bo) + case DtAny: + d.anyData, err = readAny(count, r, d.bo) + } + + return err +} diff --git a/model/io_test.go b/model/io_test.go new file mode 100644 index 0000000..eb53405 --- /dev/null +++ b/model/io_test.go @@ -0,0 +1,317 @@ +package model + +import ( + "bytes" + "testing" + + "github.com/dolphindb/api-go/dialer/protocol" + "github.com/stretchr/testify/assert" +) + +func TestIo(t *testing.T) { + by := bytes.NewBufferString("") + w := protocol.NewWriter(by) + r := protocol.NewReader(by) + bo := protocol.LittleEndian + + dt, err := NewDataType(DtString, "io test") + assert.Nil(t, err) + + err = dt.Render(w, bo) + w.Flush() + assert.Nil(t, err) + assert.Equal(t, by.Bytes(), []byte{0x69, 0x6f, 0x20, 0x74, 0x65, 0x73, 0x74, 0x0}) + + pDt, err := read(r, DtString, bo) + assert.Nil(t, err) + assert.Equal(t, pDt.Value(), dt.Value()) + + dt, err = NewDataType(DtFloat, float32(1.0)) + assert.Nil(t, err) + + err = dt.Render(w, bo) + w.Flush() + assert.Nil(t, err) + assert.Equal(t, by.Bytes(), []byte{0x0, 0x0, 0x80, 0x3f}) + + pDt, err = read(r, DtFloat, bo) + assert.Nil(t, err) + assert.Equal(t, pDt.Value(), dt.Value()) + + dt, err = NewDataType(DtDouble, float64(1)) + assert.Nil(t, err) + + err = dt.Render(w, bo) + w.Flush() + assert.Nil(t, err) + assert.Equal(t, by.Bytes(), []byte{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xf0, 0x3f}) + + pDt, err = read(r, DtDouble, bo) + assert.Nil(t, err) + assert.Equal(t, pDt.Value(), dt.Value()) + + dt, err = NewDataType(DtDuration, "10H") + assert.Nil(t, err) + + err = dt.Render(w, bo) + w.Flush() + assert.Nil(t, err) + assert.Equal(t, by.Bytes(), []byte{0xa, 0x0, 0x0, 0x0, 0x5, 0x0, 0x0, 0x0}) + + pDt, err = read(r, DtDuration, bo) + assert.Nil(t, err) + assert.Equal(t, pDt.Value(), dt.Value()) + + dt, err = NewDataType(DtComplex, [2]float64{1, 1}) + assert.Nil(t, err) + + err = dt.Render(w, bo) + w.Flush() + assert.Nil(t, err) + assert.Equal(t, by.Bytes(), []byte{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xf0, 0x3f, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xf0, 0x3f}) + + pDt, err = read(r, DtComplex, bo) + assert.Nil(t, err) + assert.Equal(t, pDt.Value(), dt.Value()) + + dt, err = NewDataType(DtBlob, []byte{1, 2, 3, 4, 5}) + assert.Nil(t, err) + + err = dt.Render(w, bo) + w.Flush() + assert.Nil(t, err) + assert.Equal(t, by.Bytes(), []byte{0x5, 0x0, 0x0, 0x0, 0x1, 0x2, 0x3, 0x4, 0x5}) + 
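+ // the leading 0x05 0x00 0x00 0x00 is the little-endian length prefix
+ // written by writeBlob; the remaining five bytes are the raw payload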
+ pDt, err = read(r, DtBlob, bo) + assert.Nil(t, err) + assert.Equal(t, pDt.Value(), dt.Value()) + + dt, err = NewDataType(DtShort, int16(10)) + assert.Nil(t, err) + + err = dt.Render(w, bo) + w.Flush() + assert.Nil(t, err) + assert.Equal(t, by.Bytes(), []byte{0xa, 0x0}) + + pDt, err = read(r, DtShort, bo) + assert.Nil(t, err) + assert.Equal(t, pDt.Value(), dt.Value()) + + s := NewScalar(dt) + dt, err = NewDataType(DtAny, s) + assert.Nil(t, err) + + err = dt.Render(w, bo) + w.Flush() + assert.Nil(t, err) + assert.Equal(t, by.Bytes(), []byte{0x3, 0x0, 0xa, 0x0}) + + l, err := readList(r, DtAny, bo, 1) + assert.Nil(t, err) + assert.Equal(t, l.Get(0).Value(), dt.Value()) + + dt, err = NewDataType(DtBool, byte(1)) + assert.Nil(t, err) + + err = dt.Render(w, bo) + w.Flush() + assert.Nil(t, err) + assert.Equal(t, by.Bytes(), []byte{0x1}) + + l, err = readList(r, DtBool, bo, 1) + assert.Nil(t, err) + assert.Equal(t, l.Get(0).Value(), dt.Value()) + + dt, err = NewDataType(DtShort, int16(10)) + assert.Nil(t, err) + + err = dt.Render(w, bo) + w.Flush() + assert.Nil(t, err) + assert.Equal(t, by.Bytes(), []byte{0xa, 0x0}) + + l, err = readList(r, DtShort, bo, 1) + assert.Nil(t, err) + assert.Equal(t, l.Get(0).Value(), dt.Value()) + + err = dt.Render(w, bo) + w.Flush() + assert.Nil(t, err) + assert.Equal(t, by.Bytes(), []byte{0xa, 0x0}) + + l, err = readList(r, DtShort, protocol.BigEndian, 1) + assert.Nil(t, err) + assert.Equal(t, l.Get(0).Value(), int16(2560)) + + dt, err = NewDataType(DtLong, int64(10)) + assert.Nil(t, err) + + err = dt.Render(w, bo) + w.Flush() + assert.Nil(t, err) + assert.Equal(t, by.Bytes(), []byte{0xa, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0}) + + l, err = readList(r, DtLong, bo, 1) + assert.Nil(t, err) + assert.Equal(t, l.Get(0).Value(), int64(10)) + + err = dt.Render(w, bo) + w.Flush() + assert.Nil(t, err) + assert.Equal(t, by.Bytes(), []byte{0xa, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0}) + + pDt, err = read(r, DtLong, bo) + assert.Nil(t, err) + assert.Equal(t, pDt.Value(), dt.Value()) + + err = dt.Render(w, bo) + w.Flush() + assert.Nil(t, err) + assert.Equal(t, by.Bytes(), []byte{0xa, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0}) + + l, err = readList(r, DtLong, protocol.BigEndian, 1) + assert.Nil(t, err) + assert.Equal(t, l.Get(0).Value(), int64(720575940379279360)) + + dt, err = NewDataType(DtInt, int32(10)) + assert.Nil(t, err) + + err = dt.Render(w, bo) + w.Flush() + assert.Nil(t, err) + assert.Equal(t, by.Bytes(), []byte{0xa, 0x0, 0x0, 0x0}) + + l, err = readList(r, DtInt, protocol.BigEndian, 1) + assert.Nil(t, err) + assert.Equal(t, l.Get(0).Value(), int32(167772160)) + + dt, err = NewDataType(DtInt128, "e1671797c52e15f763380b45e841ec32") + assert.Nil(t, err) + + err = dt.Render(w, bo) + w.Flush() + assert.Nil(t, err) + assert.Equal(t, by.Bytes(), []byte{0x32, 0xec, 0x41, 0xe8, 0x45, 0xb, 0x38, 0x63, 0xf7, 0x15, 0x2e, 0xc5, 0x97, 0x17, 0x67, 0xe1}) + + pDt, err = read(r, DtInt128, bo) + assert.Nil(t, err) + assert.Equal(t, pDt.Value(), dt.Value()) + + dt, err = NewDataType(DtUUID, "e5eca940-5b99-45d0-bf1c-620f6b1b9d5b") + assert.Nil(t, err) + + err = dt.Render(w, bo) + w.Flush() + assert.Nil(t, err) + assert.Equal(t, by.Bytes(), []byte{0x5b, 0x9d, 0x1b, 0x6b, 0xf, 0x62, 0x1c, 0xbf, 0xd0, 0x45, 0x99, 0x5b, 0x40, 0xa9, 0xec, 0xe5}) + + l, err = readList(r, DtUUID, bo, 1) + assert.Nil(t, err) + assert.Equal(t, l.Get(0).Value(), dt.Value()) + + err = dt.Render(w, bo) + w.Flush() + assert.Nil(t, err) + assert.Equal(t, by.Bytes(), []byte{0x5b, 0x9d, 0x1b, 0x6b, 0xf, 0x62, 0x1c, 0xbf, 0xd0, 0x45, 
0x99, 0x5b, 0x40, 0xa9, 0xec, 0xe5}) + + l, err = readList(r, DtUUID, protocol.BigEndian, 1) + assert.Nil(t, err) + assert.Equal(t, l.Get(0).Value(), "40a9ece5-995b-d045-1cbf-5b9d1b6b0f620000") + + dt, err = NewDataType(DtFloat, float32(1)) + assert.Nil(t, err) + + err = dt.Render(w, bo) + w.Flush() + assert.Nil(t, err) + assert.Equal(t, by.Bytes(), []byte{0x0, 0x0, 0x80, 0x3f}) + + l, err = readList(r, DtFloat, bo, 1) + assert.Nil(t, err) + assert.Equal(t, l.Get(0).Value(), dt.Value()) + + err = dt.Render(w, bo) + w.Flush() + assert.Nil(t, err) + assert.Equal(t, by.Bytes(), []byte{0x0, 0x0, 0x80, 0x3f}) + + l, err = readList(r, DtFloat, protocol.BigEndian, 1) + assert.Nil(t, err) + assert.Equal(t, l.Get(0).Value(), float32(4.6006e-41)) + + dt, err = NewDataType(DtDouble, float64(1)) + assert.Nil(t, err) + + err = dt.Render(w, bo) + w.Flush() + assert.Nil(t, err) + assert.Equal(t, by.Bytes(), []byte{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xf0, 0x3f}) + + l, err = readList(r, DtDouble, bo, 1) + assert.Nil(t, err) + assert.Equal(t, l.Get(0).Value(), dt.Value()) + + err = dt.Render(w, bo) + w.Flush() + assert.Nil(t, err) + assert.Equal(t, by.Bytes(), []byte{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xf0, 0x3f}) + + l, err = readList(r, DtDouble, protocol.BigEndian, 1) + assert.Nil(t, err) + assert.Equal(t, l.Get(0).Value(), 3.03865e-319) + + dt, err = NewDataType(DtDuration, "10H") + assert.Nil(t, err) + + err = dt.Render(w, bo) + w.Flush() + assert.Nil(t, err) + assert.Equal(t, by.Bytes(), []byte{0xa, 0x0, 0x0, 0x0, 0x5, 0x0, 0x0, 0x0}) + + l, err = readList(r, DtDuration, bo, 1) + assert.Nil(t, err) + assert.Equal(t, l.Get(0).Value(), dt.Value()) + + err = dt.Render(w, bo) + w.Flush() + assert.Nil(t, err) + assert.Equal(t, by.Bytes(), []byte{0xa, 0x0, 0x0, 0x0, 0x5, 0x0, 0x0, 0x0}) + + l, err = readList(r, DtDuration, protocol.BigEndian, 1) + assert.Nil(t, err) + assert.Equal(t, l.Get(0).Value(), "167772160") + + dt, err = NewDataType(DtComplex, [2]float64{1, 1}) + assert.Nil(t, err) + + err = dt.Render(w, bo) + w.Flush() + assert.Nil(t, err) + assert.Equal(t, by.Bytes(), []byte{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xf0, 0x3f, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xf0, 0x3f}) + + l, err = readList(r, DtComplex, bo, 1) + assert.Nil(t, err) + assert.Equal(t, l.Get(0).Value(), dt.Value()) + + err = dt.Render(w, bo) + w.Flush() + assert.Nil(t, err) + assert.Equal(t, by.Bytes(), []byte{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xf0, 0x3f, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xf0, 0x3f}) + + l, err = readList(r, DtComplex, protocol.BigEndian, 1) + assert.Nil(t, err) + assert.Equal(t, l.Get(0).Value(), "0.00000+0.00000i") + + dt, err = NewDataType(DtBlob, []byte{1, 2, 3, 4}) + assert.Nil(t, err) + + err = dt.Render(w, bo) + w.Flush() + assert.Nil(t, err) + assert.Equal(t, by.Bytes(), []byte{0x4, 0x0, 0x0, 0x0, 0x1, 0x2, 0x3, 0x4}) + + l, err = readList(r, DtBlob, bo, 1) + assert.Nil(t, err) + assert.Equal(t, l.Get(0).Value(), dt.Value()) +} diff --git a/model/matrix.go b/model/matrix.go new file mode 100644 index 0000000..720524f --- /dev/null +++ b/model/matrix.go @@ -0,0 +1,170 @@ +package model + +import ( + "fmt" + "strings" + + "github.com/dolphindb/api-go/dialer/protocol" +) + +// Matrix is a DataForm. +// Refer to https://www.dolphindb.cn/cn/help/130/DataTypesandStructures/DataForms/Matrix.html for details. 
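+// Data is stored column-major: the element at (row, col) lives at index
+// col*rows + row, which is what getIndex computes below.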
+type Matrix struct { + // The first bit of the first byte determines if the matrix has row labels + // The second bit of the first byte determines if the matrix has column labels + category *Category + + RowLabels *Vector + ColumnLabels *Vector + Data *Vector +} + +// NewMatrix returns an object of matrix according to data, rowLabels and columnLabels. +// RowLabels and columnLabels are optional. +// You can instantiate the Vector object by using NewVector. +func NewMatrix(data, rowLabels, columnLabels *Vector) *Matrix { + return &Matrix{ + category: &Category{ + DataForm: DfMatrix, + DataType: data.GetDataType(), + }, + Data: data, + RowLabels: rowLabels, + ColumnLabels: columnLabels, + } +} + +// GetDataForm returns the byte type of the DataForm. +func (mtx *Matrix) GetDataForm() DataFormByte { + return DfMatrix +} + +// GetDataType returns the byte type of the DataType. +func (mtx *Matrix) GetDataType() DataTypeByte { + return mtx.category.DataType +} + +// GetDataTypeString returns the string format of the DataType. +func (mtx *Matrix) GetDataTypeString() string { + return GetDataTypeString(mtx.category.DataType) +} + +// Rows returns the row num of the DataForm. +func (mtx *Matrix) Rows() int { + return int(mtx.Data.RowCount) +} + +// Get returns the DataType at the position given by row and col. +func (mtx *Matrix) Get(row, col int) DataType { + return mtx.Data.Get(mtx.getIndex(row, col)) +} + +// Set sets the DataType at the position given by row and col. +// The underlying index is col*rowCount + row (column-major; see getIndex). +// If the index is out of range, an error is returned; otherwise the +// original value is overwritten. +func (mtx *Matrix) Set(row, col int, d DataType) error { + return mtx.Data.Set(mtx.getIndex(row, col), d) +} + +// SetNull sets the DataType in Matrix to null according to the row and col. +func (mtx *Matrix) SetNull(row, col int) { + mtx.Data.SetNull(mtx.getIndex(row, col)) +} + +// IsNull checks whether the value located by row and col is null. +func (mtx *Matrix) IsNull(row, col int) bool { + return mtx.Data.IsNull(mtx.getIndex(row, col)) +} + +// Render serializes the DataForm with bo and input it into w.
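+// The byte written immediately after the category is a label flag: bit 0
+// marks the presence of row labels, bit 1 the presence of column labels
+// (the same bits parseMatrix checks when decoding).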
+func (mtx *Matrix) Render(w *protocol.Writer, bo protocol.ByteOrder) error { + err := mtx.category.render(w) + if err != nil { + return err + } + + labelFlag := 0 + if mtx.RowLabels != nil { + labelFlag++ + } + + if mtx.ColumnLabels != nil { + labelFlag += 2 + } + + err = w.Write([]byte{byte(labelFlag)}) + if err != nil { + return err + } + + if mtx.RowLabels != nil { + err = mtx.RowLabels.Render(w, bo) + if err != nil { + return err + } + } + + if mtx.ColumnLabels != nil { + err = mtx.ColumnLabels.Render(w, bo) + if err != nil { + return err + } + } + + err = mtx.category.render(w) + if err != nil { + return err + } + + err = mtx.Data.renderLength(w, bo) + if err != nil { + return err + } + + return mtx.Data.Data.Render(w, bo) +} + +func (mtx *Matrix) String() string { + if mtx.Data == nil { + return "" + } + + by := strings.Builder{} + by.WriteString(fmt.Sprintf("matrix<%s>[%dr][%dc]({\n", GetDataTypeString(mtx.Data.GetDataType()), + mtx.Data.RowCount, mtx.Data.ColumnCount)) + + if mtx.RowLabels != nil && mtx.RowLabels.Data != nil { + val := mtx.RowLabels.formatString() + by.WriteString(fmt.Sprintf(" rows: [%s],\n", strings.Join(val, ", "))) + } else { + by.WriteString(" rows: null,\n") + } + + if mtx.ColumnLabels != nil && mtx.ColumnLabels.Data != nil { + val := mtx.ColumnLabels.formatString() + by.WriteString(fmt.Sprintf(" cols: [%s],\n", strings.Join(val, ", "))) + } else { + by.WriteString(" cols: null,\n") + } + + if mtx.Data != nil && mtx.Data.Data != nil { + val := mtx.Data.formatString() + by.WriteString(fmt.Sprintf(" data: %sArray(%d) [\n", GetDataTypeString(mtx.Data.GetDataType()), + mtx.Data.ColumnCount*mtx.Data.RowCount)) + for _, v := range val { + by.WriteString(fmt.Sprintf(" %s,\n", v)) + } + + by.WriteString(" ]\n") + } else { + by.WriteString(" data: null,\n") + } + + by.WriteString("})") + return by.String() +} + +func (mtx *Matrix) getIndex(row, col int) int { + return col*mtx.Rows() + row +} diff --git a/model/matrix_test.go b/model/matrix_test.go new file mode 100644 index 0000000..3e35d41 --- /dev/null +++ b/model/matrix_test.go @@ -0,0 +1,51 @@ +package model + +import ( + "bytes" + "testing" + + "github.com/dolphindb/api-go/dialer/protocol" + "github.com/stretchr/testify/assert" +) + +const matrixExpect = "matrix<string>[3r][1c]({\n rows: [value1, value2, value3],\n cols: [value1, value2, value3],\n data: stringArray(3) [\n key1,\n key2,\n key3,\n ]\n})" + +func TestMatrix(t *testing.T) { + data, err := NewDataTypeListWithRaw(DtString, []string{"key1", "key2", "key3"}) + assert.Nil(t, err) + + rl, err := NewDataTypeListWithRaw(DtString, []string{"value1", "value2", "value3"}) + assert.Nil(t, err) + + cl, err := NewDataTypeListWithRaw(DtString, []string{"value1", "value2", "value3"}) + assert.Nil(t, err) + + mtx := NewMatrix(NewVector(data), NewVector(rl), NewVector(cl)) + assert.Equal(t, mtx.GetDataForm(), DfMatrix) + assert.Equal(t, mtx.GetDataType(), DtString) + assert.Equal(t, mtx.GetDataTypeString(), "string") + assert.Equal(t, mtx.Rows(), 3) + + dt := mtx.Get(2, 0) + assert.Equal(t, dt.String(), "key3") + assert.False(t, mtx.IsNull(2, 0)) + + mtx.SetNull(2, 0) + assert.True(t, mtx.IsNull(2, 0)) + assert.Equal(t, mtx.Get(2, 0).String(), "") + + dt, err = NewDataType(DtString, "key3") + assert.Nil(t, err) + + err = mtx.Set(2, 0, dt) + assert.Nil(t, err) + assert.False(t, mtx.IsNull(2, 0)) + + by := bytes.NewBufferString("") + w := protocol.NewWriter(by) + err = mtx.Render(w, protocol.LittleEndian) + w.Flush() + assert.Nil(t, err) + assert.Equal(t, by.String(),
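+ // expected layout: category, label flag 0x03 (row and column labels
+ // present), row-label vector, column-label vector, category again,
+ // dimensions, then the column-major data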
"\x12\x03\x03\x12\x01\x03\x00\x00\x00\x01\x00\x00\x00value1\x00value2\x00value3\x00\x12\x01\x03\x00\x00\x00\x01\x00\x00\x00value1\x00value2\x00value3\x00\x12\x03\x03\x00\x00\x00\x01\x00\x00\x00key1\x00key2\x00key3\x00") + assert.Equal(t, mtx.String(), matrixExpect) +} diff --git a/model/pair.go b/model/pair.go new file mode 100644 index 0000000..0fe467e --- /dev/null +++ b/model/pair.go @@ -0,0 +1,76 @@ +package model + +import ( + "fmt" + "strings" + + "github.com/dolphindb/api-go/dialer/protocol" +) + +// Pair is a DataForm. +// Refer to https://www.dolphindb.cn/cn/help/130/DataTypesandStructures/DataForms/Pair.html for details. +type Pair struct { + category *Category + + Vector *Vector +} + +// NewPair returns an object of pair with specified vector v. +// You can instantiate it by NewVector. +func NewPair(v *Vector) *Pair { + return &Pair{ + category: &Category{ + DataForm: DfPair, + DataType: v.GetDataType(), + }, + Vector: v, + } +} + +// Rows returns the row num of the DataForm. +func (p *Pair) Rows() int { + return int(p.Vector.RowCount) +} + +// GetDataForm returns the byte type of the DataForm. +func (p *Pair) GetDataForm() DataFormByte { + return DfPair +} + +// GetDataType returns the byte type of the DataType. +func (p *Pair) GetDataType() DataTypeByte { + return p.category.DataType +} + +// GetDataTypeString returns the string format of the DataType. +func (p *Pair) GetDataTypeString() string { + return GetDataTypeString(p.category.DataType) +} + +// Render serializes the DataForm with bo and input it into w. +func (p *Pair) Render(w *protocol.Writer, bo protocol.ByteOrder) error { + err := p.category.render(w) + if err != nil { + return err + } + + buf := make([]byte, 8) + + bo.PutUint32(buf[:4], 2) + bo.PutUint32(buf[4:], 1) + err = w.Write(buf) + if err != nil { + return err + } + + return p.Vector.renderData(w, bo) +} + +func (p *Pair) String() string { + if p.Vector == nil { + return "" + } + + val := p.Vector.formatString() + return fmt.Sprintf("pair<%s>([%s])", GetDataTypeString(p.Vector.GetDataType()), strings.Join(val, ", ")) +} diff --git a/model/pair_test.go b/model/pair_test.go new file mode 100644 index 0000000..6554c31 --- /dev/null +++ b/model/pair_test.go @@ -0,0 +1,30 @@ +package model + +import ( + "bytes" + "testing" + + "github.com/dolphindb/api-go/dialer/protocol" + "github.com/stretchr/testify/assert" +) + +const pairExpect = "pair([key1, key2])" + +func TestPair(t *testing.T) { + data, err := NewDataTypeListWithRaw(DtString, []string{"key1", "key2"}) + assert.Nil(t, err) + + pair := NewPair(NewVector(data)) + assert.Equal(t, pair.GetDataForm(), DfPair) + assert.Equal(t, pair.GetDataType(), DtString) + assert.Equal(t, pair.GetDataTypeString(), "string") + assert.Equal(t, pair.Rows(), 2) + + by := bytes.NewBufferString("") + w := protocol.NewWriter(by) + err = pair.Render(w, protocol.LittleEndian) + w.Flush() + assert.Nil(t, err) + assert.Equal(t, by.String(), "\x12\x02\x02\x00\x00\x00\x01\x00\x00\x00key1\x00key2\x00") + assert.Equal(t, pair.String(), pairExpect) +} diff --git a/model/parse_dataform.go b/model/parse_dataform.go new file mode 100644 index 0000000..0d1194b --- /dev/null +++ b/model/parse_dataform.go @@ -0,0 +1,297 @@ +package model + +import ( + "fmt" + + "github.com/dolphindb/api-go/dialer/protocol" +) + +func parseDictionary(r protocol.Reader, bo protocol.ByteOrder, c *Category) (*Dictionary, error) { + var err error + dict := &Dictionary{ + category: c, + } + + dict.Keys, err = parseVectorWithCategory(r, bo) + if err != nil { + return 
nil, err + } + + dict.Values, err = parseVectorWithCategory(r, bo) + if err != nil { + return nil, err + } + + return dict, nil +} + +func parseMatrix(r protocol.Reader, bo protocol.ByteOrder, c *Category) (*Matrix, error) { + mtx := &Matrix{ + category: c, + } + + buf, err := r.ReadByte() + if err != nil { + return nil, err + } + + if buf&0x01 == 0x01 { + mtx.RowLabels, err = parseVectorWithCategory(r, bo) + if err != nil { + return nil, err + } + } + + if buf&0x02 == 0x02 { + mtx.ColumnLabels, err = parseVectorWithCategory(r, bo) + if err != nil { + return nil, err + } + } + + mtx.Data, err = parseVectorWithCategory(r, bo) + if err != nil { + return nil, err + } + + return mtx, nil +} + +func parsePair(r protocol.Reader, bo protocol.ByteOrder, c *Category) (*Pair, error) { + var err error + pr := &Pair{ + category: c, + } + + pr.Vector, err = parseVector(r, bo, c) + if err != nil { + return nil, err + } + + return pr, nil +} + +func parseSet(r protocol.Reader, bo protocol.ByteOrder, c *Category) (*Set, error) { + var err error + s := &Set{ + category: c, + } + + s.Vector, err = parseVectorWithCategory(r, bo) + if err != nil { + return nil, err + } + + return s, nil +} + +func parseTable(r protocol.Reader, bo protocol.ByteOrder, c *Category) (*Table, error) { + var err error + tl := &Table{ + category: c, + } + + tl.rowCount, tl.columnCount, err = read2Uint32(r, bo) + if err != nil { + return nil, err + } + + tl.tableName, err = readDataType(r, DtString, bo) + if err != nil { + return nil, err + } + + tl.columnNames, err = readList(r, DtString, bo, int(tl.columnCount)) + if err != nil { + return nil, err + } + + tl.ColNames = tl.columnNames.StringList() + + tl.columnValues, err = parseVectorWithCategoryList(r, bo, int(tl.columnCount)) + if err != nil { + return nil, err + } + + return tl, nil +} + +func parseVectorWithCategory(r protocol.Reader, bo protocol.ByteOrder) (*Vector, error) { + c, err := parseCategory(r) + if err != nil { + return nil, err + } + + return parseVector(r, bo, c) +} + +func parseVectorWithCategoryList(r protocol.Reader, bo protocol.ByteOrder, count int) ([]*Vector, error) { + var err error + list := make([]*Vector, count) + for i := 0; i < count; i++ { + list[i], err = parseVectorWithCategory(r, bo) + if err != nil { + return nil, err + } + } + + return list, nil +} + +func parseVector(r protocol.Reader, bo protocol.ByteOrder, c *Category) (*Vector, error) { + var err error + vct := &Vector{ + category: c, + } + + vct.RowCount, vct.ColumnCount, err = read2Uint32(r, bo) + if err != nil { + return nil, err + } + + err = readVectorData(r, bo, vct) + if err != nil { + return nil, err + } + + return vct, nil +} + +func readVectorData(r protocol.Reader, bo protocol.ByteOrder, dv *Vector) error { + var err error + + dt := dv.GetDataType() + switch { + case dt > 128: + dv.Extend = new(DataTypeExtend) + dv.Extend.BaseID, dv.Extend.BaseSize, err = read2Uint32(r, bo) + if err != nil { + return err + } + + if dv.Extend.BaseSize != 0 { + dv.Extend.Base, err = readList(r, DtString, bo, int(dv.Extend.BaseSize)) + if err != nil { + return err + } + } + + dv.Data, err = readList(r, DtInt, bo, int(dv.RowCount*dv.ColumnCount)) + if err != nil { + return err + } + case dt > 64: + err = parseArrayVector(r, bo, dv) + if err != nil { + return err + } + default: + dv.Data, err = readList(r, dt, bo, int(dv.RowCount*dv.ColumnCount)) + } + + return err +} + +func parseArrayVector(r protocol.Reader, bo protocol.ByteOrder, dv *Vector) error { + arrVct := make([]*ArrayVector, 0) + dt := 
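+ // array-vector type codes are the element type offset by 64, so
+ // subtracting 64 recovers the base DataTypeByte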
dv.GetDataType() - 64 + for i := 0; i < int(dv.RowCount); { + rc, cc, err := read2Uint16(r, bo) + if err != nil { + return err + } + + total := 0 + buf, err := r.ReadCertainBytes(int(rc * cc)) + if err != nil { + return err + } + + switch { + case cc == 1: + res := protocol.Uint8SliceFromByteSlice(buf) + for _, v := range res { + total += int(v) + } + case cc == 2: + res := protocol.Uint16SliceFromByteSlice(buf) + for _, v := range res { + total += int(v) + } + case cc == 4: + res := protocol.Uint32SliceFromByteSlice(buf) + for _, v := range res { + total += int(v) + } + } + + i += int(rc) + data, err := readList(r, dt, bo, total) + if err != nil { + return err + } + + arrVct = append(arrVct, &ArrayVector{ + rowCount: rc, + unit: cc, + lengths: buf, + data: data, + }) + } + + dv.ArrayVector = arrVct + return nil +} + +func parseScalar(r protocol.Reader, bo protocol.ByteOrder, c *Category) (*Scalar, error) { + var err error + s := &Scalar{ + category: c, + } + + s.DataType, err = readDataType(r, c.DataType, bo) + return s, err +} + +func parseChart(r protocol.Reader, bo protocol.ByteOrder, c *Category) (*Chart, error) { + var err error + ch := &Chart{ + category: c, + } + + vc, err := parseVectorWithCategory(r, bo) + if err != nil { + return nil, err + } + + values, err := parseVectorWithCategory(r, bo) + if err != nil { + return nil, err + } + + if values.GetDataType() != DtAny { + return nil, fmt.Errorf("invalid data") + } + + keys := vc.Data.StringList() + val := values.Data.Value() + for k, v := range keys { + df := val[k].(DataForm) + switch v { + case "title": + ch.Title = df.(*Vector) + case "chartType": + ch.ChartType = df.(*Scalar) + case "stacking": + ch.Stacking = df.(*Scalar) + case "extras": + ch.Extras = df.(*Dictionary) + case "data": + ch.Data = df.(*Matrix) + } + } + + ch.rowCount = len(keys) + + return ch, nil +} diff --git a/model/parse_dataform_test.go b/model/parse_dataform_test.go new file mode 100644 index 0000000..4aa6a27 --- /dev/null +++ b/model/parse_dataform_test.go @@ -0,0 +1,75 @@ +package model + +import ( + "bytes" + "testing" + + "github.com/dolphindb/api-go/dialer/protocol" + + "github.com/stretchr/testify/assert" +) + +var rawDataFormBytes = map[DataFormByte][]byte{ + DfScalar: {115, 99, 97, 108, 97, 114, 0}, + DfTable: {1, 0, 0, 0, 1, 0, 0, 0, 116, 97, 98, 108, 101, 0, 99, 111, 108, 0, 18, 1, 3, 0, 0, 0, 1, 0, 0, 0, 99, 111, 108, 49, 0, 99, 111, 108, 49, 0, 99, 111, 108, 49, 0}, + DfVector: {1, 0, 0, 0, 1, 0, 0, 0, 1, 0, 0, 0, 2, 0, 0, 0, 118, 101, 99, 116, 111, 114, 0, 118, 101, 99, 116, 111, 114, 0, 0, 0, 0, 0}, + DfPair: {3, 0, 0, 0, 1, 0, 0, 0, 107, 101, 121, 49, 0, 107, 101, 121, 50, 0, 107, 101, 121, 51, 0}, + DfMatrix: {3, 18, 1, 3, 0, 0, 0, 1, 0, 0, 0, 118, 97, 108, 117, 101, 49, 0, 118, 97, 108, 117, 101, 50, 0, 118, 97, 108, 117, 101, 51, 0, 18, 1, 3, 0, 0, 0, 1, 0, 0, 0, 118, 97, 108, 117, 101, 49, 0, 118, 97, 108, 117, 101, 50, 0, 118, 97, 108, 117, 101, 51, 0, 18, 3, 3, 0, 0, 0, 1, 0, 0, 0, 107, 101, 121, 49, 0, 107, 101, 121, 50, 0, 107, 101, 121, 51, 0}, + DfSet: {18, 1, 3, 0, 0, 0, 1, 0, 0, 0, 107, 101, 121, 49, 0, 107, 101, 121, 50, 0, 107, 101, 121, 51, 0}, + DfDictionary: {18, 1, 3, 0, 0, 0, 1, 0, 0, 0, 107, 101, 121, 49, 0, 107, 101, 121, 50, 0, 107, 101, 121, 51, 0, 18, 1, 3, 0, 0, 0, 1, 0, 0, 0, 118, 97, 108, 117, 101, 49, 0, 118, 97, 108, 117, 101, 50, 0, 118, 97, 108, 117, 101, 51, 0}, + DfChart: {18, 1, 5, 0, 0, 0, 1, 0, 0, 0, 116, 105, 116, 108, 101, 0, 99, 104, 97, 114, 116, 84, 121, 112, 101, 0, 115, 116, 97, 99, 107, 105, 110, 
103, 0, 100, 97, 116, 97, 0, 101, 120, 116, 114, 97, 115, 0, 25, 1, 5, 0, 0, 0, 1, 0, 0, 0, 18, 1, 1, 0, 0, 0, 1, 0, 0, 0, 99, 104, 97, 114, 116, 0, 4, 0, 4, 0, 0, 0, 1, 0, 0, 18, 3, 0, 18, 3, 3, 0, 0, 0, 1, 0, 0, 0, 109, 49, 0, 109, 50, 0, 109, 51, 0, 18, 5, 18, 1, 3, 0, 0, 0, 1, 0, 0, 0, 107, 101, 121, 49, 0, 107, 101, 121, 50, 0, 107, 101, 121, 51, 0, 18, 1, 3, 0, 0, 0, 1, 0, 0, 0, 118, 97, 108, 117, 101, 49, 0, 118, 97, 108, 117, 101, 50, 0, 118, 97, 108, 117, 101, 51, 0}, +} + +func TestParseDataForm(t *testing.T) { + by := bytes.NewBuffer([]byte{}) + r := protocol.NewReader(by) + bo := protocol.LittleEndian + + by.Write(rawDataFormBytes[DfDictionary]) + c := newCategory(byte(DfDictionary), byte(DtString)) + dict, err := parseDictionary(r, bo, c) + assert.Nil(t, err) + assert.Equal(t, dict.String(), "dict<string, string>([\n string[3]([key1, key2, key3]),\n string[3]([value1, value2, value3]),\n])") + + by.Write(rawDataFormBytes[DfMatrix]) + c = newCategory(byte(DfMatrix), byte(DtString)) + m, err := parseMatrix(r, bo, c) + assert.Nil(t, err) + assert.Equal(t, m.String(), "matrix<string>[3r][1c]({\n rows: [value1, value2, value3],\n cols: [value1, value2, value3],\n data: stringArray(3) [\n key1,\n key2,\n key3,\n ]\n})") + + by.Write(rawDataFormBytes[DfPair]) + c = newCategory(byte(DfPair), byte(DtString)) + pair, err := parsePair(r, bo, c) + assert.Nil(t, err) + assert.Equal(t, pair.String(), "pair<string>([key1, key2, key3])") + + by.Write(rawDataFormBytes[DfSet]) + c = newCategory(byte(DfSet), byte(DtString)) + set, err := parseSet(r, bo, c) + assert.Nil(t, err) + assert.Equal(t, set.String(), "set<string>[3]([key1, key2, key3])") + + by.Write(rawDataFormBytes[DfTable]) + c = newCategory(byte(DfTable), byte(DtVoid)) + tb, err := parseTable(r, bo, c) + assert.Nil(t, err) + assert.Equal(t, tb.String(), "table[1r][1c]([\n\t string[3]('col', [col1, col1, col1])\n\t])") + + by.Write(rawDataFormBytes[DfVector]) + c = newCategory(byte(DfVector), 145) + vc, err := parseVector(r, bo, c) + assert.Nil(t, err) + assert.Equal(t, vc.String(), "vector([vector])") + + by.Write(rawDataFormBytes[DfScalar]) + c = newCategory(byte(DfScalar), byte(DtString)) + sca, err := parseScalar(r, bo, c) + assert.Nil(t, err) + assert.Equal(t, sca.String(), "string(scalar)") + + by.Write(rawDataFormBytes[DfChart]) + c = newCategory(byte(DfChart), byte(DtAny)) + ch, err := parseChart(r, bo, c) + assert.Nil(t, err) + assert.Equal(t, ch.String(), "Chart({\n title: [chart]\n chartType: CT_LINE\n stacking: false\n data: matrix<string>[3r][1c]({\n rows: null,\n cols: null,\n data: stringArray(3) [\n m1,\n m2,\n m3,\n ]\n})\n extras: dict<string, string>([\n string[3]([key1, key2, key3]),\n string[3]([value1, value2, value3]),\n])\n})") +} diff --git a/model/parse_datatype.go b/model/parse_datatype.go new file mode 100644 index 0000000..220fd04 --- /dev/null +++ b/model/parse_datatype.go @@ -0,0 +1,158 @@ +package model + +import ( + "fmt" + "math" + "time" + + "github.com/dolphindb/api-go/dialer/protocol" +) + +func parseDate(raw interface{}) time.Time { + res := raw.(int32) + if res == math.MinInt32 { + return time.Time{} + } + + return originalTime.Add(time.Duration(res*24) * time.Hour) +} + +func parseMonth(raw interface{}) time.Time { + res := raw.(int32) + if res == math.MinInt32 { + return time.Time{} + } + year := res / 12 + month := res%12 + 1 + + return time.Date(int(year), time.Month(month), 1, 0, 0, 0, 0, time.UTC) +} + +func parseTime(raw interface{}) time.Time { + res := raw.(int32) + if res == math.MinInt32 { + return time.Time{} + } + return
originalTime.Add(time.Duration(res) * time.Millisecond) +} + +func parseMinute(raw interface{}) time.Time { + res := raw.(int32) + if res == math.MinInt32 { + return time.Time{} + } + return originalTime.Add(time.Duration(res*60) * time.Second) +} + +func parseSecond(raw interface{}) time.Time { + res := raw.(int32) + if res == math.MinInt32 { + return time.Time{} + } + return originalTime.Add(time.Duration(res) * time.Second) +} + +func parseDateTime(raw interface{}) time.Time { + res := raw.(int32) + if res == math.MinInt32 { + return time.Time{} + } + return originalTime.Add(time.Duration(res) * time.Second) +} + +func parseDateHour(raw interface{}) time.Time { + res := raw.(int32) + if res == math.MinInt32 { + return time.Time{} + } + return originalTime.Add(time.Duration(res) * time.Hour) +} + +func parseTimeStamp(raw interface{}) time.Time { + res := raw.(int64) + if res == math.MinInt64 { + return time.Time{} + } + return originalTime.Add(time.Duration(res) * time.Millisecond) +} + +func parseNanoTime(raw interface{}) time.Time { + res := raw.(int64) + if res == math.MinInt64 { + return time.Time{} + } + return originalTime.Add(time.Duration(res) * time.Nanosecond) +} + +func parseNanoTimeStamp(raw interface{}) time.Time { + res := raw.(int64) + if res == math.MinInt64 { + return time.Time{} + } + return originalTime.Add(time.Duration(res) * time.Nanosecond) +} + +func parseDuration(raw interface{}) string { + du := raw.([2]uint32) + unit := durationUnit[du[1]] + if du[0] == MinInt32 { + return "" + } + return fmt.Sprintf("%d%s", du[0], unit) +} + +func parseComplex(raw interface{}) string { + fp := raw.([2]float64) + if fp[0] == -math.MaxFloat64 || fp[1] == -math.MaxFloat64 { + return "" + } + return fmt.Sprintf("%.5f+%.5fi", fp[0], fp[1]) +} + +func parsePoint(raw interface{}) string { + fp := raw.([2]float64) + if fp[0] == -math.MaxFloat64 || fp[1] == -math.MaxFloat64 { + return emptyPoint + } + return fmt.Sprintf("(%.5f, %.5f)", fp[0], fp[1]) +} + +func parseIP(raw interface{}, bo protocol.ByteOrder) string { + p := raw.([2]uint64) + if p[0] == 0 && p[1] == 0 { + return "0.0.0.0" + } + + low := make([]byte, 8) + bo.PutUint64(low, p[0]) + if p[1] == 0 { + return fmt.Sprintf("%d.%d.%d.%d", low[3], low[2], low[1], low[0]) + } + + high := make([]byte, 8) + bo.PutUint64(high, p[1]) + return fmt.Sprintf("%x:%x:%x:%x:%x:%x:%x:%x", bo.Uint16(high[6:8]), bo.Uint16(high[4:6]), bo.Uint16(high[2:4]), + bo.Uint16(high[0:2]), bo.Uint16(low[6:8]), bo.Uint16(low[4:6]), bo.Uint16(low[2:4]), bo.Uint16(low[0:2])) +} + +func parseUUID(raw interface{}, bo protocol.ByteOrder) string { + p := raw.([2]uint64) + // the null UUID is all zero bits, so both halves must be zero + if p[0] == 0 && p[1] == 0 { + return "00000000-0000-0000-0000-000000000000" + } + + high, low := make([]byte, 8), make([]byte, 8) + bo.PutUint64(high, p[1]) + bo.PutUint64(low, p[0]) + + return fmt.Sprintf("%08x-%04x-%04x-%04x-%012x", bo.Uint32(high[4:]), bo.Uint16(high[2:4]), + bo.Uint16(high[0:2]), bo.Uint16(low[6:8]), bo.Uint64(append(low[0:6], 0, 0))) +} + +func parseInt128(raw interface{}) string { + p := raw.([2]uint64) + // likewise, an int128 is null only when both halves are zero + if p[0] == 0 && p[1] == 0 { + return "00000000000000000000000000000000" + } + return fmt.Sprintf("%016x%016x", p[1], p[0]) +} diff --git a/model/parse_datatype_test.go b/model/parse_datatype_test.go new file mode 100644 index 0000000..54ad889 --- /dev/null +++ b/model/parse_datatype_test.go @@ -0,0 +1,85 @@ +package model + +import ( + "math" + "testing" + + "github.com/dolphindb/api-go/dialer/protocol" + + "github.com/stretchr/testify/assert" +) + +func
TestParseDataType(t *testing.T) { + bo := protocol.LittleEndian + + res := parseDate(int32(4)) + assert.Equal(t, res.Format("2006.01.02"), "1970.01.05") + + res = parseMonth(int32(24244)) + assert.Equal(t, res.Format("2006.01M"), "2020.05M") + + res = parseTimeStamp(int64(360000000)) + assert.Equal(t, res.Format("2006.01.02T15:04:05.000"), "1970.01.05T04:00:00.000") + + res = parseMinute(int32(6000)) + assert.Equal(t, res.Format("15:04M"), "04:00M") + + res = parseDateHour(int32(100)) + assert.Equal(t, res.Format("2006.01.02T15"), "1970.01.05T04") + + res = parseDateTime(int32(360000)) + assert.Equal(t, res.Format("2006.01.02T15:04:05"), "1970.01.05T04:00:00") + + res = parseSecond(int32(360000)) + assert.Equal(t, res.Format("15:04:05"), "04:00:00") + + res = parseMinute(int32(6000)) + assert.Equal(t, res.Format("15:04M"), "04:00M") + + res = parseTime(int32(360000000)) + assert.Equal(t, res.Format("15:04:05.000"), "04:00:00.000") + + high := int64(-2204767551958936073) + intP := [2]uint64{ + 7149476803327945778, + uint64(high), + } + ti := parseInt128(intP) + assert.Equal(t, ti, "e1671797c52e15f763380b45e841ec32") + + low := int64(-4675754494756414117) + high = int64(-1878940850640566832) + intP = [2]uint64{ + uint64(low), + uint64(high), + } + ti = parseUUID(intP, bo) + assert.Equal(t, ti, "e5eca940-5b99-45d0-bf1c-620f6b1b9d5b") + + intP = [2]uint64{ + 8526542638814027207, + 3777231640985064004, + } + ti = parseIP(intP, bo) + assert.Equal(t, ti, "346b:6c2a:3347:d244:7654:5d5a:bcbb:5dc7") + + fp := [2]float64{ + math.Float64frombits(4607182418800017408), + math.Float64frombits(4607182418800017408), + } + ti = parseComplex(fp) + assert.Equal(t, ti, "1.00000+1.00000i") + + du := [2]uint32{ + 10, + 5, + } + ti = parseDuration(du) + assert.Equal(t, ti, "10H") + + res = parseNanoTimeStamp(int64(360000000000000)) + assert.Equal(t, res.Format("2006.01.02T15:04:05.000000000"), "1970.01.05T04:00:00.000000000") + + res = parseNanoTime(int64(360000000000000)) + assert.Equal(t, res.Format("15:04:05.000000000"), "04:00:00.000000000") +} diff --git a/model/render_datatype.go b/model/render_datatype.go new file mode 100644 index 0000000..7c2b1dc --- /dev/null +++ b/model/render_datatype.go @@ -0,0 +1,400 @@ +package model + +import ( + "errors" + "strconv" + "strings" + "time" + + "github.com/dolphindb/api-go/dialer/protocol" +) + +var originalTime = time.Date(1970, time.Month(1), 1, 0, 0, 0, 0, time.UTC) + +func renderDuration(val interface{}) ([2]uint32, error) { + b, ok := val.(string) + if !ok { + return [2]uint32{}, errors.New("the type of in must be string when datatype is DtDuration") + } + + return renderDurationFromString(b) +} + +func renderDouble2(val interface{}) ([2]float64, error) { + b, ok := val.([2]float64) + if !ok { + return [2]float64{}, errors.New("the type of in must be [2]float64 when datatype is DtComplex or DtPoint") + } + + return b, nil +} + +func renderBool(val interface{}) (uint8, error) { + b, ok := val.(byte) + if !ok { + return 0, errors.New("the type of in must be byte when datatype is DtBool") + } + + return renderBoolFromByte(b), nil +} + +func renderBlob(val interface{}) ([]byte, error) { + byt, ok := val.([]byte) + if !ok { + return nil, errors.New("the type of in must be []byte when datatype is DtBlob") + } + + return byt, nil +} + +func renderByte(val interface{}) (uint8, error) { + b, ok := val.(byte) + if !ok { + return 0, errors.New("the type of in must be byte when datatype is DtChar or DtCompress") + } + + return b, nil +} + +func renderDate(val interface{}) 
(int32, error) { + ti, ok := val.(time.Time) + if !ok { + return 0, errors.New("the type of in must be time.Time when datatype is DtDate") + } + + return renderDateFromTime(ti), nil +} + +func renderDateHour(val interface{}) (int32, error) { + ti, ok := val.(time.Time) + if !ok { + return 0, errors.New("the type of in must be time.Time when datatype is DtDateHour") + } + + return renderDateHourFromTime(ti), nil +} + +func renderDateTime(val interface{}) (int32, error) { + ti, ok := val.(time.Time) + if !ok { + return 0, errors.New("the type of in must be time.Time when datatype is DtDatetime") + } + + return renderDateTimeFromTime(ti), nil +} + +func renderDouble(val interface{}) (float64, error) { + f, ok := val.(float64) + if !ok { + return 0, errors.New("the type of in must be float64 when datatype is DtDouble") + } + + return f, nil +} + +func renderFloat(val interface{}) (float32, error) { + f, ok := val.(float32) + if !ok { + return 0, errors.New("the type of in must be float32 when datatype is DtFloat") + } + + return f, nil +} + +func renderInt(val interface{}) (int32, error) { + i, ok := val.(int32) + if !ok { + return 0, errors.New("the type of in must be int32 when datatype is DtInt") + } + + return i, nil +} + +func renderInt128(val interface{}) ([2]uint64, error) { + str, ok := val.(string) + if !ok { + return [2]uint64{}, errors.New("the type of in must be string when datatype is DtInt128") + } + + return renderInt128FromString(str), nil +} + +func renderIP(val interface{}, bo protocol.ByteOrder) ([2]uint64, error) { + str, ok := val.(string) + if !ok { + return [2]uint64{}, errors.New("the type of in must be string when datatype is DtIP") + } + + return renderIPFromString(str, bo), nil +} + +func renderLong(val interface{}) (int64, error) { + i, ok := val.(int64) + if !ok { + return 0, errors.New("the type of in must be int64 when datatype is DtLong") + } + + return i, nil +} + +func renderMinute(val interface{}) (int32, error) { + ti, ok := val.(time.Time) + if !ok { + return 0, errors.New("the type of in must be time.Time when datatype is DtMinute") + } + + return renderMinuteFromTime(ti), nil +} + +func renderMonth(val interface{}) (int32, error) { + ti, ok := val.(time.Time) + if !ok { + return 0, errors.New("the type of in must be time.Time when datatype is DtMonth") + } + + return renderMonthFromTime(ti), nil +} + +func renderNanoTime(val interface{}) (int64, error) { + ti, ok := val.(time.Time) + if !ok { + return 0, errors.New("the type of in must be time.Time when datatype is DtNanoTime") + } + + return renderNanoTimeFromTime(ti), nil +} + +func renderNanoTimestamp(val interface{}) (int64, error) { + ti, ok := val.(time.Time) + if !ok { + return 0, errors.New("the type of in must be time.Time when datatype is DtNanoTimestamp") + } + + return renderNanoTimestampFromTime(ti), nil +} + +func renderSecond(val interface{}) (int32, error) { + ti, ok := val.(time.Time) + if !ok { + return 0, errors.New("the type of in must be time.Time when datatype is DtSecond") + } + + return renderSecondFromTime(ti), nil +} + +func renderShort(val interface{}) (int16, error) { + i, ok := val.(int16) + if !ok { + return 0, errors.New("the type of in must be int16 when datatype is DtShort") + } + + return i, nil +} + +func renderTime(val interface{}) (int32, error) { + ti, ok := val.(time.Time) + if !ok { + return 0, errors.New("the type of in must be time.Time when datatype is DtTime") + } + + return renderTimeFromTime(ti), nil +} + +func renderTimestamp(val interface{}) (int64, error) { 
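+ // the assertion only validates the input type; the epoch arithmetic
+ // lives in renderTimestampFromTime below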
+ ti, ok := val.(time.Time) + if !ok { + return 0, errors.New("the type of in must be time.Time when datatype is DtTimestamp") + } + + return renderTimestampFromTime(ti), nil +} + +func renderUUID(val interface{}) ([2]uint64, error) { + str, ok := val.(string) + if !ok { + return [2]uint64{}, errors.New("the type of in must be string when datatype is DtUuid") + } + + return renderUUIDFromString(str), nil +} + +func renderAny(val interface{}) (DataForm, error) { + dataForm, ok := val.(DataForm) + if !ok { + return nil, errors.New("the type of in must be DataForm when datatype is DtAny") + } + + return dataForm, nil +} + +func renderString(val interface{}) (string, error) { + str, ok := val.(string) + if !ok { + return "", errors.New("the type of in must be string when datatype is DtString, DtCode, DtFunction, DtHandle or DtSymbol") + } + + return str, nil +} + +func renderDurationFromString(val string) ([2]uint32, error) { + if val == "" { + return emptyDuration, nil + } + data := val[:len(val)-1] + i, err := strconv.Atoi(data) + if err != nil { + return [2]uint32{}, err + } + + return [2]uint32{uint32(i), durationUnitReverse[val[len(val)-1:]]}, nil +} + +func renderBoolFromByte(val byte) uint8 { + if val == 0 || val == MinInt8 { + return val + } + + return 1 +} + +func renderDateFromTime(ti time.Time) int32 { + if ti == NullTime { + return NullInt + } + ti = ti.UTC() + d := time.Date(ti.Year(), ti.Month(), ti.Day(), 0, 0, 0, 0, time.UTC) + return int32(d.Unix() / 86400) +} + +func renderDateHourFromTime(ti time.Time) int32 { + if ti == NullTime { + return NullInt + } + ti = ti.UTC() + d := time.Date(ti.Year(), ti.Month(), ti.Day(), ti.Hour(), 0, 0, 0, time.UTC) + return int32(d.Unix() / 3600) +} + +func renderDateTimeFromTime(ti time.Time) int32 { + if ti == NullTime { + return NullInt + } + ti = ti.UTC() + return int32(ti.Unix()) +} + +func renderInt128FromString(str string) [2]uint64 { + if str == "" { + return emptyInt64List + } + length := len(str) + return [2]uint64{ + stringToUint64(str[length/2:]), + stringToUint64(str[:length/2]), + } +} + +func renderIPFromString(str string, bo protocol.ByteOrder) [2]uint64 { + if str == "" { + return emptyInt64List + } + if strings.Contains(str, ":") { + val := strings.Split(str, ":") + return [2]uint64{ + stringToUint64(strings.Join(val[4:], "")), + stringToUint64(strings.Join(val[:4], "")), + } + } + + buf := make([]uint8, 8) + val := strings.Split(str, ".") + for k, v := range val { + i, err := strconv.Atoi(v) + if err != nil { + return [2]uint64{} + } + + buf[4-k-1] = uint8(i) + } + + return [2]uint64{ + bo.Uint64(protocol.ByteSliceFromUint8Slice(buf)), + 0, + } +} + +func renderMinuteFromTime(ti time.Time) int32 { + if ti == NullTime { + return NullInt + } + ti = ti.UTC() + d := (ti.Unix() - time.Date(ti.Year(), ti.Month(), ti.Day(), 0, 0, 0, 0, time.UTC).Unix()) / 60 + return int32(d) +} + +func renderMonthFromTime(ti time.Time) int32 { + if ti == NullTime { + return NullInt + } + ti = ti.UTC() + return int32(ti.Year()*12) + int32(ti.Month()) - 1 +} + +func renderNanoTimeFromTime(ti time.Time) int64 { + if ti == NullTime { + return NullLong + } + ti = ti.UTC() + return ti.Sub(time.Date(ti.Year(), ti.Month(), ti.Day(), 0, 0, 0, 0, time.UTC)).Nanoseconds() +} + +func renderNanoTimestampFromTime(ti time.Time) int64 { + if ti == NullTime { + return NullLong + } + ti = ti.UTC() + return ti.Sub(originalTime).Nanoseconds() +} + +func renderSecondFromTime(ti time.Time) int32 { + if ti == NullTime { + return NullInt + } + ti = ti.UTC() + d := 
ti.Unix() - time.Date(ti.Year(), ti.Month(), ti.Day(), 0, 0, 0, 0, time.UTC).Unix() + return int32(d) +} + +func renderTimeFromTime(ti time.Time) int32 { + if ti == NullTime { + return NullInt + } + ti = ti.UTC() + d := ti.Sub(time.Date(ti.Year(), ti.Month(), ti.Day(), 0, 0, 0, 0, time.UTC)).Milliseconds() + return int32(d) +} + +func renderTimestampFromTime(ti time.Time) int64 { + if ti == NullTime { + return NullLong + } + ti = ti.UTC() + if ti.Year() < 1970 { + ms := ti.Sub(time.Date(ti.Year(), 1, 1, 0, 0, 0, 0, time.UTC)).Milliseconds() + s := originalTime.Sub(time.Date(ti.Year(), 1, 1, 0, 0, 0, 0, time.UTC)).Milliseconds() + return ms - s + } + + return ti.Sub(originalTime).Milliseconds() +} + +func renderUUIDFromString(str string) [2]uint64 { + if str == "" || str == NullUUID { + return emptyInt64List + } + val := strings.Split(str, "-") + return [2]uint64{ + stringToUint64(strings.Join(val[3:], "")), + stringToUint64(strings.Join(val[:3], "")), + } +} diff --git a/model/render_datatype_test.go b/model/render_datatype_test.go new file mode 100644 index 0000000..4d2f465 --- /dev/null +++ b/model/render_datatype_test.go @@ -0,0 +1,19 @@ +package model + +import ( + "testing" + + "github.com/stretchr/testify/assert" +) + +func TestRenderDataType(t *testing.T) { + du, err := renderDuration("10H") + assert.Nil(t, err) + assert.Equal(t, du[0], uint32(10)) + assert.Equal(t, du[1], uint32(5)) + + fp, err := renderDouble2([2]float64{1, 1}) + assert.Nil(t, err) + assert.Equal(t, fp[0], float64(1)) + assert.Equal(t, fp[1], float64(1)) +} diff --git a/model/scalar.go b/model/scalar.go new file mode 100644 index 0000000..ce41186 --- /dev/null +++ b/model/scalar.go @@ -0,0 +1,71 @@ +package model + +import ( + "fmt" + + "github.com/dolphindb/api-go/dialer/protocol" +) + +// Scalar is a DataForm. +// Refer to https://www.dolphindb.cn/cn/help/130/DataTypesandStructures/DataForms/Scalar.html for more details. +type Scalar struct { + category *Category + + DataType +} + +// NewScalar returns an object of scalar with d. +// You can instantiate the d by NewDataType. +func NewScalar(d DataType) *Scalar { + return &Scalar{ + category: &Category{ + DataForm: DfScalar, + DataType: d.DataType(), + }, + DataType: d, + } +} + +// Rows returns the row num of the DataForm. +func (s *Scalar) Rows() int { + return 1 +} + +// GetDataForm returns the byte type of the DataForm. +func (s *Scalar) GetDataForm() DataFormByte { + return DfScalar +} + +// GetDataType returns the byte type of the DataType. +func (s *Scalar) GetDataType() DataTypeByte { + return s.category.DataType +} + +// GetDataTypeString returns the string format of the DataType. +func (s *Scalar) GetDataTypeString() string { + return GetDataTypeString(s.category.DataType) +} + +// SetNull sets the value of scalar to null. +func (s *Scalar) SetNull() { + s.DataType.SetNull() +} + +// IsNull checks whether the value of scalar is null. +func (s *Scalar) IsNull() bool { + return s.DataType.IsNull() +} + +// Render serializes the DataForm with bo and input it into w. 
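+// A minimal usage sketch (illustrative; errors ignored for brevity), following the
+// same pattern as the tests. buf may be any io.Writer, e.g. a *bytes.Buffer:
+//
+//	dt, _ := NewDataType(DtString, "scalar")
+//	s := NewScalar(dt)
+//	buf := bytes.NewBuffer(nil)
+//	w := protocol.NewWriter(buf)
+//	_ = s.Render(w, protocol.LittleEndian)
+//	w.Flush()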
+func (s *Scalar) Render(w *protocol.Writer, bo protocol.ByteOrder) error { + if err := s.category.render(w); err != nil { + return err + } + + return s.DataType.Render(w, bo) +} + +func (s *Scalar) String() string { + val := s.DataType.String() + return fmt.Sprintf("%s(%s)", GetDataTypeString(s.DataType.DataType()), val) +} diff --git a/model/scalar_test.go b/model/scalar_test.go new file mode 100644 index 0000000..9cd771e --- /dev/null +++ b/model/scalar_test.go @@ -0,0 +1,34 @@ +package model + +import ( + "bytes" + "testing" + + "github.com/dolphindb/api-go/dialer/protocol" + "github.com/stretchr/testify/assert" +) + +const scalarExpect = "string(scalar)" + +func TestScalar(t *testing.T) { + data, err := NewDataType(DtString, "scalar") + assert.Nil(t, err) + + s := NewScalar(data) + assert.Equal(t, s.GetDataForm(), DfScalar) + assert.Equal(t, s.GetDataType(), DtString) + assert.Equal(t, s.GetDataTypeString(), "string") + assert.Equal(t, s.Rows(), 1) + + by := bytes.NewBufferString("") + w := protocol.NewWriter(by) + err = s.Render(w, protocol.LittleEndian) + w.Flush() + assert.Nil(t, err) + assert.Equal(t, by.String(), "\x12\x00scalar\x00") + assert.Equal(t, s.String(), scalarExpect) + + assert.False(t, s.IsNull()) + s.SetNull() + assert.True(t, s.IsNull()) +} diff --git a/model/set.go b/model/set.go new file mode 100644 index 0000000..d2409b9 --- /dev/null +++ b/model/set.go @@ -0,0 +1,68 @@ +package model + +import ( + "fmt" + "strings" + + "github.com/dolphindb/api-go/dialer/protocol" +) + +// Set is a DataForm. +// Refer to https://www.dolphindb.cn/cn/help/130/DataTypesandStructures/DataForms/Set.html for more details. +type Set struct { + category *Category + + Vector *Vector +} + +// NewSet returns an object of Set based on vector v. +// You can instantiate v by NewVector. +func NewSet(v *Vector) *Set { + return &Set{ + category: &Category{ + DataForm: DfSet, + DataType: v.GetDataType(), + }, + Vector: v, + } +} + +// Rows returns the row num of the DataForm. +func (s *Set) Rows() int { + return int(s.Vector.RowCount) +} + +// GetDataForm returns the byte type of the DataForm. +func (s *Set) GetDataForm() DataFormByte { + return DfSet +} + +// GetDataType returns the byte type of the DataType. +func (s *Set) GetDataType() DataTypeByte { + return s.category.DataType +} + +// GetDataTypeString returns the string format of the DataType. +func (s *Set) GetDataTypeString() string { + return GetDataTypeString(s.category.DataType) +} + +// Render serializes the DataForm with bo and input it into w. 
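+// For example, rendering a string set (mirroring the tests; w is an existing
+// *protocol.Writer and errors are ignored for brevity):
+//
+//	dtl, _ := NewDataTypeListWithRaw(DtString, []string{"key1", "key2", "key3"})
+//	s := NewSet(NewVector(dtl))
+//	_ = s.Render(w, protocol.LittleEndian)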
+func (s *Set) Render(w *protocol.Writer, bo protocol.ByteOrder) error { + if err := s.category.render(w); err != nil { + return err + } + + return s.Vector.Render(w, bo) +} + +func (s *Set) String() string { + if s.Vector == nil { + return "" + } + + val := s.Vector.formatString() + + return fmt.Sprintf("set<%s>[%d]([%s])", GetDataTypeString(s.Vector.GetDataType()), + s.Vector.ColumnCount*s.Vector.RowCount, strings.Join(val, ", ")) +} diff --git a/model/set_test.go b/model/set_test.go new file mode 100644 index 0000000..0c178f2 --- /dev/null +++ b/model/set_test.go @@ -0,0 +1,30 @@ +package model + +import ( + "bytes" + "testing" + + "github.com/dolphindb/api-go/dialer/protocol" + "github.com/stretchr/testify/assert" +) + +const setExpect = "set[3]([key1, key2, key3])" + +func TestSet(t *testing.T) { + data, err := NewDataTypeListWithRaw(DtString, []string{"key1", "key2", "key3"}) + assert.Nil(t, err) + + set := NewSet(NewVector(data)) + assert.Equal(t, set.GetDataForm(), DfSet) + assert.Equal(t, set.GetDataType(), DtString) + assert.Equal(t, set.GetDataTypeString(), "string") + assert.Equal(t, set.Rows(), 3) + + by := bytes.NewBufferString("") + w := protocol.NewWriter(by) + err = set.Render(w, protocol.LittleEndian) + w.Flush() + assert.Nil(t, err) + assert.Equal(t, by.String(), "\x12\x04\x12\x01\x03\x00\x00\x00\x01\x00\x00\x00key1\x00key2\x00key3\x00") + assert.Equal(t, set.String(), setExpect) +} diff --git a/model/table.go b/model/table.go new file mode 100644 index 0000000..88f6053 --- /dev/null +++ b/model/table.go @@ -0,0 +1,177 @@ +package model + +import ( + "fmt" + "strings" + + "github.com/dolphindb/api-go/dialer/protocol" +) + +// Table is a DataForm. +// Refer to https://www.dolphindb.cn/cn/help/130/DataTypesandStructures/DataForms/Table.html for more details. +type Table struct { + category *Category + columnNames DataTypeList + columnValues []*Vector + tableName DataType + rowCount uint32 + columnCount uint32 + + ColNames []string +} + +// NewTable returns an object of Table with colNames and colValues. +// You can instantiate the vector object by NewVector. +func NewTable(colNames []string, colValues []*Vector) *Table { + if len(colNames) != len(colValues) { + return nil + } + + names := make([]DataType, len(colNames)) + + for k, v := range colNames { + dt, _ := NewDataType(DtString, v) + names[k] = dt + } + + tbName, _ := NewDataType(DtString, "") + rowCount := 0 + if len(colValues) > 0 { + rowCount = colValues[0].Rows() + } + + return &Table{ + category: &Category{ + DataForm: DfTable, + DataType: DtVoid, + }, + ColNames: colNames, + columnNames: NewDataTypeList(DtString, names), + columnValues: colValues, + columnCount: uint32(len(colNames)), + tableName: tbName, + rowCount: uint32(rowCount), + } +} + +// Rows returns the row num of the DataForm. +func (t *Table) Rows() int { + return int(t.rowCount) +} + +// Columns returns the column num of the DataForm. +func (t *Table) Columns() int { + return int(t.columnCount) +} + +// GetDataForm returns the byte type of the DataForm. +func (t *Table) GetDataForm() DataFormByte { + return DfTable +} + +// GetSubtable instantiates a table with the values in indexes. +// The specified indexes should be less than the number of columns. 
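+// For example, to keep only the first and third rows of t (as in the tests):
+//
+//	sub := t.GetSubtable([]int{0, 2})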
+func (t *Table) GetSubtable(indexes []int) *Table { + lenCol := len(t.columnValues) + cols := make([]*Vector, lenCol) + for i := 0; i < lenCol; i++ { + cols[i] = t.columnValues[i].GetSubvector(indexes) + } + + return NewTable(t.ColNames, cols) +} + +// GetDataType returns the byte type of the DataType. +func (t *Table) GetDataType() DataTypeByte { + return t.category.DataType +} + +// GetDataTypeString returns the string format of the DataType. +func (t *Table) GetDataTypeString() string { + return GetDataTypeString(t.category.DataType) +} + +// GetColumnByName returns the column in table with the column name. +func (t *Table) GetColumnByName(colName string) *Vector { + for k, v := range t.ColNames { + if v == colName { + return t.columnValues[k] + } + } + + return nil +} + +// GetColumnByIndex returns the column in table with the column index. +func (t *Table) GetColumnByIndex(ind int) *Vector { + if ind >= int(t.columnCount) { + return nil + } + + return t.columnValues[ind] +} + +// GetColumnNames returns all column names of the table. +func (t *Table) GetColumnNames() []string { + return t.ColNames +} + +// Render serializes the DataForm with bo and input it into w. +func (t *Table) Render(w *protocol.Writer, bo protocol.ByteOrder) error { + err := t.category.render(w) + if err != nil { + return err + } + + err = t.renderLength(w, bo) + if err != nil { + return err + } + + err = t.tableName.Render(w, bo) + if err != nil { + return err + } + + err = t.columnNames.Render(w, bo) + if err != nil { + return err + } + + for _, v := range t.columnValues { + err = v.Render(w, bo) + if err != nil { + return err + } + } + + return nil +} + +func (t *Table) renderLength(w *protocol.Writer, bo protocol.ByteOrder) error { + buf := make([]byte, 8) + bo.PutUint32(buf[0:4], t.rowCount) + bo.PutUint32(buf[4:8], t.columnCount) + + return w.Write(buf) +} + +// String returns the string of the DataForm. 
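+// For example, a one-column table renders as:
+//
+//	table[3r][1c]([
+//		 string[3]('col', [col1, col2, col3])
+//		])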
+func (t *Table) String() string { + by := strings.Builder{} + by.WriteString(fmt.Sprintf("table[%dr][%dc]([\n\t", t.rowCount, t.columnCount)) + + for k, v := range t.ColNames { + val := t.columnValues[k].formatString() + + dt := GetDataTypeString(t.columnValues[k].GetDataType()) + if len(val) == 0 { + by.WriteString(fmt.Sprintf(" %s[%d]('%s', null)\n\t", dt, t.columnValues[k].RowCount, v)) + } else { + by.WriteString(fmt.Sprintf(" %s[%d]('%s', [%s])\n\t", dt, t.columnValues[k].RowCount, v, strings.Join(val, ", "))) + } + } + by.WriteString("])") + + return by.String() +} diff --git a/model/table_test.go b/model/table_test.go new file mode 100644 index 0000000..71094b4 --- /dev/null +++ b/model/table_test.go @@ -0,0 +1,41 @@ +package model + +import ( + "bytes" + "testing" + + "github.com/dolphindb/api-go/dialer/protocol" + "github.com/stretchr/testify/assert" +) + +const tableExpect = "table[3r][1c]([\n\t string[3]('col', [col1, col2, col3])\n\t])" + +func TestTable(t *testing.T) { + col, err := NewDataTypeListWithRaw(DtString, []string{"col1", "col2", "col3"}) + assert.Nil(t, err) + + tb := NewTable([]string{"col"}, []*Vector{NewVector(col)}) + assert.Equal(t, tb.GetDataForm(), DfTable) + assert.Equal(t, tb.Rows(), 3) + assert.Equal(t, tb.GetDataType(), DtVoid) + assert.Equal(t, tb.GetDataTypeString(), "void") + + colNames := tb.GetColumnNames() + assert.Equal(t, colNames, []string{"col"}) + + colV := tb.GetColumnByName(colNames[0]) + assert.Equal(t, colV.String(), "vector([col1, col2, col3])") + + by := bytes.NewBufferString("") + w := protocol.NewWriter(by) + err = tb.Render(w, protocol.LittleEndian) + w.Flush() + assert.Nil(t, err) + assert.Equal(t, by.String(), "\x00\x06\x03\x00\x00\x00\x01\x00\x00\x00\x00col\x00\x12\x01\x03\x00\x00\x00\x01\x00\x00\x00col1\x00col2\x00col3\x00") + assert.Equal(t, tb.String(), tableExpect) + + tb = tb.GetSubtable([]int{0, 2}) + colV = tb.GetColumnByIndex(0) + assert.Equal(t, colV.String(), "vector([col1, col3])") + assert.Equal(t, tb.Columns(), 1) +} diff --git a/model/util.go b/model/util.go new file mode 100644 index 0000000..1125f58 --- /dev/null +++ b/model/util.go @@ -0,0 +1,271 @@ +package model + +import ( + "errors" + "strconv" + "time" + + "github.com/dolphindb/api-go/dialer/protocol" +) + +var dataTypeStringMap = map[DataTypeByte]string{ + DtVoid: "void", + DtBool: "bool", + DtChar: "char", + DtShort: "short", + DtInt: "int", + DtLong: "long", + DtDate: "date", + DtMonth: "month", + DtTime: "time", + DtMinute: "minute", + DtSecond: "second", + DtDatetime: "datetime", + DtTimestamp: "timestamp", + DtNanoTime: "nanotime", + DtNanoTimestamp: "nanotimestamp", + DtFloat: "float", + DtDouble: "double", + DtSymbol: "symbol", + DtString: "string", + DtUUID: "uuid", + DtFunction: "function", + DtHandle: "handle", + DtCode: "code", + DtDatasource: "datasource", + DtResource: "resource", + DtAny: "any", + DtCompress: "compress", + DtDictionary: "dictionary", + DtDateHour: "dateHour", + DtDateMinute: "dateMinute", + DtIP: "IP", + DtInt128: "int128", + DtBlob: "blob", + dt33: "Dt33", + DtComplex: "complex", + DtPoint: "point", + DtDuration: "duration", + DtObject: "object", +} + +var dataFormStringMap = map[DataFormByte]string{ + DfChart: "chart", + DfChunk: "chunk", + DfDictionary: "dictionary", + DfMatrix: "matrix", + DfPair: "pair", + DfScalar: "scalar", + DfSet: "set", + DfTable: "table", + DfVector: "vector", +} + +var durationUnit = map[uint32]string{ + 0: "ns", + 1: "us", + 2: "ms", + 3: "s", + 4: "m", + 5: "H", + 6: "d", + 7: "w", + 8: "M", + 9: 
"y", + 10: "B", +} + +var durationUnitReverse = map[string]uint32{ + "ns": 0, + "us": 1, + "ms": 2, + "s": 3, + "m": 4, + "H": 5, + "d": 6, + "w": 7, + "M": 8, + "y": 9, + "B": 10, +} + +// GetDataTypeString returns the data type in the string format based on its byte format. +func GetDataTypeString(t DataTypeByte) string { + dts := "" + if t > 128 { + return "symbolExtend" + } else if t > 64 { + t -= 64 + dts = "Array" + } + + dts = dataTypeStringMap[t] + dts + return dts +} + +// GetDataFormString returns the data form in the string format based on its byte format. +func GetDataFormString(t DataFormByte) string { + return dataFormStringMap[t] +} + +// GetCategory returns the category string according to the dt. +func GetCategory(d DataTypeByte) CategoryString { + if d > 128 { + d -= 128 + } else if d > 64 { + d -= 64 + } + + switch { + case d == DtTime || d == DtSecond || d == DtMinute || d == DtDate || d == DtDatetime || + d == DtMonth || d == DtTimestamp || d == DtNanoTime || d == DtNanoTimestamp || + d == DtDateHour || d == DtDateMinute: + return TEMPORAL + case d == DtInt || d == DtLong || d == DtShort || d == DtChar: + return INTEGRAL + case d == DtBool: + return LOGICAL + case d == DtFloat || d == DtDouble: + return FLOATING + case d == DtString || d == DtSymbol: + return LITERAL + case d == DtInt128 || d == DtUUID || d == DtIP: + return BINARY + case d == DtAny: + return MIXED + case d == DtVoid: + return NOTHING + default: + return SYSTEM + } +} + +// CastDateTime casts src to other DataForm according to the dt. +func CastDateTime(src DataForm, d DataTypeByte) (DataForm, error) { + switch src.GetDataForm() { + case DfScalar: + return castScalarDateTime(src.(*Scalar), d) + case DfVector: + return castVectorDateTime(src.(*Vector), d) + default: + return nil, errors.New("the source data must be a temporal scalar/vector") + } +} + +func castScalarDateTime(src *Scalar, dtb DataTypeByte) (*Scalar, error) { + res, err := castDateTypeDateTime(src.DataType) + if err != nil { + return nil, err + } + + dt, err := NewDataType(dtb, res) + if err != nil { + return nil, err + } + + return NewScalar(dt), nil +} + +func castVectorDateTime(src *Vector, dtb DataTypeByte) (*Vector, error) { + rows := src.Rows() + dtl := make([]int32, rows) + for i := 0; i < rows; i++ { + raw := src.Data.Get(i) + if raw.DataType() == DtAny { + sca := raw.Value().(*Scalar) + raw = sca.DataType + } + + t, err := castDateTypeDateTime(raw) + if err != nil { + return nil, err + } + + switch dtb { + case DtMonth: + dtl[i] = renderMonthFromTime(t) + case DtDate: + dtl[i] = renderDateFromTime(t) + case DtDateHour: + dtl[i] = renderDateHourFromTime(t) + case DtTime: + dtl[i] = renderTimeFromTime(t) + default: + return nil, errors.New("failed to cast vector of DateTime type") + } + } + + l := &dataTypeList{ + t: dtb, + bo: protocol.LittleEndian, + count: rows, + intData: dtl, + } + + return NewVector(l), nil +} + +func castDateTypeDateTime(raw DataType) (time.Time, error) { + dt := raw.(*dataType) + + var t time.Time + switch dt.DataType() { + case DtNanoTimestamp: + res := dt.data.(int64) + t = time.Date(1970, time.Month(1), 1, 0, 0, 0, 0, time.UTC). + Add(time.Duration(res) * time.Nanosecond) + case DtTimestamp: + res := dt.data.(int64) + t = time.Date(1970, time.Month(1), 1, 0, 0, 0, 0, time.UTC). + Add(time.Duration(res) * time.Millisecond) + case DtDatetime: + res := dt.data.(int32) + t = time.Date(1970, time.Month(1), 1, 0, 0, 0, 0, time.UTC). 
+ Add(time.Duration(res) * time.Second) + case DtDate: + res := dt.data.(int32) + t = time.Date(1970, time.Month(1), 1, 0, 0, 0, 0, time.UTC). + Add(time.Duration(res*24) * time.Hour) + default: + return time.Time{}, errors.New("the data type of the source data must be NANOTIMESTAMP, TIMESTAMP, DATE or DATETIME") + } + + return t, nil +} + +func read2Uint32(r protocol.Reader, bo protocol.ByteOrder) (uint32, uint32, error) { + bs, err := r.ReadCertainBytes(8) + if err != nil { + return 0, 0, err + } + + return bo.Uint32(bs), bo.Uint32(bs[4:]), nil +} + +func read2Uint16(r protocol.Reader, bo protocol.ByteOrder) (uint16, uint16, error) { + bs, err := r.ReadCertainBytes(4) + if err != nil { + return 0, 0, err + } + + return bo.Uint16(bs), bo.Uint16(bs[2:]), nil +} + +func stringToUint64(s string) uint64 { + v, err := strconv.ParseUint(s, 16, 64) + if err != nil { + return 0 + } + + return v +} + +func contains(raw []string, s string) (int, bool) { + for k, v := range raw { + if v == s { + return k, true + } + } + + return 0, false +} diff --git a/model/util_test.go b/model/util_test.go new file mode 100644 index 0000000..2ce7f6c --- /dev/null +++ b/model/util_test.go @@ -0,0 +1,175 @@ +package model + +import ( + "bytes" + "testing" + "time" + + "github.com/dolphindb/api-go/dialer/protocol" + + "github.com/stretchr/testify/assert" +) + +func TestUtil(t *testing.T) { + dts := GetDataTypeString(4) + assert.Equal(t, dts, "int") + + dts = GetDataTypeString(68) + assert.Equal(t, dts, "intArray") + + dts = GetDataTypeString(145) + assert.Equal(t, dts, "symbolExtend") + + by := bytes.NewBuffer([]byte{1, 0, 0, 0, 2, 0, 0, 0, 1, 0, 2, 0}) + r := protocol.NewReader(by) + + row, col, err := read2Uint32(r, protocol.LittleEndian) + assert.Nil(t, err) + assert.Equal(t, row, uint32(1)) + assert.Equal(t, col, uint32(2)) + + row16, col16, err := read2Uint16(r, protocol.LittleEndian) + assert.Nil(t, err) + assert.Equal(t, row16, uint16(1)) + assert.Equal(t, col16, uint16(2)) + + u64 := stringToUint64("3e8") + assert.Equal(t, u64, uint64(1000)) + + df := GetDataFormString(DfVector) + assert.Equal(t, df, "vector") + + df = GetDataFormString(DfChart) + assert.Equal(t, df, "chart") + + df = GetDataFormString(DfChunk) + assert.Equal(t, df, "chunk") + + df = GetDataFormString(DfScalar) + assert.Equal(t, df, "scalar") + + df = GetDataFormString(DfPair) + assert.Equal(t, df, "pair") + + df = GetDataFormString(DfDictionary) + assert.Equal(t, df, "dictionary") + + df = GetDataFormString(DfMatrix) + assert.Equal(t, df, "matrix") + + df = GetDataFormString(DfSet) + assert.Equal(t, df, "set") + + df = GetDataFormString(DfTable) + assert.Equal(t, df, "table") + + cat := GetCategory(DtTime) + assert.Equal(t, cat, TEMPORAL) + + cat = GetCategory(DtBlob) + assert.Equal(t, cat, SYSTEM) + + cat = GetCategory(DtInt) + assert.Equal(t, cat, INTEGRAL) + + cat = GetCategory(DtBool) + assert.Equal(t, cat, LOGICAL) + + cat = GetCategory(DtFloat) + assert.Equal(t, cat, FLOATING) + + cat = GetCategory(DtString) + assert.Equal(t, cat, LITERAL) + + cat = GetCategory(DtInt128) + assert.Equal(t, cat, BINARY) + + cat = GetCategory(DtAny) + assert.Equal(t, cat, MIXED) + + srcDt, err := NewDataType(DtTime, time.Date(2022, 1, 1, 1, 1, 1, 1, time.UTC)) + assert.Nil(t, err) + + sca := NewScalar(srcDt) + _, err = CastDateTime(sca, DtTime) + assert.Equal(t, "the data type of the source data must be NANOTIMESTAMP, TIMESTAMP, DATE or DATETIME", err.Error()) + + srcDt, err = NewDataType(DtNanoTimestamp, time.Date(2022, 1, 1, 1, 1, 1, 1, time.UTC)) + 
assert.Nil(t, err) + + sca = NewScalar(srcDt) + res, err := CastDateTime(sca, DtTime) + assert.Nil(t, err) + assert.Equal(t, res.GetDataForm(), DfScalar) + assert.Equal(t, res.String(), "time(01:01:01.000)") + + srcDt, err = NewDataType(DtTimestamp, time.Date(2022, 1, 1, 1, 1, 1, 1, time.UTC)) + assert.Nil(t, err) + + sca = NewScalar(srcDt) + res, err = CastDateTime(sca, DtTime) + assert.Nil(t, err) + assert.Equal(t, res.GetDataForm(), DfScalar) + assert.Equal(t, res.String(), "time(01:01:01.000)") + + srcDt, err = NewDataType(DtDatetime, time.Date(2022, 1, 1, 1, 1, 1, 1, time.UTC)) + assert.Nil(t, err) + + sca = NewScalar(srcDt) + res, err = CastDateTime(sca, DtTime) + assert.Nil(t, err) + assert.Equal(t, res.GetDataForm(), DfScalar) + assert.Equal(t, res.String(), "time(01:01:01.000)") + + srcDt, err = NewDataType(DtDate, time.Date(2022, 1, 1, 1, 1, 1, 1, time.UTC)) + assert.Nil(t, err) + + sca = NewScalar(srcDt) + res, err = CastDateTime(sca, DtTime) + assert.Nil(t, err) + assert.Equal(t, res.GetDataForm(), DfScalar) + assert.Equal(t, res.String(), "time(00:00:00.000)") + + srcDtl, err := NewDataTypeListWithRaw(DtTime, []time.Time{time.Date(2022, 1, 1, 1, 1, 1, 1, time.UTC)}) + assert.Nil(t, err) + + vct := NewVector(srcDtl) + _, err = CastDateTime(vct, DtTime) + assert.Equal(t, "the data type of the source data must be NANOTIMESTAMP, TIMESTAMP, DATE or DATETIME", err.Error()) + + srcDtl, err = NewDataTypeListWithRaw(DtNanoTimestamp, []time.Time{time.Date(2022, 1, 1, 1, 1, 1, 1, time.UTC)}) + assert.Nil(t, err) + + vct = NewVector(srcDtl) + res, err = CastDateTime(vct, DtDate) + assert.Nil(t, err) + assert.Equal(t, res.GetDataForm(), DfVector) + assert.Equal(t, res.String(), "vector([2022.01.01])") + + srcDtl, err = NewDataTypeListWithRaw(DtTimestamp, []time.Time{time.Date(2022, 1, 1, 1, 1, 1, 1, time.UTC)}) + assert.Nil(t, err) + + vct = NewVector(srcDtl) + res, err = CastDateTime(vct, DtMonth) + assert.Nil(t, err) + assert.Equal(t, res.GetDataForm(), DfVector) + assert.Equal(t, res.String(), "vector([2022.01M])") + + srcDtl, err = NewDataTypeListWithRaw(DtDatetime, []time.Time{time.Date(2022, 1, 1, 1, 1, 1, 1, time.UTC)}) + assert.Nil(t, err) + + vct = NewVector(srcDtl) + res, err = CastDateTime(vct, DtMonth) + assert.Nil(t, err) + assert.Equal(t, res.GetDataForm(), DfVector) + assert.Equal(t, res.String(), "vector([2022.01M])") + + srcDtl, err = NewDataTypeListWithRaw(DtDate, []time.Time{time.Date(2022, 1, 1, 1, 1, 1, 1, time.UTC)}) + assert.Nil(t, err) + + vct = NewVector(srcDtl) + res, err = CastDateTime(vct, DtDateHour) + assert.Nil(t, err) + assert.Equal(t, res.GetDataForm(), DfVector) + assert.Equal(t, res.String(), "vector([2022.01.01T00])") +} diff --git a/model/vector.go b/model/vector.go new file mode 100644 index 0000000..28a5bca --- /dev/null +++ b/model/vector.go @@ -0,0 +1,545 @@ +package model + +import ( + "errors" + "fmt" + "math" + "strings" + + "github.com/dolphindb/api-go/dialer/protocol" +) + +// Vector is a DataForm. +// Refer to https://www.dolphindb.cn/cn/help/130/DataTypesandStructures/DataForms/Vector/index.html for more details. +type Vector struct { + category *Category + + RowCount uint32 + ColumnCount uint32 + + // If the DataTypeByte of the Vector is less than 64, Data stores the values of the vector. + // If the DataTypeByte of the Vector is greater than 64 and less than 128, Data is invalid. + // If the DataTypeByte of the Vector is greater than 128, Data stores the indexs of the values and + // the value are stored in the Base of Extend. 
+ // You can call GetDataType() to get the DataTypeByte of the Vector. + Data DataTypeList + // ArrayVector is a special form of data for DolphinDB. Unlike a regular vector, + // each of its elements is an array with the same DataType, but the length can vary. + // ArrayVector is only valid when the DataTypeByte of the Vector is greater than 64 and less than 128. + // You can call GetDataType() to get the DataTypeByte of the Vector. + ArrayVector []*ArrayVector + // Extend is only valid when the DataTypeByte of the Vector is greater than 128. + // The Base of the Extend stores the values of the vector. + // You can call GetDataType() to get the DataTypeByte of the Vector. + Extend *DataTypeExtend +} + +// ArrayVector is an element type of Vector. +type ArrayVector struct { + rowCount uint16 + unit uint16 + lengths []byte + + data DataTypeList +} + +// DataTypeExtend is only valid for Symbol DataType. +type DataTypeExtend struct { + BaseID uint32 + BaseSize uint32 + + Base DataTypeList +} + +// NewVector returns an object of vector with specified data. +// You can instantiate the data by NewDataTypeList or NewDataTypeListWithRaw. +func NewVector(data DataTypeList) *Vector { + return &Vector{ + category: &Category{ + DataForm: DfVector, + DataType: data.DataType(), + }, + Data: data, + ColumnCount: 1, + RowCount: uint32(data.Len()), + } +} + +// NewVectorWithArrayVector returns an object of vector according to the data. +// You can instantiates the data by NewArrayVector. +func NewVectorWithArrayVector(data []*ArrayVector) *Vector { + if len(data) == 0 { + return nil + } + + dt := data[0].data.DataType() + 64 + if dt == 81 || dt == 82 { + return nil + } + + res := &Vector{ + category: &Category{ + DataForm: DfVector, + DataType: dt, + }, + ArrayVector: data, + ColumnCount: uint32(len(data)), + } + + for _, v := range data { + res.RowCount += uint32(v.rowCount) + } + + res.category = newCategory(byte(DfVector), byte(dt)) + return res +} + +// NewArrayVector returns an object of ArrayVector with specified data. +// You can initialize the data by using NewDataTypeList or NewDataTypeListWithRaw. +func NewArrayVector(vl []*Vector) []*ArrayVector { + res := make([]*ArrayVector, len(vl)) + for k, v := range vl { + av := &ArrayVector{ + data: v.Data, + rowCount: 1, + } + + row := v.Rows() + + av.unit, av.lengths = packArrayVector(av.rowCount, uint32(row)) + res[k] = av + } + + return res +} + +// GetDataForm returns the byte type of the DataForm. +func (vct *Vector) GetDataForm() DataFormByte { + return DfVector +} + +// Set sets DataType of the vct with ind. +// ArrayVector does not support Set. +// If ind >= len(vct.Data), return an error, +// otherwise cover the original value. +func (vct *Vector) Set(ind int, d DataType) error { + if vct.Extend != nil { + if vct.Extend.BaseSize == 0 { + return nil + } + + ind := vct.Extend.Base.AsOf(d) + d = &dataType{ + t: DtInt, + bo: protocol.LittleEndian, + data: int32(ind), + } + } + + return vct.Data.Set(ind, d) +} + +// Get gets DataType from vct. +// If ind exceeds the size of Vector, return nil. 
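+// For example (the nil check guards against an out-of-range index):
+//
+//	if dt := vct.Get(0); dt != nil {
+//		fmt.Println(dt.String())
+//	}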
+func (vct *Vector) Get(ind int) DataType { + if ind >= vct.Rows()*int(vct.ColumnCount) && vct.ArrayVector == nil { + return nil + } + + switch { + case vct.Extend != nil: + if vct.Extend.BaseSize == 0 { + return &dataType{ + t: DtString, + bo: protocol.LittleEndian, + data: "", + } + } + + raw := vct.Data.ElementValue(ind) + if raw == nil { + return nil + } + + return vct.Extend.Base.Get(int(raw.(int32))) + case vct.Data != nil: + return vct.Data.Get(ind) + case vct.ArrayVector != nil: + for _, v := range vct.ArrayVector { + rc := v.data.Len() + if ind < rc { + return v.data.Get(ind) + } + + ind -= rc + } + } + + return nil +} + +// GetVectorValue returns the element of the ArrayVector based on the ind. +func (vct *Vector) GetVectorValue(ind int) *Vector { + if ind >= vct.Rows() { + return nil + } + + if vct.ArrayVector != nil { + for _, v := range vct.ArrayVector { + rc := int(v.rowCount) + if ind < rc { + st := 0 + for k, l := range v.lengths { + if k == ind { + return NewVector(v.data.Sub(st, st+int(l))) + } + + st += int(l) + } + } + + ind -= rc + } + } + + return nil +} + +// // IsArrayVector checks whether the vector is ArrayVector +// func (vct *Vector) IsArrayVector() bool { +// if vct.GetDataType() > 64 && vct.GetDataType() < 128 { +// return true +// } + +// return false +// } + +// GetDataType returns the byte type of the DataType. +func (vct *Vector) GetDataType() DataTypeByte { + return vct.category.DataType +} + +// Combine combines two Vectors and returns a new one. +// ArrayVector does not support Combine. +func (vct *Vector) Combine(in *Vector) (*Vector, error) { + if vct.Extend != nil { + if in.Extend == nil { + return nil, errors.New("invalid vector, the Extend of input cannot be nil") + } + + indMap, nb := combineBase(vct, in) + data := combineData(indMap, vct, in) + return &Vector{ + category: &Category{DataForm: DfVector, DataType: DtSymbol + 128}, + ColumnCount: 1, + RowCount: vct.RowCount + in.RowCount, + Extend: &DataTypeExtend{ + BaseID: vct.Extend.BaseID, + BaseSize: uint32(nb.count), + Base: nb, + }, + Data: data, + }, nil + } else if vct.ArrayVector == nil { + dtl, err := vct.Data.combine(in.Data) + if err != nil { + return nil, err + } + + return NewVector(dtl), nil + } + + return nil, errors.New("ArrayVector does not support Combine") +} + +func combineData(indMap map[int]int, vct, in *Vector) *dataTypeList { + od := vct.Data.(*dataTypeList) + idt := in.Data.(*dataTypeList) + d := &dataTypeList{ + t: DtInt, + bo: protocol.LittleEndian, + count: od.Len() + idt.Len(), + intData: make([]int32, od.Len(), od.Len()+idt.Len()), + } + + copy(d.intData, od.intData) + + for _, v := range idt.intData { + d.intData = append(d.intData, int32(indMap[int(v)])) + } + + return d +} + +func combineBase(vct, in *Vector) (map[int]int, *dataTypeList) { + l := vct.Extend.Base.Len() + obd := vct.Extend.Base.(*dataTypeList) + ibd := in.Extend.Base.(*dataTypeList) + + nb := &dataTypeList{ + t: DtString, + bo: protocol.LittleEndian, + count: l, + stringData: make([]string, l), + } + + copy(nb.stringData, obd.stringData) + + indMap := make(map[int]int) + for k, v := range ibd.stringData { + if ind, ok := contains(obd.stringData, v); ok { + indMap[k] = ind + } else { + indMap[k] = nb.count + nb.stringData = append(nb.stringData, v) + nb.count++ + } + } + + return indMap, nb +} + +// SetNull sets the value of DataType in vector to null based on ind. +// ArrayVector does not support SetNull. 
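+// For example (as exercised in the tests):
+//
+//	vct.SetNull(1)
+//	_ = vct.IsNull(1) // true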
+func (vct *Vector) SetNull(ind int) { + switch { + case vct.Extend != nil: + if vct.Extend.BaseSize == 0 { + return + } + + if err := vct.Data.Set(ind, &dataType{t: DtInt, data: int32(0)}); err != nil { + return + } + case vct.Data != nil: + vct.Data.SetNull(ind) + case vct.ArrayVector != nil: + } +} + +// IsNull checks whether the value of DataType in vector is null based on the index. +// ArrayVector does not support IsNull. +func (vct *Vector) IsNull(ind int) bool { + switch { + case vct.Extend != nil: + if vct.Extend.BaseSize == 0 { + return true + } + + return vct.Data.ElementString(ind) == "0" + case vct.Data != nil: + return vct.Data.IsNull(ind) + case vct.ArrayVector != nil: + for _, av := range vct.ArrayVector { + if ind < av.data.Len() { + return av.data.IsNull(ind) + } + + ind -= av.data.Len() + } + } + + return true +} + +// Rows returns the row num of the DataForm. +func (vct *Vector) Rows() int { + return int(vct.RowCount) +} + +// HashBucket calculates the hash with the bucket and the value whose index is ind in vct. +func (vct *Vector) HashBucket(ind, bucket int) int { + if vct.Data != nil { + dt := vct.Data.Get(ind) + if dt.DataType() == DtAny { + sca := dt.Value().(*Scalar) + dt = sca.DataType + } + + return dt.HashBucket(bucket) + } + + return 0 +} + +// AsOf returns the index of the d in vct. +// ArrayVector does not support AsOf. +// If d is not in vct, returns -1. +func (vct *Vector) AsOf(d DataType) int { + return vct.Data.AsOf(d) +} + +// Render serializes the DataForm with bo and input it into w. +func (vct *Vector) Render(w *protocol.Writer, bo protocol.ByteOrder) error { + err := vct.category.render(w) + if err != nil { + return err + } + + err = vct.renderLength(w, bo) + if err != nil { + return err + } + + switch { + case vct.category.DataType > 128: + err = vct.renderExtend(w, bo) + case vct.category.DataType > 64: + err = vct.renderArrayVector(w, bo) + default: + err = vct.renderData(w, bo) + } + + return err +} + +// GetSubvector instantiates a Vector with the values in indexes. +// ArrayVector does not support GetSubvector. +// The specified indexes should be less than the length of Vector. +func (vct *Vector) GetSubvector(indexes []int) *Vector { + if vct.Data == nil { + return nil + } + + res := NewVector(vct.Data.GetSubList(indexes)) + if vct.Extend != nil { + res.Extend = vct.Extend + res.category = vct.category + } + + return res +} + +// GetDataTypeString returns the string format of the DataType. +func (vct *Vector) GetDataTypeString() string { + return GetDataTypeString(vct.category.DataType) +} + +// String returns the string of the DataForm. 
+func (vct *Vector) String() string { + by := strings.Builder{} + + if data := vct.formatString(); data != nil { + by.WriteString(fmt.Sprintf("vector<%s>([%s])", GetDataTypeString(vct.category.DataType), strings.Join(data, ", "))) + } else { + by.WriteString(fmt.Sprintf("vector<%s>(null)", GetDataTypeString(vct.category.DataType))) + } + + return by.String() +} + +func (vct *Vector) renderArrayVector(w *protocol.Writer, bo protocol.ByteOrder) error { + for _, v := range vct.ArrayVector { + buf := make([]byte, 4) + + bo.PutUint16(buf[0:2], v.rowCount) + bo.PutUint16(buf[2:4], v.unit) + err := w.Write(buf) + if err != nil { + return err + } + + if v.data.Len() > 0 { + err = w.Write(v.lengths) + if err != nil { + return err + } + } + + err = v.data.Render(w, bo) + if err != nil { + return err + } + } + + return nil +} + +func (vct *Vector) renderExtend(w *protocol.Writer, bo protocol.ByteOrder) error { + ext := vct.Extend + buf := make([]byte, 8) + bo.PutUint32(buf[0:4], ext.BaseID) + bo.PutUint32(buf[4:8], ext.BaseSize) + err := w.Write(buf) + if err != nil { + return err + } + + if ext.BaseSize != 0 { + err = ext.Base.Render(w, bo) + if err != nil { + return err + } + } + + err = vct.Data.Render(w, bo) + if err != nil { + return err + } + + return nil +} + +func (vct *Vector) renderLength(w *protocol.Writer, bo protocol.ByteOrder) error { + buf := make([]byte, 8) + bo.PutUint32(buf[0:4], vct.RowCount) + bo.PutUint32(buf[4:8], vct.ColumnCount) + + return w.Write(buf) +} + +func (vct *Vector) renderData(w *protocol.Writer, bo protocol.ByteOrder) error { + return vct.Data.Render(w, bo) +} + +func (vct *Vector) formatString() []string { + val := make([]string, 0) + switch { + case vct.Extend != nil: + d := vct.Data.(*dataTypeList) + sl := vct.Extend.Base.StringList() + for _, v := range d.intData { + val = append(val, sl[v]) + } + case vct.Data != nil: + val = vct.Data.StringList() + case len(vct.ArrayVector) > 0: + for _, v := range vct.ArrayVector { + asl := v.data.StringList() + si := 0 + for _, l := range v.lengths { + length := int(l) + val = append(val, fmt.Sprintf("[%s]", strings.Join(asl[si:si+length], ", "))) + si += length + } + } + } + + return val +} + +func packArrayVector(rowcount uint16, length uint32) (uint16, []byte) { + switch { + case length < math.MaxUint8: + res := make([]int8, rowcount) + for i := 0; i < int(rowcount); i++ { + res[i] = int8(length) + } + + return 1, protocol.ByteSliceFromInt8Slice(res) + case length < math.MaxUint16: + res := make([]int16, rowcount) + for i := 0; i < int(rowcount); i++ { + res[i] = int16(length) + } + + return 2, protocol.ByteSliceFromInt16Slice(res) + default: + res := make([]int32, rowcount) + for i := 0; i < int(rowcount); i++ { + res[i] = int32(length) + } + + return 4, protocol.ByteSliceFromInt32Slice(res) + } +} diff --git a/model/vector_test.go b/model/vector_test.go new file mode 100644 index 0000000..9e8e3d7 --- /dev/null +++ b/model/vector_test.go @@ -0,0 +1,108 @@ +package model + +import ( + "bytes" + "testing" + + "github.com/dolphindb/api-go/dialer/protocol" + + "github.com/stretchr/testify/assert" +) + +func TestVector(t *testing.T) { + by := bytes.NewBufferString("") + w := protocol.NewWriter(by) + + dtl, err := NewDataTypeListWithRaw(DtInt, []int32{0, 1}) + assert.Nil(t, err) + assert.Equal(t, dtl.DataType(), DtInt) + + vc := NewVector(dtl) + assert.Equal(t, vc.GetDataForm(), DfVector) + assert.Equal(t, vc.GetDataType(), DtInt) + assert.Equal(t, vc.GetDataTypeString(), "int") + assert.Equal(t, vc.Rows(), 2) + 
assert.Equal(t, vc.AsOf(dtl.Get(0)), 0) + assert.Equal(t, vc.AsOf(dtl.Get(1)), 1) + assert.Equal(t, vc.HashBucket(0, 10), 0) + assert.Equal(t, vc.HashBucket(1, 10), 1) + + vc.SetNull(1) + assert.True(t, vc.IsNull(1)) + + err = vc.Set(1, vc.Get(0)) + assert.Nil(t, err) + assert.Equal(t, vc.Rows(), 2) + + err = vc.Render(w, protocol.LittleEndian) + w.Flush() + assert.Nil(t, err) + assert.Equal(t, by.String(), "\x04\x01\x02\x00\x00\x00\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00") + assert.Equal(t, vc.String(), "vector([0, 0])") + + vc.Data = dtl.Sub(0, 1) + vc.RowCount = 1 + dtl, err = NewDataTypeListWithRaw(DtString, []string{"vector", "zero"}) + assert.Nil(t, err) + + vc.Extend = &DataTypeExtend{ + BaseID: 10, + BaseSize: 1, + Base: dtl.Sub(1, 2), + } + vc.category.DataType = 145 + + by.Reset() + err = vc.Render(w, protocol.LittleEndian) + w.Flush() + assert.Nil(t, err) + assert.Equal(t, vc.Rows(), 1) + assert.Equal(t, by.String(), "\x91\x01\x01\x00\x00\x00\x01\x00\x00\x00\n\x00\x00\x00\x01\x00\x00\x00zero\x00\x00\x00\x00\x00") + assert.Equal(t, vc.String(), "vector([zero])") + + combineDtl, err := NewDataTypeListWithRaw(DtInt, []int32{0, 1, 1, 0}) + assert.Nil(t, err) + tmp := NewVector(combineDtl) + tmp.Extend = &DataTypeExtend{ + BaseID: 10, + BaseSize: 2, + Base: dtl, + } + tmp.category.DataType = 145 + cv, err := vc.Combine(tmp) + assert.Nil(t, err) + assert.Equal(t, cv.String(), "vector([zero, vector, zero, zero, vector])") + + dt := vc.Get(0) + assert.Equal(t, dt.String(), "zero") + + vc.SetNull(0) + assert.True(t, vc.IsNull(0)) + + dtl, err = NewDataTypeListWithRaw(DtInt, []int32{1, 2, 3}) + assert.Nil(t, err) + assert.Equal(t, dtl.DataType(), DtInt) + + by.Reset() + + vc = NewVector(dtl) + vc = NewVectorWithArrayVector(NewArrayVector([]*Vector{vc})) + err = vc.Render(w, protocol.LittleEndian) + w.Flush() + assert.Nil(t, err) + assert.Equal(t, vc.Rows(), 1) + assert.Equal(t, by.String(), "D\x01\x01\x00\x00\x00\x01\x00\x00\x00\x01\x00\x01\x00\x03\x01\x00\x00\x00\x02\x00\x00\x00\x03\x00\x00\x00") + assert.Equal(t, vc.String(), "vector([[1, 2, 3]])") + + v := vc.GetVectorValue(0) + assert.Equal(t, v.String(), "vector([1, 2, 3])") + assert.False(t, vc.IsNull(0)) + + unit, byt := packArrayVector(4, 30000) + assert.Equal(t, unit, uint16(2)) + assert.Equal(t, byt, []byte{0x30, 0x75, 0x30, 0x75, 0x30, 0x75, 0x30, 0x75}) + + unit, byt = packArrayVector(4, 65536) + assert.Equal(t, unit, uint16(4)) + assert.Equal(t, byt, []byte{0x0, 0x0, 0x1, 0x0, 0x0, 0x0, 0x1, 0x0, 0x0, 0x0, 0x1, 0x0, 0x0, 0x0, 0x1, 0x0}) +} diff --git a/multigoroutinetable/multi_goroutine_table.go b/multigoroutinetable/multi_goroutine_table.go new file mode 100644 index 0000000..1ad2297 --- /dev/null +++ b/multigoroutinetable/multi_goroutine_table.go @@ -0,0 +1,489 @@ +package multigoroutinetable + +import ( + "context" + "errors" + "fmt" + "strconv" + "strings" + + "github.com/dolphindb/api-go/dialer" + "github.com/dolphindb/api-go/domain" + "github.com/dolphindb/api-go/model" +) + +// MultiGoroutineTable is used to insert data into a table with multiple goroutines. +type MultiGoroutineTable struct { + database, tableName, errorInfo string + batchSize, throttle, goroutineByColIndexForNonPartition int + hasError, isPartition bool + colNames []string + colTypes []int + partitionColumnIdx int32 + + partitionDomain domain.Domain + goroutines []*writerGoroutine +} + +// Option is used to configure MultiGoroutineTable. 
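+// An illustrative configuration (the values below are examples, not defaults):
+//
+//	opt := &Option{
+//		Database:       "dfs://db",
+//		TableName:      "tb",
+//		Address:        "127.0.0.1:8848",
+//		UserID:         "user",
+//		Password:       "password",
+//		BatchSize:      1000,
+//		Throttle:       100,
+//		GoroutineCount: 2,
+//		PartitionCol:   "sym",
+//	}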
+type Option struct {
+	// database path or database handle
+	Database string
+	// name of the table
+	TableName string
+	// address of the DolphinDB server
+	Address string
+	// user ID
+	UserID string
+	// password of the user
+	Password string
+	// the number of records processed at one time
+	BatchSize int
+	// waiting time in milliseconds before a write is triggered when fewer than BatchSize records are buffered
+	Throttle int
+	// the number of writer goroutines
+	GoroutineCount int
+	// the partitioning column name
+	PartitionCol string
+}
+
+// NewMultiGoroutineTable instantiates a MultiGoroutineTable according to the Option.
+func NewMultiGoroutineTable(opt *Option) (*MultiGoroutineTable, error) {
+	mtt, err := initMultiGoroutineTable(opt)
+	if err != nil {
+		fmt.Printf("Failed to instantiate MultiGoroutineTable: %s\n", err.Error())
+		return nil, err
+	}
+
+	schema, err := mtt.getSchema(opt)
+	if err != nil {
+		fmt.Printf("Failed to get schema: %s\n", err.Error())
+		return nil, err
+	}
+
+	err = mtt.handleSchemaColumn(schema)
+	if err != nil {
+		fmt.Printf("Failed to handle columns of the table returned by function schema: %s\n", err.Error())
+		return nil, err
+	}
+
+	err = mtt.handlePartitionColumnName(schema, opt)
+	if err != nil {
+		fmt.Printf("Failed to handle PartitionColumnName: %s\n", err.Error())
+		return nil, err
+	}
+
+	for i := 0; i < opt.GoroutineCount; i++ {
+		conn, err := dialer.NewSimpleConn(context.TODO(), opt.Address, opt.UserID, opt.Password)
+		if err != nil {
+			fmt.Printf("Failed to instantiate a simple connection: %s\n", err.Error())
+			return nil, err
+		}
+
+		wt := newWriterGoroutine(i, mtt, conn)
+		mtt.goroutines[i] = wt
+	}
+
+	return mtt, nil
+}
+
+// Insert inserts data into the table.
+// The length of args must equal the number of columns of the table.
+func (mtt *MultiGoroutineTable) Insert(args ...interface{}) error {
+	if mtt.isExist() {
+		return errors.New("goroutine already exists")
+	}
+
+	if len(args) != len(mtt.colTypes) {
+		return errors.New("column counts don't match")
+	}
+
+	prow := make([]model.DataType, len(args))
+	for k, v := range args {
+		d, err := packDataType(model.DataTypeByte(mtt.colTypes[k]), v)
+		if err != nil {
+			fmt.Printf("Failed to instantiate DataType with arg: %s\n", err.Error())
+			return err
+		}
+
+		prow[k] = d
+	}
+
+	goroutineInd, err := mtt.getGoroutineInd(prow)
+	if err != nil {
+		fmt.Printf("Failed to get goroutine index: %s\n", err.Error())
+		return err
+	}
+
+	mtt.insertGoroutineWrite(goroutineInd, prow)
+
+	return nil
+}
+
+func packDataType(dt model.DataTypeByte, v interface{}) (model.DataType, error) {
+	if d, ok := v.(model.DataType); ok {
+		return d, nil
+	}
+
+	if dt > 64 {
+		if v == nil {
+			dtl := model.NewEmptyDataTypeList(dt, 1)
+			vct := model.NewVector(dtl)
+			return model.NewDataType(model.DtAny, vct)
+		}
+
+		dtl, err := model.NewDataTypeListWithRaw(dt, v)
+		if err != nil {
+			return nil, err
+		}
+
+		if dtl.Len() == 0 {
+			dtl = model.NewEmptyDataTypeList(dt, 1)
+		}
+
+		vct := model.NewVector(dtl)
+		return model.NewDataType(model.DtAny, vct)
+	}
+
+	return model.NewDataType(dt, v)
+}
+
+// GetStatus returns the status for the instance of MultiGoroutineTable.
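+// For example:
+//
+//	s := mtt.GetStatus()
+//	fmt.Printf("sent=%d unsent=%d failed=%d exit=%v\n",
+//		s.SentRows, s.UnSentRows, s.FailedRows, s.IsExit)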
+func (mtt *MultiGoroutineTable) GetStatus() *Status { + s := &Status{ + ErrMsg: mtt.errorInfo, + IsExit: mtt.isExist(), + GoroutineStatusList: make([]*GoroutineStatus, len(mtt.goroutines)), + } + + for k, v := range mtt.goroutines { + ts := new(GoroutineStatus) + v.getStatus(ts) + s.SentRows += ts.SentRows + s.UnSentRows += ts.UnSentRows + s.FailedRows += ts.FailedRows + s.GoroutineStatusList[k] = ts + } + + return s +} + +// GetUnwrittenData returns the total of unsent data and failed data. +func (mtt *MultiGoroutineTable) GetUnwrittenData() [][]model.DataType { + data := make([][]model.DataType, 0) +loop: + for _, v := range mtt.goroutines { + failed: + for { + select { + case val := <-v.failedQueue.Out: + data = append(data, val.([]model.DataType)) + default: + break failed + } + } + + for { + select { + case val := <-v.writeQueue.Out: + data = append(data, val.([]model.DataType)) + default: + break loop + } + } + } + + return data +} + +// InsertUnwrittenData inserts data into the table. +// You can insert data obtained from GetUnwrittenData with this function. +func (mtt *MultiGoroutineTable) InsertUnwrittenData(records [][]model.DataType) error { + if mtt.isExist() { + return errors.New("goroutine already exists") + } + + if len(mtt.goroutines) > 1 { + if mtt.isPartition { + pvc, err := mtt.packVector(records, int(mtt.partitionColumnIdx)) + if err != nil { + fmt.Printf("Failed to pack vector: %s\n", err.Error()) + return err + } + + goroutineIndexes, err := mtt.partitionDomain.GetPartitionKeys(pvc) + if err != nil { + fmt.Printf("Failed to call GetPartitionKeys: %s\n", err.Error()) + return err + } + + for k, v := range goroutineIndexes { + mtt.insertGoroutineWrite(v, records[k]) + } + } else { + pvc, err := mtt.packVector(records, mtt.goroutineByColIndexForNonPartition) + if err != nil { + fmt.Printf("Failed to package vector: %s\n", err.Error()) + return err + } + + for k, v := range records { + goroutineInd := pvc.HashBucket(k, len(mtt.goroutines)) + mtt.insertGoroutineWrite(goroutineInd, v) + } + } + } else { + for _, v := range records { + mtt.insertGoroutineWrite(0, v) + } + } + + return nil +} + +// WaitForGoroutineCompletion waits for the data to be sent completely and exits the MultiGoroutineTable. +// An error will be thrown if you call Insert or InsertUnwrittenData after the MultiGoroutineTable exits. 
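+// A typical shutdown sequence (a sketch, not the only valid usage):
+//
+//	mtt.WaitForGoroutineCompletion()
+//	if s := mtt.GetStatus(); s.ErrMsg != "" {
+//		unwritten := mtt.GetUnwrittenData() // rows that were not sent
+//		_ = unwritten                       // inspect or re-insert elsewhere
+//	}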
+func (mtt *MultiGoroutineTable) WaitForGoroutineCompletion() {
+	for _, v := range mtt.goroutines {
+		v.stop()
+		//nolint
+		for !v.isFinished {
+			// busy-wait until the writer goroutine drains its queue and exits
+		}
+
+		if v.Conn != nil {
+			v.Conn.Close()
+		}
+
+		v.Conn = nil
+	}
+
+	mtt.hasError = true
+}
+
+func (mtt *MultiGoroutineTable) handleSchemaColumn(schema *model.Dictionary) error {
+	dt, err := schema.Get("colDefs")
+	if err != nil {
+		fmt.Printf("Failed to get colDefs: %s\n", err.Error())
+		return err
+	}
+
+	colDefs := dt.Value().(*model.Table)
+	colDefsName := colDefs.GetColumnByName("name")
+	mtt.colNames = colDefsName.Data.StringList()
+
+	colDefsTypeInt := colDefs.GetColumnByName("typeInt")
+	intStr := colDefsTypeInt.Data.StringList()
+
+	mtt.colTypes = make([]int, len(intStr))
+	for k, v := range intStr {
+		mtt.colTypes[k], err = strconv.Atoi(v)
+		if err != nil {
+			fmt.Printf("Failed to parse colTypes: %s\n", err.Error())
+			return err
+		}
+	}
+
+	return nil
+}
+
+func (mtt *MultiGoroutineTable) getSchema(opt *Option) (*model.Dictionary, error) {
+	conn, err := dialer.NewSimpleConn(context.TODO(), opt.Address, opt.UserID, opt.Password)
+	if err != nil {
+		fmt.Printf("Failed to instantiate a simple connection: %s\n", err.Error())
+		return nil, err
+	}
+
+	defer conn.Close()
+
+	var df model.DataForm
+	if opt.Database == "" {
+		df, err = conn.RunScript(fmt.Sprintf("schema(%s)", opt.TableName))
+	} else {
+		df, err = conn.RunScript(fmt.Sprintf("schema(loadTable(\"%s\",\"%s\"))", opt.Database, opt.TableName))
+	}
+
+	if err != nil {
+		fmt.Printf("Failed to call function schema with the specified table %s: %s\n", opt.TableName, err.Error())
+		return nil, err
+	}
+
+	return df.(*model.Dictionary), nil
+}
+
+func (mtt *MultiGoroutineTable) handlePartitionColumnName(schema *model.Dictionary, opt *Option) error {
+	dt, err := schema.Get("partitionColumnName")
+	if err != nil {
+		if !strings.Contains(err.Error(), "invalid key") {
+			fmt.Printf("Failed to get partitionColumnName: %s\n", err.Error())
+			return err
+		}
+
+		err = mtt.handleNoPartitionColumnName(opt)
+		if err != nil {
+			return err
+		}
+	} else {
+		mtt.isPartition = true
+		partColNames := dt.Value().(model.DataForm)
+
+		partitionSchema, partitionType, err := mtt.handlePartColNames(partColNames, schema, opt.PartitionCol)
+		if err != nil {
+			fmt.Printf("Failed to handle partColNames: %s\n", err.Error())
+			return err
+		}
+
+		colType := mtt.colTypes[mtt.partitionColumnIdx]
+		partitionColType := domain.GetPartitionType(int(partitionType))
+		mtt.partitionDomain, err = domain.CreateDomain(partitionColType, model.DataTypeByte(colType), partitionSchema)
+		if err != nil {
+			fmt.Printf("Failed to create domain: %s\n", err.Error())
+			return err
+		}
+	}
+
+	return nil
+}
+
+func (mtt *MultiGoroutineTable) handleNoPartitionColumnName(opt *Option) error {
+	if opt.Database != "" && opt.GoroutineCount > 1 {
+		return errors.New("the parameter GoroutineCount must be 1 for a dimension table")
+	}
+
+	mtt.isPartition = false
+
+	if opt.PartitionCol != "" {
+		ind := -1
+		for i := 0; i < len(mtt.colNames); i++ {
+			if mtt.colNames[i] == opt.PartitionCol {
+				ind = i
+				break
+			}
+		}
+
+		if ind < 0 {
+			return fmt.Errorf("no match found for %s", opt.PartitionCol)
+		}
+
+		mtt.goroutineByColIndexForNonPartition = ind
+	}
+
+	return nil
+}
+
+func initMultiGoroutineTable(opt *Option) (*MultiGoroutineTable, error) {
+	if opt.GoroutineCount < 1 {
+		return nil, errors.New("the parameter GoroutineCount must be greater than or equal to 1")
+	}
+
+	mtt := &MultiGoroutineTable{
+		database:   opt.Database,
+		tableName:  opt.TableName,
+		batchSize:  opt.BatchSize,
+		throttle:   opt.Throttle,
+		hasError:   false,
+		goroutines: make([]*writerGoroutine, opt.GoroutineCount),
+	}
+
+	if opt.BatchSize < 1 {
+		return nil, errors.New("the parameter BatchSize must be greater than or equal to 1")
+	}
+
+	if opt.Throttle < 1 {
+		return nil, errors.New("the parameter Throttle must be greater than or equal to 1")
+	}
+
+	if opt.GoroutineCount > 1 && len(opt.PartitionCol) < 1 {
+		return nil, errors.New("the parameter PartitionCol must be specified when GoroutineCount is greater than 1")
+	}
+
+	return mtt, nil
+}
+
+func (mtt *MultiGoroutineTable) getGoroutineInd(prow []model.DataType) (int, error) {
+	var goroutineInd int
+	if len(mtt.goroutines) > 1 {
+		if mtt.isPartition {
+			s := prow[mtt.partitionColumnIdx]
+			if s != nil {
+				dtl := model.NewDataTypeList(s.DataType(), []model.DataType{s})
+				pvc := model.NewVector(dtl)
+
+				indexes, err := mtt.partitionDomain.GetPartitionKeys(pvc)
+				if err != nil {
+					fmt.Printf("Failed to call GetPartitionKeys: %s\n", err.Error())
+					return 0, err
+				}
+
+				if len(indexes) > 0 {
+					goroutineInd = indexes[0]
+				} else {
+					return 0, errors.New("failed to obtain the partition scheme")
+				}
+			} else {
+				goroutineInd = 0
+			}
+		} else {
+			if prow[mtt.goroutineByColIndexForNonPartition] != nil {
+				s := prow[mtt.goroutineByColIndexForNonPartition]
+				dtl := model.NewDataTypeList(s.DataType(), []model.DataType{s})
+				pvc := model.NewVector(dtl)
+				goroutineInd = pvc.HashBucket(0, len(mtt.goroutines))
+			} else {
+				goroutineInd = 0
+			}
+		}
+	} else {
+		goroutineInd = 0
+	}
+
+	return goroutineInd, nil
+}
+
+func (mtt *MultiGoroutineTable) insertGoroutineWrite(hashKey int, prow []model.DataType) {
+	if hashKey < 0 {
+		hashKey = 0
+	}
+
+	ind := hashKey % len(mtt.goroutines)
+	wt := mtt.goroutines[ind]
+	wt.writeQueue.In <- prow
+
+	select {
+	case wt.signal <- true:
+	default:
+	}
+}
+
+func (mtt *MultiGoroutineTable) packVector(records [][]model.DataType, ind int) (*model.Vector, error) {
+	dtArr := make([]model.DataType, len(records))
+	dt := model.DataTypeByte(mtt.colTypes[ind])
+	for k, row := range records {
+		if len(row) != len(mtt.colTypes) {
+			return nil, errors.New("column counts don't match")
+		}
+
+		if !isEqualDataTypeByte(row[ind].DataType(), dt) {
+			return nil, fmt.Errorf("column doesn't match.
Expect %s, but get %s", + model.GetDataTypeString(row[ind].DataType()), model.GetDataTypeString(dt)) + } + + dtArr[k] = row[ind] + } + + dtl := model.NewDataTypeList(dt, dtArr) + return model.NewVector(dtl), nil +} + +func (mtt *MultiGoroutineTable) isExist() bool { + return mtt.hasError +} + +func isEqualDataTypeByte(a, b model.DataTypeByte) bool { + if a == b || (a == model.DtSymbol && b == model.DtString) || + (b == model.DtSymbol && a == model.DtString) { + return true + } + + return false +} diff --git a/multigoroutinetable/multi_goroutine_table_test.go b/multigoroutinetable/multi_goroutine_table_test.go new file mode 100644 index 0000000..a1e2407 --- /dev/null +++ b/multigoroutinetable/multi_goroutine_table_test.go @@ -0,0 +1,203 @@ +package multigoroutinetable + +import ( + "net" + "os" + "strings" + "testing" + "time" + + "github.com/dolphindb/api-go/model" + + "github.com/stretchr/testify/assert" +) + +const testAddr = "127.0.0.1:3001" + +func TestMultiGoroutineTable(t *testing.T) { + opt := &Option{ + GoroutineCount: 2, + BatchSize: 1, + Throttle: 1, + PartitionCol: "sym", + Database: "dfs://db", + TableName: "tb", + UserID: "user", + Password: "password", + Address: testAddr, + } + mtt, err := NewMultiGoroutineTable(opt) + assert.Nil(t, err) + + err = mtt.Insert(time.Date(1970, 1, 1, 1, 1, 1, 1, time.UTC), "insert") + assert.Nil(t, err) + err = mtt.Insert(time.Date(1970, 1, 1, 1, 1, 1, 1, time.UTC), "success") + assert.Nil(t, err) + + df := mtt.GetUnwrittenData() + assert.Equal(t, len(df), 0) + + date, err := model.NewDataType(model.DtDate, time.Date(1970, 1, 1, 1, 1, 1, 1, time.UTC)) + assert.Nil(t, err) + + sym, err := model.NewDataType(model.DtString, "insertFailed") + assert.Nil(t, err) + + err = mtt.InsertUnwrittenData([][]model.DataType{ + { + date, sym, + }, + }) + assert.Nil(t, err) + + mtt.WaitForGoroutineCompletion() + + sts := mtt.GetStatus() + assert.Equal(t, sts.IsExit, true) + assert.Equal(t, sts.ErrMsg, "invalid response format. 
first line items count [2] is less than 3") + assert.Equal(t, sts.FailedRows, 1) + assert.Equal(t, sts.SentRows, 2) + + opt.TableName = "db" + opt.Database = "" + mtt, err = NewMultiGoroutineTable(opt) + assert.Nil(t, err) + err = mtt.Insert(time.Date(1970, 1, 1, 1, 1, 1, 1, time.UTC), "insert") + assert.Nil(t, err) + + err = mtt.Insert(time.Date(1970, 1, 1, 1, 1, 1, 1, time.UTC), "tested") + assert.Nil(t, err) + + err = mtt.Insert(time.Date(1970, 1, 1, 1, 1, 1, 1, time.UTC), "sample") + assert.Nil(t, err) + + err = mtt.Insert(time.Date(1970, 1, 1, 1, 1, 1, 1, time.UTC), "success") + assert.Nil(t, err) + + df = mtt.GetUnwrittenData() + err = mtt.InsertUnwrittenData(df) + assert.Nil(t, err) + + mtt.WaitForGoroutineCompletion() + sts = mtt.GetStatus() + assert.Equal(t, sts.String(), "errMsg : \nisExit : true\nsentRows : 4\nunSentRows : 0\nsendFailedRows : 0\ngoroutineStatus :\n goroutineIndex: 0, sentRows: 1, unSentRows: 0, sendFailedRows: 0\n goroutineIndex: 1, sentRows: 3, unSentRows: 0, sendFailedRows: 0\n") + + opt.GoroutineCount = 1 + opt.Database = "dbScalar" + _, err = NewMultiGoroutineTable(opt) + assert.Nil(t, err) +} + +func TestMain(m *testing.M) { + exit := make(chan bool) + ln, err := net.Listen("tcp", testAddr) + if err != nil { + return + } + + go func() { + for !isExit(exit) { + conn, err := ln.Accept() + if err != nil { + return + } + + go handleData(conn) + } + + ln.Close() + }() + + exitCode := m.Run() + + close(exit) + + os.Exit(exitCode) +} + +func handleData(conn net.Conn) { + res := make([]byte, 0) + for { + buf := make([]byte, 512) + l, err := conn.Read(buf) + if err != nil { + continue + } + + res = append(res, buf[0:l]...) + script := string(res) + length := len(res) + var resp []byte + + if length == 136 && strings.Contains(script, "tableInsert") { + resp = []byte{0x32, 0x30, 0x32, 0x36, 0x37, 0x33, 0x35, 0x39, 0x20, 0x30, 0x0a, 0x4f, 0x4b, 0x0a} + } else if length == 49 && strings.Contains(script, "login") { + resp = []byte{0x32, 0x30, 0x32, 0x36, 0x37, 0x33, 0x35, 0x39, 0x20, 0x30, 0x20, 0x31, 0x0a, 0x4f, 0x4b, 0x0a} + } else if length == 15 && strings.Contains(script, "connect") { + resp = []byte{0x32, 0x30, 0x32, 0x36, 0x37, 0x33, 0x35, 0x39, 0x20, 0x30, 0x20, 0x31, 0x0a, 0x4f, 0x4b, 0x0a} + } else if (length == 119 || length == 108 || length == 109 || length == 130 || length == 131 || length == 141 || + length == 142) && strings.Contains(script, "tableInsert") { + resp = []byte{0x32, 0x30, 0x32, 0x36, 0x37, 0x33, 0x35, 0x39, 0x20, 0x30, 0x20, 0x31, 0x0a, 0x4f, 0x4b, 0x0a} + } else if length == 53 && strings.Contains(script, "schema(loadTable") { + resp = []byte{0x32, 0x37, 0x38, 0x30, 0x30, 0x39, 0x30, 0x36, 0x35, 0x30, 0x20, 0x31, 0x20, 0x31, 0x0a, 0x4f, 0x4b, 0x0a, 0x19, 0x05, 0x12, 0x01, 0x06, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x70, 0x61, 0x72, 0x74, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x00, 0x63, 0x6f, 0x6c, 0x44, 0x65, 0x66, 0x73, 0x00, 0x70, 0x61, 0x72, 0x74, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x43, 0x6f, 0x6c, 0x75, 0x6d, 0x6e, 0x49, 0x6e, 0x64, 0x65, 0x78, 0x00, 0x70, 0x61, 0x72, 0x74, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x43, 0x6f, 0x6c, 0x75, 0x6d, + 0x6e, 0x54, 0x79, 0x70, 0x65, 0x00, 0x70, 0x61, 0x72, 0x74, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x54, 0x79, 0x70, 0x65, 0x4e, 0x61, 0x6d, 0x65, 0x00, 0x70, 0x61, 0x72, 0x74, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x54, 0x79, 0x70, 0x65, 0x00, 0x19, 0x01, 0x06, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x19, 0x01, 0x02, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x06, 0x01, 0x65, 0x00, 0x00, 
0x00, 0x01, 0x00, 0x00, 0x00, 0x4a, 0x4b, 0x00, 0x00, 0x49, 0x4b, 0x00, 0x00, 0x48, 0x4b, 0x00, 0x00, 0x47, 0x4b, 0x00, 0x00, 0x46, 0x4b, 0x00, 0x00, 0x45, 0x4b, 0x00, 0x00, 0x44, 0x4b, 0x00, 0x00, 0x43, 0x4b, 0x00, 0x00, 0x42, 0x4b, 0x00, 0x00, 0x41, 0x4b, + 0x00, 0x00, 0x40, 0x4b, 0x00, 0x00, 0x3f, 0x4b, 0x00, 0x00, 0x3e, 0x4b, 0x00, 0x00, 0x3d, 0x4b, 0x00, 0x00, 0x3c, 0x4b, 0x00, 0x00, 0x3b, 0x4b, 0x00, 0x00, 0x3a, 0x4b, 0x00, 0x00, 0x39, 0x4b, 0x00, 0x00, 0x38, 0x4b, 0x00, 0x00, 0x37, 0x4b, 0x00, 0x00, 0x36, 0x4b, 0x00, 0x00, 0x35, 0x4b, 0x00, 0x00, 0x34, 0x4b, 0x00, 0x00, 0x33, 0x4b, 0x00, 0x00, 0x32, 0x4b, 0x00, 0x00, 0x31, 0x4b, 0x00, 0x00, 0x30, 0x4b, 0x00, 0x00, 0x2f, 0x4b, 0x00, 0x00, 0x2e, 0x4b, 0x00, 0x00, 0x2d, 0x4b, 0x00, 0x00, 0x2c, 0x4b, 0x00, 0x00, 0x2b, 0x4b, 0x00, 0x00, 0x2a, 0x4b, 0x00, 0x00, 0x29, 0x4b, 0x00, 0x00, 0x28, 0x4b, 0x00, 0x00, 0x27, 0x4b, 0x00, 0x00, + 0x26, 0x4b, 0x00, 0x00, 0x25, 0x4b, 0x00, 0x00, 0x24, 0x4b, 0x00, 0x00, 0x23, 0x4b, 0x00, 0x00, 0x22, 0x4b, 0x00, 0x00, 0x21, 0x4b, 0x00, 0x00, 0x20, 0x4b, 0x00, 0x00, 0x1f, 0x4b, 0x00, 0x00, 0x1e, 0x4b, 0x00, 0x00, 0x1d, 0x4b, 0x00, 0x00, 0x1c, 0x4b, 0x00, 0x00, 0x1b, 0x4b, 0x00, 0x00, 0x1a, 0x4b, 0x00, 0x00, 0x19, 0x4b, 0x00, 0x00, 0x18, 0x4b, 0x00, 0x00, 0x17, 0x4b, 0x00, 0x00, 0x16, 0x4b, 0x00, 0x00, 0x15, 0x4b, 0x00, 0x00, 0x14, 0x4b, 0x00, 0x00, 0x13, 0x4b, 0x00, 0x00, 0x12, 0x4b, 0x00, 0x00, 0x11, 0x4b, 0x00, 0x00, 0x10, 0x4b, 0x00, 0x00, 0x0f, 0x4b, 0x00, 0x00, 0x0e, 0x4b, 0x00, 0x00, 0x0d, 0x4b, 0x00, 0x00, 0x0c, 0x4b, + 0x00, 0x00, 0x0b, 0x4b, 0x00, 0x00, 0x0a, 0x4b, 0x00, 0x00, 0x09, 0x4b, 0x00, 0x00, 0x08, 0x4b, 0x00, 0x00, 0x07, 0x4b, 0x00, 0x00, 0x06, 0x4b, 0x00, 0x00, 0x05, 0x4b, 0x00, 0x00, 0x04, 0x4b, 0x00, 0x00, 0x03, 0x4b, 0x00, 0x00, 0x02, 0x4b, 0x00, 0x00, 0x01, 0x4b, 0x00, 0x00, 0x00, 0x4b, 0x00, 0x00, 0xff, 0x4a, 0x00, 0x00, 0xfe, 0x4a, 0x00, 0x00, 0xfd, 0x4a, 0x00, 0x00, 0xfc, 0x4a, 0x00, 0x00, 0xfb, 0x4a, 0x00, 0x00, 0xfa, 0x4a, 0x00, 0x00, 0xf9, 0x4a, 0x00, 0x00, 0xf8, 0x4a, 0x00, 0x00, 0xf7, 0x4a, 0x00, 0x00, 0xf6, 0x4a, 0x00, 0x00, 0xf5, 0x4a, 0x00, 0x00, 0xf4, 0x4a, 0x00, 0x00, 0xf3, 0x4a, 0x00, 0x00, 0xf2, 0x4a, 0x00, 0x00, + 0xf1, 0x4a, 0x00, 0x00, 0xf0, 0x4a, 0x00, 0x00, 0xef, 0x4a, 0x00, 0x00, 0xee, 0x4a, 0x00, 0x00, 0xed, 0x4a, 0x00, 0x00, 0xec, 0x4a, 0x00, 0x00, 0xeb, 0x4a, 0x00, 0x00, 0xea, 0x4a, 0x00, 0x00, 0xe9, 0x4a, 0x00, 0x00, 0xe8, 0x4a, 0x00, 0x00, 0xe7, 0x4a, 0x00, 0x00, 0xe6, 0x4a, 0x00, 0x00, 0x04, 0x00, 0x05, 0x00, 0x00, 0x00, 0x00, 0x06, 0x02, 0x00, 0x00, 0x00, 0x04, 0x00, 0x00, 0x00, 0x00, 0x6e, 0x61, 0x6d, 0x65, 0x00, 0x74, 0x79, 0x70, 0x65, 0x53, 0x74, 0x72, 0x69, 0x6e, 0x67, 0x00, 0x74, 0x79, 0x70, 0x65, 0x49, 0x6e, 0x74, 0x00, 0x63, 0x6f, 0x6d, 0x6d, 0x65, 0x6e, 0x74, 0x00, 0x12, 0x01, 0x02, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, + 0x00, 0x64, 0x61, 0x74, 0x65, 0x00, 0x73, 0x79, 0x6d, 0x00, 0x12, 0x01, 0x02, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x44, 0x41, 0x54, 0x45, 0x00, 0x53, 0x54, 0x52, 0x49, 0x4e, 0x47, 0x00, 0x04, 0x01, 0x02, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x06, 0x00, 0x00, 0x00, 0x12, 0x00, 0x00, 0x00, 0x12, 0x01, 0x02, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x04, 0x01, 0x02, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x04, 0x01, 0x02, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x06, 0x00, 0x00, 0x00, 0x12, 0x00, 0x00, 0x00, 0x12, 0x01, 0x02, 0x00, 0x00, 0x00, 0x01, 0x00, + 0x00, 0x00, 0x56, 0x41, 0x4c, 0x55, 0x45, 0x00, 0x48, 0x41, 0x53, 0x48, 0x00, 0x04, 0x01, 0x02, 0x00, 
0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x05, 0x00, 0x00, 0x00} + } else if length == 35 && strings.Contains(script, "schema(") { + resp = []byte{0x32, 0x37, 0x38, 0x30, 0x30, 0x39, 0x30, 0x36, 0x35, 0x30, 0x20, 0x31, 0x20, 0x31, 0x0a, 0x4f, 0x4b, 0x0a, 0x19, 0x05, 0x12, 0x01, 0x06, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x70, 0x61, 0x72, 0x74, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x00, 0x63, 0x6f, 0x6c, 0x44, 0x65, 0x66, 0x73, 0x00, 0x70, 0x61, 0x72, 0x74, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x43, 0x6f, 0x6c, 0x75, 0x6d, 0x6e, 0x49, 0x6e, 0x64, 0x65, 0x78, 0x00, 0x70, 0x61, 0x72, 0x74, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x43, 0x6f, 0x6c, 0x75, 0x6d, + 0x6e, 0x54, 0x79, 0x70, 0x65, 0x00, 0x70, 0x61, 0x72, 0x74, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x54, 0x79, 0x70, 0x65, 0x4e, 0x61, 0x6d, 0x65, 0x00, 0x70, 0x61, 0x72, 0x74, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x54, 0x79, 0x70, 0x65, 0x00, 0x19, 0x01, 0x06, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x19, 0x01, 0x02, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x06, 0x01, 0x65, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x4a, 0x4b, 0x00, 0x00, 0x49, 0x4b, 0x00, 0x00, 0x48, 0x4b, 0x00, 0x00, 0x47, 0x4b, 0x00, 0x00, 0x46, 0x4b, 0x00, 0x00, 0x45, 0x4b, 0x00, 0x00, 0x44, 0x4b, 0x00, 0x00, 0x43, 0x4b, 0x00, 0x00, 0x42, 0x4b, 0x00, 0x00, 0x41, 0x4b, + 0x00, 0x00, 0x40, 0x4b, 0x00, 0x00, 0x3f, 0x4b, 0x00, 0x00, 0x3e, 0x4b, 0x00, 0x00, 0x3d, 0x4b, 0x00, 0x00, 0x3c, 0x4b, 0x00, 0x00, 0x3b, 0x4b, 0x00, 0x00, 0x3a, 0x4b, 0x00, 0x00, 0x39, 0x4b, 0x00, 0x00, 0x38, 0x4b, 0x00, 0x00, 0x37, 0x4b, 0x00, 0x00, 0x36, 0x4b, 0x00, 0x00, 0x35, 0x4b, 0x00, 0x00, 0x34, 0x4b, 0x00, 0x00, 0x33, 0x4b, 0x00, 0x00, 0x32, 0x4b, 0x00, 0x00, 0x31, 0x4b, 0x00, 0x00, 0x30, 0x4b, 0x00, 0x00, 0x2f, 0x4b, 0x00, 0x00, 0x2e, 0x4b, 0x00, 0x00, 0x2d, 0x4b, 0x00, 0x00, 0x2c, 0x4b, 0x00, 0x00, 0x2b, 0x4b, 0x00, 0x00, 0x2a, 0x4b, 0x00, 0x00, 0x29, 0x4b, 0x00, 0x00, 0x28, 0x4b, 0x00, 0x00, 0x27, 0x4b, 0x00, 0x00, + 0x26, 0x4b, 0x00, 0x00, 0x25, 0x4b, 0x00, 0x00, 0x24, 0x4b, 0x00, 0x00, 0x23, 0x4b, 0x00, 0x00, 0x22, 0x4b, 0x00, 0x00, 0x21, 0x4b, 0x00, 0x00, 0x20, 0x4b, 0x00, 0x00, 0x1f, 0x4b, 0x00, 0x00, 0x1e, 0x4b, 0x00, 0x00, 0x1d, 0x4b, 0x00, 0x00, 0x1c, 0x4b, 0x00, 0x00, 0x1b, 0x4b, 0x00, 0x00, 0x1a, 0x4b, 0x00, 0x00, 0x19, 0x4b, 0x00, 0x00, 0x18, 0x4b, 0x00, 0x00, 0x17, 0x4b, 0x00, 0x00, 0x16, 0x4b, 0x00, 0x00, 0x15, 0x4b, 0x00, 0x00, 0x14, 0x4b, 0x00, 0x00, 0x13, 0x4b, 0x00, 0x00, 0x12, 0x4b, 0x00, 0x00, 0x11, 0x4b, 0x00, 0x00, 0x10, 0x4b, 0x00, 0x00, 0x0f, 0x4b, 0x00, 0x00, 0x0e, 0x4b, 0x00, 0x00, 0x0d, 0x4b, 0x00, 0x00, 0x0c, 0x4b, + 0x00, 0x00, 0x0b, 0x4b, 0x00, 0x00, 0x0a, 0x4b, 0x00, 0x00, 0x09, 0x4b, 0x00, 0x00, 0x08, 0x4b, 0x00, 0x00, 0x07, 0x4b, 0x00, 0x00, 0x06, 0x4b, 0x00, 0x00, 0x05, 0x4b, 0x00, 0x00, 0x04, 0x4b, 0x00, 0x00, 0x03, 0x4b, 0x00, 0x00, 0x02, 0x4b, 0x00, 0x00, 0x01, 0x4b, 0x00, 0x00, 0x00, 0x4b, 0x00, 0x00, 0xff, 0x4a, 0x00, 0x00, 0xfe, 0x4a, 0x00, 0x00, 0xfd, 0x4a, 0x00, 0x00, 0xfc, 0x4a, 0x00, 0x00, 0xfb, 0x4a, 0x00, 0x00, 0xfa, 0x4a, 0x00, 0x00, 0xf9, 0x4a, 0x00, 0x00, 0xf8, 0x4a, 0x00, 0x00, 0xf7, 0x4a, 0x00, 0x00, 0xf6, 0x4a, 0x00, 0x00, 0xf5, 0x4a, 0x00, 0x00, 0xf4, 0x4a, 0x00, 0x00, 0xf3, 0x4a, 0x00, 0x00, 0xf2, 0x4a, 0x00, 0x00, + 0xf1, 0x4a, 0x00, 0x00, 0xf0, 0x4a, 0x00, 0x00, 0xef, 0x4a, 0x00, 0x00, 0xee, 0x4a, 0x00, 0x00, 0xed, 0x4a, 0x00, 0x00, 0xec, 0x4a, 0x00, 0x00, 0xeb, 0x4a, 0x00, 0x00, 0xea, 0x4a, 0x00, 0x00, 0xe9, 0x4a, 0x00, 0x00, 0xe8, 0x4a, 0x00, 0x00, 0xe7, 0x4a, 0x00, 0x00, 0xe6, 0x4a, 0x00, 0x00, 0x04, 0x00, 
0x05, 0x00, 0x00, 0x00, 0x00, 0x06, 0x02, 0x00, 0x00, 0x00, 0x04, 0x00, 0x00, 0x00, 0x00, 0x6e, 0x61, 0x6d, 0x65, 0x00, 0x74, 0x79, 0x70, 0x65, 0x53, 0x74, 0x72, 0x69, 0x6e, 0x67, 0x00, 0x74, 0x79, 0x70, 0x65, 0x49, 0x6e, 0x74, 0x00, 0x63, 0x6f, 0x6d, 0x6d, 0x65, 0x6e, 0x74, 0x00, 0x12, 0x01, 0x02, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, + 0x00, 0x64, 0x61, 0x74, 0x65, 0x00, 0x73, 0x79, 0x6d, 0x00, 0x12, 0x01, 0x02, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x44, 0x41, 0x54, 0x45, 0x00, 0x53, 0x54, 0x52, 0x49, 0x4e, 0x47, 0x00, 0x04, 0x01, 0x02, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x06, 0x00, 0x00, 0x00, 0x12, 0x00, 0x00, 0x00, 0x12, 0x01, 0x02, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x04, 0x01, 0x02, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x04, 0x01, 0x02, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x06, 0x00, 0x00, 0x00, 0x12, 0x00, 0x00, 0x00, 0x12, 0x01, 0x02, 0x00, 0x00, 0x00, 0x01, 0x00, + 0x00, 0x00, 0x56, 0x41, 0x4c, 0x55, 0x45, 0x00, 0x48, 0x41, 0x53, 0x48, 0x00, 0x04, 0x01, 0x02, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x05, 0x00, 0x00, 0x00} + } else if length == 41 && strings.Contains(script, "schema(") { + resp = []byte{0x32, 0x37, 0x38, 0x30, 0x30, 0x39, 0x30, 0x36, 0x35, 0x30, 0x20, 0x31, 0x20, 0x31, 0x0a, 0x4f, 0x4b, + 0x0a, 0x19, 0x05, 0x12, 0x01, 0x06, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x70, 0x61, 0x72, 0x74, 0x69, 0x74, + 0x69, 0x6f, 0x6e, 0x43, 0x6f, 0x6c, 0x75, 0x6d, 0x6e, 0x4e, 0x61, 0x6d, 0x65, 0x00, 0x70, 0x61, 0x72, 0x74, 0x69, + 0x74, 0x69, 0x6f, 0x6e, 0x43, 0x6f, 0x6c, 0x75, 0x6d, 0x6e, 0x49, 0x6e, 0x64, 0x65, 0x78, 0x00, 0x70, 0x61, 0x72, + 0x74, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x00, 0x70, 0x61, 0x72, 0x74, 0x69, 0x74, + 0x69, 0x6f, 0x6e, 0x54, 0x79, 0x70, 0x65, 0x00, 0x70, 0x61, 0x72, 0x74, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x43, 0x6f, + 0x6c, 0x75, 0x6d, 0x6e, 0x54, 0x79, 0x70, 0x65, 0x00, 0x63, 0x6f, 0x6c, 0x44, 0x65, 0x66, 0x73, 0x00, 0x19, 0x01, + 0x06, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x12, 0x00, 0x73, 0x79, 0x6d, 0x00, 0x04, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x12, 0x01, 0x01, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x74, 0x00, 0x04, 0x00, 0x01, 0x00, 0x00, 0x00, 0x04, 0x00, 0x01, 0x00, + 0x00, 0x00, 0x00, 0x06, 0x02, 0x00, 0x00, 0x00, 0x02, 0x00, 0x00, 0x00, 0x00, 0x74, 0x79, 0x70, 0x65, 0x49, 0x6e, + 0x74, 0x00, 0x6e, 0x61, 0x6d, 0x65, 0x00, 0x04, 0x01, 0x02, 0x00, 0x00, 0x00, 0x01, 0x00, + 0x00, 0x00, 0x06, 0x00, 0x00, 0x00, 0x12, 0x00, 0x00, 0x00, 0x12, 0x01, 0x02, 0x00, 0x00, 0x00, 0x01, 0x00, + 0x00, 0x00, 0x73, 0x78, 0x6d, 0x00, 0x64, 0x61, 0x74, 0x65, 0x00} + } else if length == 59 && strings.Contains(script, "schema(loadTable") { + resp = []byte{0x32, 0x37, 0x38, 0x30, 0x30, 0x39, 0x30, 0x36, 0x35, 0x30, 0x20, 0x31, 0x20, 0x31, 0x0a, 0x4f, 0x4b, 0x0a, 0x19, 0x05, 0x12, 0x01, 0x0c, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x70, 0x61, 0x72, 0x74, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x53, 0x69, 0x74, 0x65, 0x73, 0x00, 0x70, 0x61, 0x72, 0x74, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x00, 0x65, 0x6e, 0x67, 0x69, 0x6e, 0x65, 0x54, 0x79, 0x70, 0x65, + 0x00, 0x6b, 0x65, 0x65, 0x70, 0x44, 0x75, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x65, 0x73, 0x00, 0x63, 0x68, 0x75, 0x6e, 0x6b, 0x47, 0x72, 0x61, 0x6e, 0x75, 0x6c, 0x61, 0x72, 0x69, 0x74, 0x79, 0x00, 0x63, 0x6f, 0x6c, 0x44, 0x65, 0x66, 0x73, 0x00, 0x63, 0x68, 0x75, 0x6e, 0x6b, 0x50, 0x61, 0x74, 0x68, 0x00, 0x70, 0x61, 0x72, 0x74, 0x69, 0x74, 0x69, 
0x6f, 0x6e, 0x43, 0x6f, 0x6c, 0x75, 0x6d, 0x6e, 0x49, 0x6e, 0x64, 0x65, 0x78, 0x00, 0x70, 0x61, 0x72, 0x74, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x43, 0x6f, 0x6c, 0x75, 0x6d, 0x6e, 0x54, 0x79, 0x70, 0x65, 0x00, 0x70, 0x61, 0x72, 0x74, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x43, 0x6f, 0x6c, 0x75, 0x6d, + 0x6e, 0x4e, 0x61, 0x6d, 0x65, 0x00, 0x70, 0x61, 0x72, 0x74, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x54, 0x79, 0x70, 0x65, 0x4e, 0x61, 0x6d, 0x65, 0x00, 0x70, 0x61, 0x72, 0x74, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x54, 0x79, 0x70, 0x65, 0x00, 0x19, 0x01, 0x0c, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x19, 0x01, 0x02, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x06, 0x01, 0x65, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x4a, 0x4b, 0x00, 0x00, 0x49, 0x4b, 0x00, 0x00, 0x48, 0x4b, 0x00, 0x00, 0x47, 0x4b, 0x00, 0x00, 0x46, 0x4b, 0x00, 0x00, 0x45, 0x4b, 0x00, 0x00, 0x44, 0x4b, 0x00, 0x00, 0x43, 0x4b, 0x00, 0x00, 0x42, 0x4b, 0x00, + 0x00, 0x41, 0x4b, 0x00, 0x00, 0x40, 0x4b, 0x00, 0x00, 0x3f, 0x4b, 0x00, 0x00, 0x3e, 0x4b, 0x00, 0x00, 0x3d, 0x4b, 0x00, 0x00, 0x3c, 0x4b, 0x00, 0x00, 0x3b, 0x4b, 0x00, 0x00, 0x3a, 0x4b, 0x00, 0x00, 0x39, 0x4b, 0x00, 0x00, 0x38, 0x4b, 0x00, 0x00, 0x37, 0x4b, 0x00, 0x00, 0x36, 0x4b, 0x00, 0x00, 0x35, 0x4b, 0x00, 0x00, 0x34, 0x4b, 0x00, 0x00, 0x33, 0x4b, 0x00, 0x00, 0x32, 0x4b, 0x00, 0x00, 0x31, 0x4b, 0x00, 0x00, 0x30, 0x4b, 0x00, 0x00, 0x2f, 0x4b, 0x00, 0x00, 0x2e, 0x4b, 0x00, 0x00, 0x2d, 0x4b, 0x00, 0x00, 0x2c, 0x4b, 0x00, 0x00, 0x2b, 0x4b, 0x00, 0x00, 0x2a, 0x4b, 0x00, 0x00, 0x29, 0x4b, 0x00, 0x00, 0x28, 0x4b, 0x00, 0x00, 0x27, + 0x4b, 0x00, 0x00, 0x26, 0x4b, 0x00, 0x00, 0x25, 0x4b, 0x00, 0x00, 0x24, 0x4b, 0x00, 0x00, 0x23, 0x4b, 0x00, 0x00, 0x22, 0x4b, 0x00, 0x00, 0x21, 0x4b, 0x00, 0x00, 0x20, 0x4b, 0x00, 0x00, 0x1f, 0x4b, 0x00, 0x00, 0x1e, 0x4b, 0x00, 0x00, 0x1d, 0x4b, 0x00, 0x00, 0x1c, 0x4b, 0x00, 0x00, 0x1b, 0x4b, 0x00, 0x00, 0x1a, 0x4b, 0x00, 0x00, 0x19, 0x4b, 0x00, 0x00, 0x18, 0x4b, 0x00, 0x00, 0x17, 0x4b, 0x00, 0x00, 0x16, 0x4b, 0x00, 0x00, 0x15, 0x4b, 0x00, 0x00, 0x14, 0x4b, 0x00, 0x00, 0x13, 0x4b, 0x00, 0x00, 0x12, 0x4b, 0x00, 0x00, 0x11, 0x4b, 0x00, 0x00, 0x10, 0x4b, 0x00, 0x00, 0x0f, 0x4b, 0x00, 0x00, 0x0e, 0x4b, 0x00, 0x00, 0x0d, 0x4b, 0x00, + 0x00, 0x0c, 0x4b, 0x00, 0x00, 0x0b, 0x4b, 0x00, 0x00, 0x0a, 0x4b, 0x00, 0x00, 0x09, 0x4b, 0x00, 0x00, 0x08, 0x4b, 0x00, 0x00, 0x07, 0x4b, 0x00, 0x00, 0x06, 0x4b, 0x00, 0x00, 0x05, 0x4b, 0x00, 0x00, 0x04, 0x4b, 0x00, 0x00, 0x03, 0x4b, 0x00, 0x00, 0x02, 0x4b, 0x00, 0x00, 0x01, 0x4b, 0x00, 0x00, 0x00, 0x4b, 0x00, 0x00, 0xff, 0x4a, 0x00, 0x00, 0xfe, 0x4a, 0x00, 0x00, 0xfd, 0x4a, 0x00, 0x00, 0xfc, 0x4a, 0x00, 0x00, 0xfb, 0x4a, 0x00, 0x00, 0xfa, 0x4a, 0x00, 0x00, 0xf9, 0x4a, 0x00, 0x00, 0xf8, 0x4a, 0x00, 0x00, 0xf7, 0x4a, 0x00, 0x00, 0xf6, 0x4a, 0x00, 0x00, 0xf5, 0x4a, 0x00, 0x00, 0xf4, 0x4a, 0x00, 0x00, 0xf3, 0x4a, 0x00, 0x00, 0xf2, + 0x4a, 0x00, 0x00, 0xf1, 0x4a, 0x00, 0x00, 0xf0, 0x4a, 0x00, 0x00, 0xef, 0x4a, 0x00, 0x00, 0xee, 0x4a, 0x00, 0x00, 0xed, 0x4a, 0x00, 0x00, 0xec, 0x4a, 0x00, 0x00, 0xeb, 0x4a, 0x00, 0x00, 0xea, 0x4a, 0x00, 0x00, 0xe9, 0x4a, 0x00, 0x00, 0xe8, 0x4a, 0x00, 0x00, 0xe7, 0x4a, 0x00, 0x00, 0xe6, 0x4a, 0x00, 0x00, 0x04, 0x00, 0x05, 0x00, 0x00, 0x00, 0x12, 0x00, 0x4f, 0x4c, 0x41, 0x50, 0x00, 0x12, 0x00, 0x41, 0x4c, 0x4c, 0x00, 0x12, 0x00, 0x54, 0x41, 0x42, 0x4c, 0x45, 0x00, 0x00, 0x06, 0x02, 0x00, 0x00, 0x00, 0x04, 0x00, 0x00, 0x00, 0x00, 0x6e, 0x61, 0x6d, 0x65, 0x00, 0x74, 0x79, 0x70, 0x65, 0x53, 0x74, 0x72, 0x69, 0x6e, 0x67, 0x00, 0x74, + 0x79, 0x70, 0x65, 0x49, 0x6e, 0x74, 0x00, 0x63, 0x6f, 0x6d, 0x6d, 0x65, 
0x6e, 0x74, 0x00, 0x12, 0x01, 0x02, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x64, 0x61, 0x74, 0x65, 0x00, 0x73, 0x79, 0x6d, 0x00, 0x12, 0x01, 0x02, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x44, 0x41, 0x54, 0x45, 0x00, 0x53, 0x54, 0x52, 0x49, 0x4e, 0x47, 0x00, 0x04, 0x01, 0x02, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x06, 0x00, 0x00, 0x00, 0x12, 0x00, 0x00, 0x00, 0x12, 0x01, 0x02, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x12, 0x00, 0x00, 0x04, 0x01, 0x02, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, + 0x00, 0x04, 0x01, 0x02, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x06, 0x00, 0x00, 0x00, 0x12, 0x00, 0x00, 0x00, 0x12, 0x01, 0x02, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x64, 0x61, 0x74, 0x65, 0x00, 0x73, 0x79, 0x6d, 0x00, 0x12, 0x01, 0x02, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x56, 0x41, 0x4c, 0x55, 0x45, 0x00, 0x48, 0x41, 0x53, 0x48, 0x00, 0x04, 0x01, 0x02, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x05, 0x00, 0x00, 0x00} + } + + if len(resp) > 0 { + _, err = conn.Write(resp) + if err != nil { + return + } + + res = make([]byte, 0) + } + } +} + +func isExit(exit <-chan bool) bool { + select { + case <-exit: + return true + default: + return false + } +} diff --git a/multigoroutinetable/status.go b/multigoroutinetable/status.go new file mode 100644 index 0000000..2daeaf8 --- /dev/null +++ b/multigoroutinetable/status.go @@ -0,0 +1,56 @@ +package multigoroutinetable + +import ( + "fmt" + "strings" +) + +// Status is used to store the status of MultiGoroutineTable. +type Status struct { + // errMsg of MultiGoroutineTable + ErrMsg string + // the number of records that failed to be sent + FailedRows int + // the number of records that have been sent + SentRows int + // the number of unsent records + UnSentRows int + // check whether the MultiGoroutineTable finished + IsExit bool + // list the status of goroutines + GoroutineStatusList []*GoroutineStatus +} + +// String returns the status of goroutines in string format. +func (s *Status) String() string { + by := strings.Builder{} + by.WriteString(fmt.Sprintf("errMsg : %s\n", s.ErrMsg)) + by.WriteString(fmt.Sprintf("isExit : %v\n", s.IsExit)) + by.WriteString(fmt.Sprintf("sentRows : %d\n", s.SentRows)) + by.WriteString(fmt.Sprintf("unSentRows : %d\n", s.UnSentRows)) + by.WriteString(fmt.Sprintf("sendFailedRows : %d\n", s.FailedRows)) + by.WriteString("goroutineStatus :\n") + for _, v := range s.GoroutineStatusList { + by.WriteString(fmt.Sprintf(" %s\n", v.String())) + } + + return by.String() +} + +// GoroutineStatus records the status of goroutine. +type GoroutineStatus struct { + // goroutine index + GoroutineIndex int + // the number of records that failed to be sent + FailedRows int + // the number of records that have been sent + SentRows int + // the number of unsent records + UnSentRows int +} + +// String returns the status of goroutines in string format. 
+func (ts *GoroutineStatus) String() string {
+	return fmt.Sprintf("goroutineIndex: %d, sentRows: %d, unSentRows: %d, sendFailedRows: %d",
+		ts.GoroutineIndex, ts.SentRows, ts.UnSentRows, ts.FailedRows)
+}
diff --git a/multigoroutinetable/status_test.go b/multigoroutinetable/status_test.go
new file mode 100644
index 0000000..943bd4d
--- /dev/null
+++ b/multigoroutinetable/status_test.go
@@ -0,0 +1,7 @@
+package multigoroutinetable
+
+import "testing"
+
+func TestStatus(t *testing.T) {
+
+}
diff --git a/multigoroutinetable/writer_goroutine.go b/multigoroutinetable/writer_goroutine.go
new file mode 100644
index 0000000..9ca7427
--- /dev/null
+++ b/multigoroutinetable/writer_goroutine.go
@@ -0,0 +1,344 @@
+package multigoroutinetable
+
+import (
+	"errors"
+	"fmt"
+	"runtime"
+	"time"
+
+	"github.com/dolphindb/api-go/dialer"
+	"github.com/dolphindb/api-go/model"
+
+	"github.com/smallnest/chanx"
+)
+
+type writerGoroutine struct {
+	dialer.Conn
+
+	signal      chan bool
+	tableWriter *MultiGoroutineTable
+	writeQueue  *chanx.UnboundedChan
+	failedQueue *chanx.UnboundedChan
+
+	insertScript   string
+	saveScript     string
+	sentRows       int
+	isFinished     bool
+	exit           chan bool
+	goroutineIndex int
+}
+
+func newWriterGoroutine(goroutineIndex int, mtw *MultiGoroutineTable, conn dialer.Conn) *writerGoroutine {
+	res := &writerGoroutine{
+		goroutineIndex: goroutineIndex,
+		Conn:           conn,
+		tableWriter:    mtw,
+		signal:         make(chan bool),
+		exit:           make(chan bool),
+		writeQueue:     chanx.NewUnboundedChan(mtw.batchSize),
+		failedQueue:    chanx.NewUnboundedChan(mtw.batchSize),
+	}
+
+	go res.run()
+
+	// give run a moment to start before the first rows are queued
+	time.Sleep(1 * time.Millisecond)
+
+	return res
+}
+
+func (w *writerGoroutine) run() {
+	if !w.init() {
+		return
+	}
+
+	for !w.isExit() {
+		<-w.signal
+		if !w.isExit() && w.tableWriter.batchSize > 1 && w.tableWriter.throttle > 0 {
+			end := time.Now().Add(time.Duration(w.tableWriter.throttle) * time.Millisecond)
+			if !w.isExit() && w.writeQueue.Len() < w.tableWriter.batchSize {
+				// wait for the batch to fill instead of spinning on the clock
+				time.Sleep(time.Until(end))
+			}
+		}
+
+		for !w.isExit() && w.writeAllData() {
+		}
+	}
+
+	// drain whatever is still queued unless the writer stopped with an error
+	for !w.tableWriter.isExist() && w.writeAllData() {
+	}
+
+	w.isFinished = true
+}
+
+func (w *writerGoroutine) getStatus(status *GoroutineStatus) {
+	status.GoroutineIndex = w.goroutineIndex
+	status.SentRows = w.sentRows
+	status.UnSentRows = w.writeQueue.Len()
+	status.FailedRows = w.failedQueue.Len()
+}
+
+func (w *writerGoroutine) init() bool {
+	if w.tableWriter.database == "" {
+		w.insertScript = fmt.Sprintf("tableInsert{\"%s\"}", w.tableWriter.tableName)
+	} else {
+		w.insertScript = fmt.Sprintf("tableInsert{loadTable(\"%s\",\"%s\")}", w.tableWriter.database, w.tableWriter.tableName)
+	}
+
+	return true
+}
+
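+// writeAllData drains everything currently queued, packs the rows into an
+// in-memory table and sends it with tableInsert. If packing or sending fails,
+// the whole batch is parked on failedQueue, from where it can be fetched with
+// GetUnwrittenData and re-submitted via InsertUnwrittenData.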
+func (w *writerGoroutine) writeAllData() bool {
+	items := make([][]model.DataType, 0)
+loop:
+	for {
+		select {
+		case val := <-w.writeQueue.Out:
+			items = append(items, val.([]model.DataType))
+		default:
+			if w.writeQueue.Len() == 0 {
+				break loop
+			}
+		}
+	}
+
+	if len(items) < 1 {
+		return false
+	}
+
+	defer func() {
+		re := recover()
+		if re != nil {
+			for _, v := range items {
+				w.failedQueue.In <- v
+			}
+
+			buf := make([]byte, 4096)
+			n := runtime.Stack(buf, false)
+			fmt.Println("Failed to insert data into the table: ", string(buf[:n]))
+			w.tableWriter.errorInfo = fmt.Sprintf("%v", re)
+		}
+	}()
+
+	addRowCount := len(items)
+	writeTable, isWriteDone := w.packWriteTable(items)
+	if isWriteDone && writeTable != nil && addRowCount > 0 {
+		err := w.runScript(writeTable, addRowCount)
+		if err != nil {
+			isWriteDone = false
+			w.tableWriter.errorInfo = err.Error()
+			w.tableWriter.hasError = true
+			if w.Conn != nil {
+				w.Conn.Close()
+			}
+
+			w.Conn = nil
+		}
+	}
+
+	if !isWriteDone {
+		for _, v := range items {
+			w.failedQueue.In <- v
+		}
+	}
+
+	return true
+}
+
+func (w *writerGoroutine) packWriteTable(items [][]model.DataType) (*model.Table, bool) {
+	isWriteDone := true
+	colValues := make([]*model.Vector, len(w.tableWriter.colTypes))
+	for k, v := range w.tableWriter.colTypes {
+		var vct *model.Vector
+		switch {
+		case v >= 128:
+			dtl := model.NewEmptyDataTypeList(model.DataTypeByte(v-128), len(items))
+			vct = model.NewVector(dtl)
+		case v >= 64:
+			vl := make([]*model.Vector, 0)
+			for i := 0; i < len(items); i++ {
+				item := items[i][k].Value().(*model.Vector)
+				vl = append(vl, item)
+			}
+
+			av := model.NewArrayVector(vl)
+			vct = model.NewVectorWithArrayVector(av)
+		default:
+			dtl := model.NewEmptyDataTypeList(model.DataTypeByte(v), len(items))
+			vct = model.NewVector(dtl)
+		}
+
+		colValues[k] = vct
+	}
+
+	for k, row := range items {
+		for ind, col := range colValues {
+			if col.ArrayVector == nil {
+				err := col.Set(k, row[ind])
+				if err != nil {
+					fmt.Println("Failed to set DataType into Vector: ", err)
+					isWriteDone = false
+					w.tableWriter.hasError = true
+					w.tableWriter.errorInfo = err.Error()
+					break
+				}
+			}
+		}
+	}
+
+	if isWriteDone {
+		return model.NewTable(w.tableWriter.colNames, colValues), true
+	}
+
+	return nil, false
+}
+
+func (w *writerGoroutine) runScript(df model.DataForm, count int) (err error) {
+	defer func() {
+		raw := recover()
+		if raw != nil {
+			buf := make([]byte, 4096)
+			n := runtime.Stack(buf, false)
+			fmt.Println("Failed to call function tableInsert: ", string(buf[:n]))
+			if e, ok := raw.(error); ok {
+				err = e
+			} else {
+				err = fmt.Errorf("%v", raw)
+			}
+		}
+	}()
+
+	args := make([]model.DataForm, 1)
+	args[0] = df
+	_, err = w.RunFunc(w.insertScript, args)
+	if err != nil {
+		fmt.Printf("Failed to run func: %s\n", err.Error())
+		return err
+	}
+
+	if w.saveScript != "" {
+		_, err = w.RunScript(w.saveScript)
+		if err != nil {
+			fmt.Printf("Failed to run script: %s\n", err.Error())
+			return err
+		}
+	}
+
+	w.sentRows += count
+
+	return nil
+}
+
+func (w *writerGoroutine) isExit() bool {
+	select {
+	case <-w.exit:
+		return true
+	default:
+		return w.tableWriter.hasError
+	}
+}
+
+func (w *writerGoroutine) stop() {
+	select {
+	case <-w.exit:
+	default:
+		close(w.exit)
+	}
+
+	select {
+	case w.signal <- true:
+	default:
+	}
+}
+
+func (mtt *MultiGoroutineTable) handlePartColNamesScalar(partColNames model.DataForm, schema *model.Dictionary, partitionCol string) (model.DataForm, int32, error) {
+	s := partColNames.(*model.Scalar)
+	if realStr := s.DataType.String(); realStr != partitionCol {
+		return nil, 0, fmt.Errorf("the parameter PartitionCol must be the partitioning column %s in the table", realStr)
+	}
+
+	dt, err := schema.Get("partitionColumnIndex")
+	if err != nil {
+		fmt.Printf("Failed to get partitionColumnIndex: %s\n", err.Error())
+		return nil, 0, err
+	}
+
+	s = dt.Value().(*model.Scalar)
+	raw := s.DataType.Value()
+	mtt.partitionColumnIdx = raw.(int32)
+
+	dt, err = schema.Get("partitionSchema")
+	if err != nil {
+		fmt.Printf("Failed to get partitionSchema: %s\n", err.Error())
+		return nil, 0, err
+	}
+
+	partitionSchema := dt.Value().(model.DataForm)
+
+	dt, err = schema.Get("partitionType")
+	if err != nil {
+		fmt.Printf("Failed to get partitionType: %s\n", err.Error())
+		return nil, 0, err
+	}
+
+	s = dt.Value().(*model.Scalar)
+	raw = s.DataType.Value()
+	return partitionSchema, raw.(int32), nil
+}
+
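+// handlePartColNamesVector covers tables whose partitionColumnName in the
+// schema is a vector, i.e. multi-column (composite) partitions; the requested
+// PartitionCol must match one of those columns.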
+func (mtt *MultiGoroutineTable) handlePartColNamesVector(partColNames model.DataForm, schema *model.Dictionary, partitionCol string) (model.DataForm, int32, error) {
+	dims := partColNames.Rows()
+	if dims > 1 && partitionCol == "" {
+		return nil, 0, errors.New("the parameter PartitionCol must be specified for a partitioned table")
+	}
+
+	vct := partColNames.(*model.Vector)
+	names := vct.Data.StringList()
+	ind := -1
+	for k, v := range names {
+		if v == partitionCol {
+			ind = k
+			break
+		}
+	}
+
+	if ind == -1 {
+		return nil, 0, errors.New("the parameter PartitionCol must be one of the partitioning columns of the partitioned table")
+	}
+
+	dt, err := schema.Get("partitionColumnIndex")
+	if err != nil {
+		fmt.Printf("Failed to get partitionColumnIndex: %s\n", err.Error())
+		return nil, 0, err
+	}
+
+	s := dt.Value().(*model.Vector)
+	raw := s.Data.ElementValue(ind)
+	mtt.partitionColumnIdx = raw.(int32)
+
+	dt, err = schema.Get("partitionSchema")
+	if err != nil {
+		fmt.Printf("Failed to get partitionSchema: %s\n", err.Error())
+		return nil, 0, err
+	}
+
+	vct = dt.Value().(*model.Vector)
+	partitionSchema := vct.Data.ElementValue(ind).(model.DataForm)
+
+	dt, err = schema.Get("partitionType")
+	if err != nil {
+		fmt.Printf("Failed to get partitionType: %s\n", err.Error())
+		return nil, 0, err
+	}
+
+	s = dt.Value().(*model.Vector)
+	raw = s.Data.ElementValue(ind)
+	return partitionSchema, raw.(int32), nil
+}
+
+func (mtt *MultiGoroutineTable) handlePartColNames(partColNames model.DataForm, schema *model.Dictionary, partitionCol string) (model.DataForm, int32, error) {
+	if partColNames.GetDataForm() == model.DfScalar {
+		return mtt.handlePartColNamesScalar(partColNames, schema, partitionCol)
+	}
+
+	return mtt.handlePartColNamesVector(partColNames, schema, partitionCol)
+}
diff --git a/multigoroutinetable/writer_goroutine_test.go b/multigoroutinetable/writer_goroutine_test.go
new file mode 100644
index 0000000..a7a7a2c
--- /dev/null
+++ b/multigoroutinetable/writer_goroutine_test.go
@@ -0,0 +1,7 @@
+package multigoroutinetable
+
+import "testing"
+
+func TestWriterGoroutine(t *testing.T) {
+
+}
diff --git a/streaming/abstract_client.go b/streaming/abstract_client.go
new file mode 100644
index 0000000..b10dd5d
--- /dev/null
+++ b/streaming/abstract_client.go
@@ -0,0 +1,15 @@
+package streaming
+
+// AbstractClient is the client interface for streaming subscription.
+type AbstractClient interface {
+	activeCloseConnection(si *site) error
+	tryReconnect(topic string) bool
+	doReconnect(si *site) bool
+	getSubscriber() *subscriber
+
+	subscribe(req *SubscribeRequest) error
+	// UnSubscribe helps you to unsubscribe from the specific action of the table according to the req
+	UnSubscribe(req *SubscribeRequest) error
+	// IsClosed checks whether the client is closed
+	IsClosed() bool
+}
diff --git a/streaming/abstract_client_test.go b/streaming/abstract_client_test.go
new file mode 100644
index 0000000..05e985e
--- /dev/null
+++ b/streaming/abstract_client_test.go
@@ -0,0 +1 @@
+package streaming
diff --git a/streaming/basic_message.go b/streaming/basic_message.go
new file mode 100644
index 0000000..21ade5c
--- /dev/null
+++ b/streaming/basic_message.go
@@ -0,0 +1,58 @@
+package streaming
+
+import (
+	"strings"
+
+	"github.com/dolphindb/api-go/model"
+)
+
+// IMessage is the interface of subscription messages.
+type IMessage interface {
+	// GetTopic returns the topic in string format.
+	GetTopic() string
+	// GetOffset returns the offset of the subscription messages.
+ GetOffset() int64 + // GetValue returns the value of the subscription messages based on the column index of the subscribed table. + GetValue(index int) model.DataForm + // GetValueByName returns the value of the subscription messages based on the column name of the subscribed table. + GetValueByName(name string) model.DataForm +} + +// Message is the implementation of the IMessage. +type Message struct { + offset int64 + topic string + nameToIndex map[string]int + + msg *model.Vector +} + +// GetTopic returns the topic in string format. +func (m *Message) GetTopic() string { + return m.topic +} + +// GetOffset returns the offset of the subscription messages. +func (m *Message) GetOffset() int64 { + return m.offset +} + +// GetValue returns the value of the subscription message based on the column index of the subscribed table. +func (m *Message) GetValue(index int) model.DataForm { + if m.msg != nil { + return m.msg.Data.ElementValue(index).(model.DataForm) + } + + return nil +} + +// GetValueByName returns the value of the subscription message based on the column name of the subscribed table. +func (m *Message) GetValueByName(name string) model.DataForm { + if m.msg != nil { + if ind, ok := m.nameToIndex[strings.ToLower(name)]; ok { + return m.msg.Data.ElementValue(ind).(model.DataForm) + } + } + + return nil +} diff --git a/streaming/basic_message_test.go b/streaming/basic_message_test.go new file mode 100644 index 0000000..901a45a --- /dev/null +++ b/streaming/basic_message_test.go @@ -0,0 +1,39 @@ +package streaming + +import ( + "testing" + + "github.com/dolphindb/api-go/model" + + "github.com/stretchr/testify/assert" +) + +func TestMessage(t *testing.T) { + dtl, err := model.NewDataTypeListWithRaw(model.DtString, []string{"name", "prefix"}) + assert.Nil(t, err) + + s0 := model.NewScalar(dtl.Get(0)) + s1 := model.NewScalar(dtl.Get(1)) + + dtl, err = model.NewDataTypeListWithRaw(model.DtAny, []model.DataForm{s0, s1}) + assert.Nil(t, err) + + vct := model.NewVector(dtl) + + msg := &Message{ + offset: -1, + topic: "topic", + nameToIndex: map[string]int{ + "name": 0, + "prefix": 1, + }, + msg: vct, + } + + assert.Equal(t, msg.GetOffset(), int64(-1)) + assert.Equal(t, msg.GetTopic(), "topic") + assert.Equal(t, msg.GetValue(0), s0) + assert.Equal(t, msg.GetValue(1), s1) + assert.Equal(t, msg.GetValueByName("name"), s0) + assert.Equal(t, msg.GetValueByName("prefix"), s1) +} diff --git a/streaming/connection_detector.go b/streaming/connection_detector.go new file mode 100644 index 0000000..628d60a --- /dev/null +++ b/streaming/connection_detector.go @@ -0,0 +1,49 @@ +package streaming + +import ( + "context" + "fmt" + "net" + "time" +) + +type connectionDetector struct { + net.Conn + ctx context.Context +} + +func (c *connectionDetector) run() { + for !c.IsClosed() { + _, err := c.Write([]byte{0xff}) + if err != nil { + failCount := 0 + for i := 0; i < 5; i++ { + _, err = c.Write([]byte{0xff}) + if err != nil { + failCount++ + } + + time.Sleep(1000 * time.Millisecond) + } + + if failCount != 5 { + continue + } + + c.Close() + fmt.Println("Connection closed!!") + return + } + + time.Sleep(1000 * time.Millisecond) + } +} + +func (c *connectionDetector) IsClosed() bool { + select { + case <-c.ctx.Done(): + return true + default: + return false + } +} diff --git a/streaming/connection_detector_test.go b/streaming/connection_detector_test.go new file mode 100644 index 0000000..05e985e --- /dev/null +++ b/streaming/connection_detector_test.go @@ -0,0 +1 @@ +package streaming diff --git 
a/streaming/goroutine_client.go b/streaming/goroutine_client.go
new file mode 100644
index 0000000..e3706b6
--- /dev/null
+++ b/streaming/goroutine_client.go
@@ -0,0 +1,228 @@
+package streaming
+
+import (
+	"fmt"
+	"sync"
+	"time"
+)
+
+// GoroutineClient is an implementation of AbstractClient for streaming subscription.
+type GoroutineClient struct {
+	*subscriber
+
+	exit chan bool
+
+	handlerLoppers sync.Map
+}
+
+// NewGoroutineClient instantiates a GoroutineClient, which listens on the listening port to receive subscription info.
+// When listeningHost is "", the default host is the local address.
+// When listeningPort is 0, the default port 8849 is used.
+func NewGoroutineClient(listeningHost string, listeningPort int) *GoroutineClient {
+	if listeningPort == 0 {
+		listeningPort = DefaultPort
+	}
+
+	t := &GoroutineClient{
+		subscriber:     newSubscriber(listeningHost, listeningPort),
+		exit:           make(chan bool),
+		handlerLoppers: sync.Map{},
+	}
+
+	go listening(t, listeningPort)
+
+	return t
+}
+
+// Subscribe helps you to subscribe to the specific action of the table according to the req.
+func (t *GoroutineClient) Subscribe(req *SubscribeRequest) error {
+	return t.subscribe(req)
+}
+
+func (t *GoroutineClient) subscribe(req *SubscribeRequest) error {
+	queue, err := t.subscribeInternal(req)
+	if err != nil {
+		return err
+	}
+
+	handlerLooper := &handlerLopper{
+		queue:     queue,
+		handler:   req.Handler,
+		batchSize: req.BatchSize,
+		throttle:  req.Throttle,
+	}
+
+	if req.Handler == nil {
+		handlerLooper.handler = &DefaultMessageHandler{}
+	}
+
+	go handlerLooper.run()
+
+	conn, err := newConnectedConn(req.Address)
+	if err != nil {
+		fmt.Printf("Failed to connect to server: %s\n", err.Error())
+		return err
+	}
+
+	defer conn.Close()
+
+	topicStr, err := t.getTopicFromServer(req.TableName, req.ActionName, conn)
+	if err != nil {
+		fmt.Printf("Failed to get topic from server: %s\n", err.Error())
+		return err
+	}
+
+	t.handlerLoppers.Store(topicStr, handlerLooper)
+
+	return nil
+}
+
+// UnSubscribe helps you to unsubscribe from the specific action of the table according to the req.
+func (t *GoroutineClient) UnSubscribe(req *SubscribeRequest) error {
+	err := t.unSubscribe(req)
+	if err != nil {
+		fmt.Printf("UnSubscribe Failed: %s\n", err.Error())
+		return err
+	}
+
+	conn, err := newConnectedConn(req.Address)
+	if err != nil {
+		fmt.Printf("Failed to connect to server: %s\n", err.Error())
+		return err
+	}
+
+	defer conn.Close()
+
+	topicStr, err := t.getTopicFromServer(req.TableName, req.ActionName, conn)
+	if err != nil {
+		fmt.Printf("Failed to get topic from server: %s\n", err.Error())
+		return err
+	}
+
+	raw, ok := t.handlerLoppers.Load(topicStr)
+	if !ok {
+		return nil
+	}
+
+	handlerLopper := raw.(*handlerLopper)
+	t.handlerLoppers.Delete(topicStr)
+	handlerLopper.stop()
+
+	return err
+}
+
+func (t *GoroutineClient) getSubscriber() *subscriber {
+	return t.subscriber
+}
+
+// IsClosed checks whether the client is closed.
+func (t *GoroutineClient) IsClosed() bool {
+	select {
+	case <-t.exit:
+		return true
+	default:
+		return false
+	}
+}
+
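+// A minimal subscription sketch (illustrative only: the server address, table
+// name and action name are placeholders, not part of this package):
+//
+//	client := NewGoroutineClient("", 0) // listen on the default port 8849
+//	req := &SubscribeRequest{
+//		Address:    "127.0.0.1:8848",
+//		TableName:  "trades",
+//		ActionName: "demo",
+//		Offset:     0,
+//		Reconnect:  true,
+//	}
+//	req.SetBatchSize(10).SetThrottle(1)
+//	if err := client.Subscribe(req); err != nil {
+//		// handle the error
+//	}
+//	defer client.Close()
+
+// Close closes the client and stops subscribing.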
+func (t *GoroutineClient) Close() { + t.handlerLoppers.Range(func(k, v interface{}) bool { + val := v.(*handlerLopper) + + val.stop() + return true + }) + + t.handlerLoppers = sync.Map{} + t.stop() +} + +func (t *GoroutineClient) doReconnect(s *site) bool { + conn, err := newConnectedConn(s.address) + if err != nil { + fmt.Printf("Failed to connect to server: %s\n", err.Error()) + return false + } + + topic, err := t.getTopicFromServer(s.tableName, s.actionName, conn) + if err != nil { + fmt.Printf("Failed to get topic from server during reconnection using doReconnect: %s\n", err.Error()) + return false + } + + raw, ok := t.handlerLoppers.Load(topic) + if !ok || raw == nil { + fmt.Println("Goroutine for subscription is not started") + return false + } + + handlerLopper := raw.(*handlerLopper) + + handlerLopper.stop() + + req := &SubscribeRequest{ + Address: s.address, + TableName: s.tableName, + ActionName: s.actionName, + Handler: s.handler, + Offset: s.msgID + 1, + Filter: s.filter, + Reconnect: s.reconnect, + } + + err = t.Subscribe(req) + if err != nil { + fmt.Printf("%s %s Unable to subscribe to the table. Try again after 1 second.\n", time.Now().UTC().String(), topic) + return false + } + + fmt.Printf("%s %s Successfully reconnected and subscribed.\n", time.Now().UTC().String(), topic) + return true +} + +func (t *GoroutineClient) stop() { + select { + case <-t.exit: + default: + close(t.exit) + } +} + +func (t *GoroutineClient) tryReconnect(topic string) bool { + topicRaw, ok := haTopicToTrueTopic.Load(topic) + if !ok { + return false + } + + queueMap.Delete(topicRaw) + + raw, ok := trueTopicToSites.Load(topicRaw) + if !ok { + return false + } + + sites := raw.([]*site) + + if len(sites) == 0 { + return false + } + + if len(sites) == 1 && !sites[0].reconnect { + return false + } + + site := getActiveSite(sites) + if site != nil { + if t.doReconnect(site) { + waitReconnectTopic.Delete(topicRaw) + return true + } + + waitReconnectTopic.Store(topicRaw, topicRaw) + return false + } + + return false +} diff --git a/streaming/goroutine_client_test.go b/streaming/goroutine_client_test.go new file mode 100644 index 0000000..93fb0ee --- /dev/null +++ b/streaming/goroutine_client_test.go @@ -0,0 +1,54 @@ +package streaming + +import ( + "strings" + "testing" + "time" + + "github.com/stretchr/testify/assert" +) + +var failedAction = "failedAction" + +func TestGoroutineClient(t *testing.T) { + tc := NewGoroutineClient(localhost, 3888) + + req := &SubscribeRequest{ + Address: testAddr, + TableName: "threaded", + ActionName: "action1", + Offset: 0, + Reconnect: true, + } + + req.SetBatchSize(10).SetThrottle(1) + + err := tc.Subscribe(req) + assert.Nil(t, err) + + sendOneSubscription(3888) + + sub := tc.getSubscriber() + assert.Equal(t, sub.listeningPort, int32(3888)) + assert.Equal(t, strings.HasPrefix(sub.listeningHost, "127.0.0.1"), true) + + item := &reconnectItem{ + reconnectState: 1, + lastReconnectTimestamp: time.Now().UnixNano() / 1000000, + } + + item.putTopic("127.0.0.1:3000:local3000/threaded/action1") + reconnectTable.Store("127.0.0.1:3000:local3000", item) + + time.Sleep(4 * time.Second) + + err = tc.UnSubscribe(req) + assert.Nil(t, err) + + req.ActionName = failedAction + err = tc.Subscribe(req) + assert.Equal(t, err.Error(), "client error response. 
@K") + tc.Close() + + time.Sleep(2 * time.Second) +} diff --git a/streaming/goroutine_pooled_client.go b/streaming/goroutine_pooled_client.go new file mode 100644 index 0000000..7351a5c --- /dev/null +++ b/streaming/goroutine_pooled_client.go @@ -0,0 +1,252 @@ +package streaming + +import ( + "fmt" + "runtime" + "sync" + "time" + + "github.com/smallnest/chanx" +) + +// GoroutinePooledClient is an implementation of AbstractClient for streaming subscription. +type GoroutinePooledClient struct { + *subscriber + + exit chan bool + + queueHandlers sync.Map +} + +type queueHandlerBinder struct { + queue *chanx.UnboundedChan + handler MessageHandler +} + +// NewGoroutinePooledClient instantiates an instance of GoroutinePooledClient, +// which is used to listen on the listening port to receive subscription info. +// When listeningHost is "", the default host is the local address. +// When listeningPort is 0, the default port is the 8849. +func NewGoroutinePooledClient(listeningHost string, listeningPort int) *GoroutinePooledClient { + if listeningPort == 0 { + listeningPort = DefaultPort + } + + t := &GoroutinePooledClient{ + subscriber: newSubscriber(listeningHost, listeningPort), + exit: make(chan bool), + queueHandlers: sync.Map{}, + } + + go listening(t, listeningPort) + + go t.run() + + return t +} + +// Subscribe helps you to subscribe the specific action of the table according to the req. +func (t *GoroutinePooledClient) Subscribe(req *SubscribeRequest) error { + return t.subscribe(req) +} + +func (t *GoroutinePooledClient) subscribe(req *SubscribeRequest) error { + queue, err := t.subscribeInternal(req) + if err != nil { + fmt.Printf("Failed to subscribe: %s\n", err.Error()) + return err + } + + conn, err := newConnectedConn(req.Address) + if err != nil { + fmt.Printf("Failed to instantiate a connected conn: %s\n", err.Error()) + return err + } + + defer conn.Close() + + topicStr, err := t.getTopicFromServer(req.TableName, req.ActionName, conn) + if err != nil { + fmt.Printf("Failed to get topic from server: %s\n", err.Error()) + return err + } + + queueHandler := &queueHandlerBinder{ + queue: queue, + handler: req.Handler, + } + + if req.Handler == nil { + queueHandler.handler = &DefaultMessageHandler{} + } + + t.queueHandlers.Store(topicStr, queueHandler) + + return nil +} + +// UnSubscribe helps you to unsubscribe the specific action of the table according to the req. +func (t *GoroutinePooledClient) UnSubscribe(req *SubscribeRequest) error { + if err := t.unSubscribe(req); err != nil { + fmt.Printf("UnSubscribe Failed: %s\n", err.Error()) + return err + } + + conn, err := newConnectedConn(req.Address) + if err != nil { + fmt.Printf("Failed to instantiate a connected conn: %s\n", err.Error()) + return err + } + + defer conn.Close() + + topicStr, err := t.getTopicFromServer(req.TableName, req.ActionName, conn) + if err != nil { + fmt.Printf("Failed to get topic from server: %s\n", err.Error()) + return err + } + + t.queueHandlers.Delete(topicStr) + + return nil +} + +func (t *GoroutinePooledClient) getSubscriber() *subscriber { + return t.subscriber +} + +// IsClosed checks whether the client is closed. +func (t *GoroutinePooledClient) IsClosed() bool { + select { + case <-t.exit: + return true + default: + return false + } +} + +// Close closes the client and stop subscribing. 
+func (t *GoroutinePooledClient) Close() { + t.queueHandlers = sync.Map{} + close(t.exit) +} + +func (t *GoroutinePooledClient) doReconnect(s *site) bool { + topicStr := fmt.Sprintf("%s/%s/%s", s.address, s.tableName, s.actionName) + + req := &SubscribeRequest{ + Address: s.address, + TableName: s.tableName, + ActionName: s.actionName, + Handler: s.handler, + Offset: s.msgID + 1, + Filter: s.filter, + Reconnect: s.reconnect, + } + + if err := t.Subscribe(req); err != nil { + fmt.Printf("%s %s Unable to subscribe to the table. Try again after 1 second.\n", time.Now().UTC().String(), topicStr) + return false + } + + fmt.Printf("%s %s Successfully reconnected and subscribed.\n", time.Now().UTC().String(), topicStr) + return true +} + +func (t *GoroutinePooledClient) tryReconnect(topic string) bool { + topicRaw, ok := haTopicToTrueTopic.Load(topic) + if !ok { + return false + } + + queueMap.Delete(topicRaw) + + raw, ok := trueTopicToSites.Load(topicRaw) + if !ok { + return false + } + + sites := raw.([]*site) + + if len(sites) == 0 { + return false + } + + if len(sites) == 1 && !sites[0].reconnect { + return false + } + + site := getActiveSite(sites) + if site != nil { + if t.doReconnect(site) { + waitReconnectTopic.Delete(topicRaw) + return true + } + + waitReconnectTopic.Store(topicRaw, topicRaw) + return false + } + + return false +} + +func (t *GoroutinePooledClient) run() { + backLog := chanx.NewUnboundedChan(10) + + for !t.IsClosed() { + loop: + for { + select { + case val := <-backLog.Out: + msg := val.(IMessage) + raw, ok := t.queueHandlers.Load(msg.GetTopic()) + if !ok { + continue + } + + binder := raw.(*queueHandlerBinder) + + go func() { + binder.handler.DoEvent(msg) + }() + + default: + if backLog.Len() == 0 && backLog.BufLen() == 0 { + break loop + } + } + } + + t.refill(backLog) + } +} + +func (t *GoroutinePooledClient) refill(backLog *chanx.UnboundedChan) { + count := 200 + for !t.fillBackLog(backLog) { + if count < 100 { + runtime.Gosched() + } + + count-- + } +} + +func (t *GoroutinePooledClient) fillBackLog(backLog *chanx.UnboundedChan) bool { + filled := false + t.queueHandlers.Range(func(k, v interface{}) bool { + val := v.(*queueHandlerBinder) + msg := poll(val.queue) + if len(msg) > 0 { + for _, val := range msg { + backLog.In <- val + } + + filled = true + } + + return true + }) + + return filled +} diff --git a/streaming/goroutine_pooled_client_test.go b/streaming/goroutine_pooled_client_test.go new file mode 100644 index 0000000..b65e5fd --- /dev/null +++ b/streaming/goroutine_pooled_client_test.go @@ -0,0 +1,50 @@ +package streaming + +import ( + "strings" + "testing" + "time" + + "github.com/stretchr/testify/assert" +) + +func TestGoroutinePooledClient(t *testing.T) { + tpc := NewGoroutinePooledClient(localhost, 2888) + + req := &SubscribeRequest{ + Address: testAddr, + TableName: "pooled", + ActionName: "action1", + Offset: 0, + Reconnect: true, + } + + req.SetBatchSize(10).SetThrottle(1) + + err := tpc.Subscribe(req) + assert.Nil(t, err) + + sendMoreSubscription(2888) + + sub := tpc.getSubscriber() + assert.Equal(t, sub.listeningPort, int32(2888)) + assert.Equal(t, strings.HasPrefix(sub.listeningHost, "127.0.0.1"), true) + + item := &reconnectItem{ + reconnectState: 1, + lastReconnectTimestamp: time.Now().UnixNano() / 1000000, + } + + item.putTopic("127.0.0.1:3000:local3000/pooled/action1") + reconnectTable.Store("127.0.0.1:3000:local3000", item) + + time.Sleep(3 * time.Second) + + err = tpc.UnSubscribe(req) + assert.Nil(t, err) + + req.ActionName = failedAction + 
err = tpc.Subscribe(req) + assert.Equal(t, err.Error(), "client error response. @K") + tpc.Close() +} diff --git a/streaming/handler_looper.go b/streaming/handler_looper.go new file mode 100644 index 0000000..0fab7a7 --- /dev/null +++ b/streaming/handler_looper.go @@ -0,0 +1,103 @@ +package streaming + +import ( + "time" + + "github.com/smallnest/chanx" +) + +type handlerLopper struct { + queue *chanx.UnboundedChan + handler MessageHandler + + batchSize *int + throttle *int + exit chan bool +} + +func (h *handlerLopper) getThrottle() int { + if h.throttle == nil { + return -1 + } + + return *h.throttle +} + +func (h *handlerLopper) getBatchSize() int { + if h.batchSize == nil { + return -1 + } + + return *h.batchSize +} + +func (h *handlerLopper) stop() { + select { + case <-h.exit: + default: + close(h.exit) + } +} + +func (h *handlerLopper) run() { + h.exit = make(chan bool) + for { + select { + case <-h.exit: + return + default: + h.handleMessage() + } + } +} + +func (h *handlerLopper) handleMessage() { + msg := make([]IMessage, 0) + batchSize := h.getBatchSize() + throttle := h.getThrottle() + switch { + case batchSize == -1 && throttle == -1: + v := <-h.queue.Out + msg = append(msg, v.(IMessage)) + case batchSize != -1 && throttle != -1: + end := time.Now().Add(time.Duration(throttle) * time.Millisecond) + for len(msg) == 0 || ((len(msg) == 0 || len(msg) < batchSize) && time.Now().Before(end)) { + tmp := poll(h.queue) + if tmp != nil { + if msg == nil { + msg = tmp + } else { + msg = append(msg, tmp...) + } + } + } + default: + end := time.Now().Add(time.Duration(throttle) * time.Millisecond) + for len(msg) == 0 || time.Now().Before(end) { + tmp := poll(h.queue) + if tmp != nil { + if msg == nil { + msg = tmp + } else { + msg = append(msg, tmp...) 
+ } + } + } + } + + for _, v := range msg { + h.handler.DoEvent(v) + } +} + +func poll(queue *chanx.UnboundedChan) []IMessage { + res := make([]IMessage, 0) + for { + select { + case val := <-queue.Out: + res = append(res, val.(IMessage)) + default: + return res + } + } +} diff --git a/streaming/handler_looper_test.go b/streaming/handler_looper_test.go new file mode 100644 index 0000000..05e985e --- /dev/null +++ b/streaming/handler_looper_test.go @@ -0,0 +1 @@ +package streaming diff --git a/streaming/listening.go b/streaming/listening.go new file mode 100644 index 0000000..aa3b05e --- /dev/null +++ b/streaming/listening.go @@ -0,0 +1,85 @@ +package streaming + +import ( + "context" + "fmt" + "net" + "runtime" + "time" +) + +func listening(c AbstractClient, port int) { + address := &net.TCPAddr{ + Port: port, + } + + ln, err := net.ListenTCP("tcp", address) + if err != nil { + panic(fmt.Errorf("failed to listening 0.0.0.0:%d, %w", port, err)) + } + + defer func() { + err = ln.Close() + if err != nil { + fmt.Printf("Failed to close listening tcp server: %s\n", err.Error()) + } + }() + + ctx, f := context.WithCancel(context.TODO()) + + d := &reconnectDetector{ + AbstractClient: c, + } + + go d.run() + + cs := make([]net.Conn, 0) + for !c.IsClosed() { + conn, err := handleData(ctx, ln, c) + if err != nil { + time.Sleep(100 * time.Millisecond) + continue + } + + cs = append(cs, conn) + } + + f() + for _, v := range cs { + v.Close() + } +} + +func handleData(ctx context.Context, ln *net.TCPListener, c AbstractClient) (net.Conn, error) { + conn, err := ln.AcceptTCP() + if err != nil { + fmt.Printf("Failed to accept tcp: %s\n", err.Error()) + return nil, err + } + + err = conn.SetKeepAlive(true) + if err != nil { + fmt.Printf("Failed to set conn keepAlive: %s\n", err.Error()) + return nil, err + } + + mp := &messageParser{ + ctx: ctx, + Conn: conn, + subscriber: c.getSubscriber(), + topicNameToIndex: make(map[string]map[string]int), + } + + go mp.run() + + if runtime.GOOS != "linux" { + c := &connectionDetector{ + Conn: conn, + ctx: ctx, + } + + go c.run() + } + + return conn, nil +} diff --git a/streaming/listening_test.go b/streaming/listening_test.go new file mode 100644 index 0000000..05e985e --- /dev/null +++ b/streaming/listening_test.go @@ -0,0 +1 @@ +package streaming diff --git a/streaming/message_handler.go b/streaming/message_handler.go new file mode 100644 index 0000000..a0064e3 --- /dev/null +++ b/streaming/message_handler.go @@ -0,0 +1,41 @@ +package streaming + +import ( + "fmt" + "sync/atomic" + "time" +) + +// MessageHandler is an interface which will help you to handle message. +// You can implement the interface and use it when instantiate a SubscribeRequest or use DefaultMessageHandler by default. +type MessageHandler interface { + // DoEvent will be called when you subscribe with a goroutineClient or a goroutinePooledClient + DoEvent(msg IMessage) +} + +// DefaultMessageHandler is an implementation of MessageHandler, which is the default handler of IMessage. +type DefaultMessageHandler struct { + start bool + startTime int64 + count int64 +} + +// DoEvent will be called when you subscribe with a goroutineClient or a goroutinePooledClient. 
+func (d *DefaultMessageHandler) DoEvent(msg IMessage) {
+	if !d.start {
+		d.start = true
+		d.startTime = time.Now().Unix()
+	}
+
+	count := atomic.AddInt64(&d.count, 1)
+	fmt.Printf("Get %d messages now.\n", count)
+	if count%100000 == 0 {
+		if elapsed := time.Now().Unix() - d.startTime; elapsed > 0 {
+			fmt.Printf("%d messages took %d s total, throughput: %d messages/s\n",
+				count, elapsed, count/elapsed)
+		}
+	}
+
+	if count == 2000000 {
+		fmt.Println("Done")
+	}
+}
diff --git a/streaming/message_handler_test.go b/streaming/message_handler_test.go
new file mode 100644
index 0000000..05e985e
--- /dev/null
+++ b/streaming/message_handler_test.go
@@ -0,0 +1 @@
+package streaming
diff --git a/streaming/message_parser.go b/streaming/message_parser.go
new file mode 100644
index 0000000..2b98ff9
--- /dev/null
+++ b/streaming/message_parser.go
@@ -0,0 +1,167 @@
+package streaming
+
+import (
+	"context"
+	"errors"
+	"fmt"
+	"io"
+	"net"
+	"strings"
+
+	"github.com/dolphindb/api-go/dialer/protocol"
+	"github.com/dolphindb/api-go/model"
+)
+
+type messageParser struct {
+	ctx context.Context
+	net.Conn
+	*subscriber
+
+	topic            string
+	topicNameToIndex map[string]map[string]int
+}
+
+func (m *messageParser) run() {
+	if err := m.parse(); err != nil {
+		if IsClosed(m.topic) {
+			return
+		}
+
+		setNeedReconnect(m.topic, 1)
+	}
+}
+
+func (m *messageParser) parseHeader(r protocol.Reader, bo protocol.ByteOrder) (uint64, error) {
+	byts, err := r.ReadCertainBytes(16)
+	if err != nil {
+		fmt.Printf("Failed to read msgID from conn: %s\n", err.Error())
+		return 0, err
+	}
+
+	msgID := bo.Uint64(byts[8:])
+	byts, err = r.ReadBytes(protocol.StringSep)
+	if err != nil {
+		fmt.Printf("Failed to read topic from conn: %s\n", err.Error())
+		return 0, err
+	}
+
+	m.topic = string(byts)
+
+	return msgID, nil
+}
+
+func (m *messageParser) parse() error {
+	r := protocol.NewReader(m.Conn)
+	for !m.IsClosed() {
+		b, err := r.ReadByte()
+		if err != nil {
+			if errors.Is(err, io.EOF) {
+				return nil
+			}
+
+			fmt.Printf("Failed to read ByteOrder byte from conn: %s\n", err.Error())
+			return err
+		}
+
+		bo := protocol.GetByteOrder('1')
+		if b != 1 {
+			bo = protocol.BigEndian
+		}
+
+		msgID, err := m.parseHeader(r, bo)
+		if err != nil {
+			fmt.Printf("Failed to parse header: %s\n", err.Error())
+			return err
+		}
+
+		df, err := model.ParseDataForm(r, bo)
+		if err != nil {
+			fmt.Printf("Failed to parse DataForm: %s\n", err.Error())
+			return err
+		}
+
+		switch {
+		case df.GetDataForm() == model.DfTable:
+			m.handleTable(df.(*model.Table))
+		case df.GetDataForm() == model.DfVector:
+			m.handleVector(msgID, df.(*model.Vector))
+		default:
+			fmt.Println("Invalid format in the message body. Vector or table is expected")
+		}
+	}
+
+	return nil
+}
+
+func (m *messageParser) handleTable(tb *model.Table) {
+	if tb.Rows() != 0 {
+		fmt.Println("Invalid format in the message body. Vector or table is expected")
+		return
+	}
+
+	for _, v := range strings.Split(m.topic, ",") {
+		setNeedReconnect(v, 0)
+	}
+
+	nameToIndex := make(map[string]int)
+	count := 0
+	for _, v := range tb.ColNames {
+		nameToIndex[strings.ToLower(v)] = count
+		count++
+	}
+
+	m.topicNameToIndex[m.topic] = nameToIndex
+}
+
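+// handleVector carries the actual stream payload: each element of the vector
+// is one column. msgID identifies the last row of the batch, so the offset of
+// row i is msgID-rowSize+1+i; multi-row batches are split into per-row
+// messages before being dispatched.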
+func (m *messageParser) handleVector(msgID uint64, vct *model.Vector) {
+	colSize := vct.Rows()
+	rowSize := vct.Data.ElementValue(0).(model.DataForm).Rows()
+	if rowSize > 1 {
+		msgs := make([]IMessage, rowSize)
+		st := msgID - uint64(rowSize) + 1
+		for i := 0; i < rowSize; i++ {
+			dts := make([]model.DataForm, colSize)
+			for j := 0; j < colSize; j++ {
+				df := vct.Data.ElementValue(j).(*model.Vector)
+				if df.GetDataType() > 64 && df.GetDataType() < 128 {
+					d, _ := model.NewDataType(model.DtAny, df.GetVectorValue(i))
+					dts[j] = model.NewScalar(d)
+				} else {
+					dts[j] = model.NewScalar(df.Get(i))
+				}
+			}
+
+			dtl, _ := model.NewDataTypeListWithRaw(model.DtAny, dts)
+			topics := strings.Split(m.topic, ",")
+			msg := &Message{
+				offset:      int64(st),
+				topic:       m.topic,
+				msg:         model.NewVector(dtl),
+				nameToIndex: m.topicNameToIndex[topics[0]],
+			}
+
+			msgs[i] = msg
+		}
+
+		batchDispatch(msgs)
+	} else if rowSize == 1 {
+		topics := strings.Split(m.topic, ",")
+		msg := &Message{
+			offset:      int64(msgID),
+			topic:       m.topic,
+			msg:         vct,
+			nameToIndex: m.topicNameToIndex[topics[0]],
+		}
+
+		dispatch(msg)
+	}
+}
+
+func (m *messageParser) IsClosed() bool {
+	select {
+	case <-m.ctx.Done():
+		return true
+	default:
+		return false
+	}
+}
diff --git a/streaming/message_parser_test.go b/streaming/message_parser_test.go
new file mode 100644
index 0000000..05e985e
--- /dev/null
+++ b/streaming/message_parser_test.go
@@ -0,0 +1 @@
+package streaming
diff --git a/streaming/polling_client.go b/streaming/polling_client.go
new file mode 100644
index 0000000..02e57d4
--- /dev/null
+++ b/streaming/polling_client.go
@@ -0,0 +1,149 @@
+package streaming
+
+import (
+	"fmt"
+	"time"
+)
+
+// PollingClient is a client for streaming subscription, which allows you to pull subscribed messages from the TopicPoller.
+type PollingClient struct {
+	*subscriber
+
+	topicPoller *TopicPoller
+
+	exit chan bool
+}
+
+// NewPollingClient instantiates a new polling client and listens on the listening port to receive subscriptions.
+func NewPollingClient(listeningHost string, listeningPort int) *PollingClient {
+	if listeningPort == 0 {
+		listeningPort = DefaultPort
+	}
+
+	t := &PollingClient{
+		subscriber: newSubscriber(listeningHost, listeningPort),
+		exit:       make(chan bool),
+	}
+
+	go listening(t, listeningPort)
+	return t
+}
+
+// Subscribe helps you to subscribe to the specific action of the table according to the req.
+func (t *PollingClient) Subscribe(req *SubscribeRequest) (*TopicPoller, error) {
+	err := t.subscribe(req)
+	if err != nil {
+		fmt.Printf("Failed to subscribe topic: %s\n", err.Error())
+		return nil, err
+	}
+
+	return t.topicPoller, nil
+}
+
+func (t *PollingClient) subscribe(req *SubscribeRequest) error {
+	queue, err := t.subscribeInternal(req)
+	if err != nil {
+		return err
+	}
+
+	t.topicPoller = &TopicPoller{
+		queue: queue,
+	}
+
+	return nil
+}
+
+// UnSubscribe helps you to unsubscribe from the specific action of the table according to the req.
+func (t *PollingClient) UnSubscribe(req *SubscribeRequest) error {
+	return t.unSubscribe(req)
+}
+
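+// A minimal polling sketch (illustrative only: the server address, table name
+// and action name are placeholders, not part of this package):
+//
+//	client := NewPollingClient("", 0) // listen on the default port 8849
+//	poller, err := client.Subscribe(&SubscribeRequest{
+//		Address:    "127.0.0.1:8848",
+//		TableName:  "trades",
+//		ActionName: "demo",
+//		Offset:     0,
+//	})
+//	if err != nil {
+//		// handle the error
+//	}
+//	for _, msg := range poller.Poll(1000, 100) {
+//		_ = msg.GetValue(0) // first column of the current row
+//	}
+
+// Close closes the client.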
+func (t *PollingClient) Close() { + t.stop() +} + +func (t *PollingClient) getSubscriber() *subscriber { + return t.subscriber +} + +// IsClosed checks whether the client is closed. +func (t *PollingClient) IsClosed() bool { + select { + case <-t.exit: + return true + default: + return false + } +} + +func (t *PollingClient) doReconnect(s *site) bool { + time.Sleep(1 * time.Second) + + req := &SubscribeRequest{ + Address: s.address, + TableName: s.tableName, + ActionName: s.actionName, + Handler: s.handler, + Offset: s.msgID + 1, + Filter: s.filter, + Reconnect: s.reconnect, + AllowExists: s.AllowExists, + } + + queue, err := t.subscribeInternal(req) + if err != nil { + fmt.Printf("%s Unable to subscribe to the table. Try again after 1 second.\n", time.Now().UTC().String()) + return false + } + + t.topicPoller.queue = queue + + fmt.Printf("%s Successfully reconnected and subscribed.\n", time.Now().UTC().String()) + return true +} + +func (t *PollingClient) stop() { + select { + case <-t.exit: + default: + close(t.exit) + } +} + +func (t *PollingClient) tryReconnect(topic string) bool { + topicRaw, ok := haTopicToTrueTopic.Load(topic) + if !ok { + return false + } + + queueMap.Delete(topicRaw) + + raw, ok := trueTopicToSites.Load(topicRaw) + if !ok { + return false + } + + sites := raw.([]*site) + + if len(sites) == 0 { + return false + } + + if len(sites) == 1 && !sites[0].reconnect { + return false + } + + site := getActiveSite(sites) + if site != nil { + if t.doReconnect(site) { + waitReconnectTopic.Delete(topicRaw) + return true + } + + waitReconnectTopic.Store(topicRaw, topicRaw) + return false + } + + return false +} diff --git a/streaming/polling_client_test.go b/streaming/polling_client_test.go new file mode 100644 index 0000000..4969b60 --- /dev/null +++ b/streaming/polling_client_test.go @@ -0,0 +1,241 @@ +package streaming + +import ( + "context" + "fmt" + "net" + "os" + "strings" + "testing" + "time" + + "github.com/stretchr/testify/assert" +) + +const testAddr = "127.0.0.1:3000" + +var subscribeServer = make([]net.Conn, 0) + +var ( + successResponse = []byte{0x32, 0x30, 0x32, 0x36, 0x37, 0x33, 0x35, 0x39, 0x20, 0x30, 0x20, 0x31, 0x0a, 0x4f, 0x4b, 0x0a} + versionResponse = []byte{0x32, 0x30, 0x32, 0x36, 0x37, 0x33, 0x35, 0x39, 0x20, 0x31, 0x20, 0x31, 0x0a, 0x4f, 0x4b, 0x0a, + 0x12, 0x00, 0x31, 0x20, 0x63, 0x00} + + pollingGetSubscriptionTopicResponse = []byte{0x32, 0x30, 0x32, 0x36, 0x37, 0x33, 0x35, 0x39, 0x20, 0x31, 0x20, 0x31, 0x0a, 0x4f, 0x4b, 0x0a, + 0x19, 0x01, 0x01, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x12, 0x00, 0x31, 0x32, 0x37, 0x2e, 0x30, 0x2e, 0x30, 0x2e, 0x31, + 0x3a, 0x33, 0x30, 0x30, 0x30, 0x3a, 0x6c, 0x6f, 0x63, 0x61, 0x6c, 0x33, 0x30, 0x30, 0x30, 0x2f, 0x70, 0x6f, 0x6c, 0x6c, 0x69, + 0x6e, 0x67, 0x2f, 0x61, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x31, 0x00} + pollingGetSubscriptionTopicFailedResponse = []byte{0x32, 0x30, 0x32, 0x36, 0x37, 0x33, 0x35, 0x39, 0x20, 0x31, 0x20, 0x31, 0x0a, 0x40, 0x4b, 0x0a} + pollingPublishTableResponse = []byte{0x32, 0x30, 0x32, 0x36, 0x37, 0x33, 0x35, 0x39, 0x20, 0x31, 0x20, 0x31, 0x0a, 0x4f, 0x4b, 0x0a, + 0x19, 0x01, 0x02, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x12, 0x01, 0x01, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, + 0x00, 0x12, 0x01, 0x01, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x31, 0x32, 0x37, 0x2e, 0x30, 0x2e, 0x30, 0x2e, 0x31, + 0x3a, 0x33, 0x30, 0x30, 0x30, 0x3a, 0x6c, 0x6f, 0x63, 0x61, 0x6c, 0x33, 0x30, 0x30, 0x30, 0x00} + + clientGetSubscriptionTopicResponse = []byte{0x32, 0x30, 0x32, 0x36, 0x37, 0x33, 0x35, 
0x39, 0x20, 0x31, 0x20, 0x31, 0x0a, 0x4f, 0x4b, 0x0a, + 0x19, 0x01, 0x01, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x12, 0x00, 0x31, 0x32, 0x37, 0x2e, 0x30, 0x2e, 0x30, 0x2e, 0x31, + 0x3a, 0x33, 0x30, 0x30, 0x30, 0x3a, 0x6c, 0x6f, 0x63, 0x61, 0x6c, 0x33, 0x30, 0x30, 0x30, 0x2f, 0x74, 0x68, 0x72, 0x65, 0x61, + 0x64, 0x65, 0x64, 0x2f, 0x61, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x31, 0x00} + clientGetSubscriptionTopicFailedResponse = []byte{0x32, 0x30, 0x32, 0x36, 0x37, 0x33, 0x35, 0x39, 0x20, 0x31, 0x20, 0x31, 0x0a, 0x40, 0x4b, 0x0a} + clientgPublishTableResponse = []byte{0x32, 0x30, 0x32, 0x36, 0x37, 0x33, 0x35, 0x39, 0x20, 0x31, 0x20, 0x31, 0x0a, 0x4f, 0x4b, 0x0a, + 0x12, 0x00, 0x6c, 0x6f, 0x63, 0x61, 0x6c, 0x33, 0x30, 0x30, 0x30, 0x00} + + pooledGetSubscriptionTopicResponse = []byte{0x32, 0x30, 0x32, 0x36, 0x37, 0x33, 0x35, 0x39, 0x20, 0x31, 0x20, 0x31, 0x0a, 0x4f, 0x4b, 0x0a, + 0x19, 0x01, 0x01, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x12, 0x00, 0x31, 0x32, 0x37, 0x2e, 0x30, 0x2e, 0x30, 0x2e, 0x31, + 0x3a, 0x33, 0x30, 0x30, 0x30, 0x3a, 0x6c, 0x6f, 0x63, 0x61, 0x6c, 0x33, 0x30, 0x30, 0x30, 0x2f, 0x70, 0x6f, 0x6f, 0x6c, 0x65, + 0x64, 0x2f, 0x61, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x31, 0x00} + pooledGetSubscriptionTopicFailedResponse = []byte{0x32, 0x30, 0x32, 0x36, 0x37, 0x33, 0x35, 0x39, 0x20, 0x31, 0x20, 0x31, 0x0a, 0x40, 0x4b, 0x0a} + pooledgPublishTableResponse = []byte{0x32, 0x30, 0x32, 0x36, 0x37, 0x33, 0x35, 0x39, 0x20, 0x31, 0x20, 0x31, 0x0a, 0x4f, 0x4b, 0x0a, + 0x19, 0x01, 0x02, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x12, 0x01, 0x01, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, + 0x00, 0x12, 0x01, 0x01, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x31, 0x32, 0x37, 0x2e, 0x30, 0x2e, 0x30, 0x2e, 0x31, + 0x3a, 0x33, 0x30, 0x30, 0x30, 0x3a, 0x6c, 0x6f, 0x63, 0x61, 0x6c, 0x33, 0x30, 0x30, 0x30, 0x00} +) + +func TestPollingClient(t *testing.T) { + pc := NewPollingClient(localhost, 3111) + + req := &SubscribeRequest{ + Address: testAddr, + TableName: "polling", + ActionName: "action1", + Offset: 0, + Reconnect: true, + } + + poller, err := pc.Subscribe(req) + assert.Nil(t, err) + + msg := poller.Poll(10, 10) + assert.Equal(t, len(msg), 0) + + sendOneSubscription(DefaultPort) + + sub := pc.getSubscriber() + assert.Equal(t, sub.listeningPort, int32(3111)) + assert.Equal(t, strings.HasPrefix(sub.listeningHost, "127.0.0.1"), true) + + item := &reconnectItem{ + reconnectState: 1, + lastReconnectTimestamp: time.Now().UnixNano() / 1000000, + } + + item.putTopic("127.0.0.1:3000:local3000/polling/action1") + reconnectTable.Store("127.0.0.1:3000:local3000", item) + + time.Sleep(4 * time.Second) + + err = pc.UnSubscribe(req) + assert.Nil(t, err) + + req.ActionName = failedAction + _, err = pc.Subscribe(req) + assert.Equal(t, err.Error(), "client error response. 
@K") + + pc.Close() +} + +func TestMain(m *testing.M) { + exit := make(chan bool) + ln, err := net.Listen("tcp", testAddr) + if err != nil { + return + } + + go func() { + for !isExit(exit) { + conn, err := ln.Accept() + if err != nil { + return + } + + go handleTestData(conn) + } + + ln.Close() + }() + + exitCode := m.Run() + + close(exit) + + reconnectTable.Delete("127.0.0.1:3000:local3000") + queueMap.Delete("topic") + haTopicToTrueTopic.Delete("topic") + messageCache.Delete("topic") + trueTopicToSites.Delete("topic") + + for _, v := range subscribeServer { + v.Close() + } + + os.Exit(exitCode) +} + +func sendOneSubscription(port int) { + d := net.Dialer{} + + dc, err := d.DialContext(context.TODO(), "tcp", fmt.Sprintf("127.0.0.1:%d", port)) + if err != nil { + return + } + + _, err = dc.Write([]byte{0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x31, 0x32, 0x37, 0x2e, 0x30, 0x2e, 0x30, 0x2e, 0x31, 0x3a, 0x33, 0x30, 0x30, 0x30, 0x3a, + 0x6c, 0x6f, 0x63, 0x61, 0x6c, 0x33, 0x30, 0x30, 0x30, 0x00, 0x19, 0x01, 0x01, 0x00, 0x00, 0x00, 0x01, + 0x00, 0x00, 0x00, 0x12, 0x01, 0x01, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x70, 0x6e, 0x6f, 0x00}) + if err != nil { + return + } + subscribeServer = append(subscribeServer, dc) +} + +func sendMoreSubscription(port int) { + d := net.Dialer{} + + dc, err := d.DialContext(context.TODO(), "tcp", fmt.Sprintf("127.0.0.1:%d", port)) + if err != nil { + return + } + + _, _ = dc.Write([]byte{0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x31, 0x32, 0x37, 0x2e, 0x30, 0x2e, 0x30, 0x2e, 0x31, 0x3a, 0x6c, 0x6f, 0x63, 0x61, 0x6c, 0x33, 0x30, 0x30, 0x30, 0x2f, 0x00, 0x00, 0x06, 0x00, 0x00, 0x00, 0x00, 0x01, 0x00, + 0x00, 0x00, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x00, 0x63, 0x6f, 0x6c, 0x00, 0x12, 0x01, 0x01, 0x00, 0x00, 0x00, 0x01, 0x00, + 0x00, 0x00, 0x63, 0x6f, 0x6c, 0x31, 0x00}) + + _, _ = dc.Write([]byte{0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x31, 0x32, 0x37, 0x2e, 0x30, 0x2e, 0x30, 0x2e, 0x31, + 0x3a, 0x33, 0x30, 0x30, 0x30, 0x3a, 0x6c, 0x6f, 0x63, 0x61, 0x6c, 0x33, 0x30, 0x30, 0x30, 0x00, 0x19, + 0x01, 0x01, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x12, 0x01, 0x02, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x70, + 0x6e, 0x6f, 0x00, 0x70, 0x6f, 0x6e, 0x00}) + + subscribeServer = append(subscribeServer, dc) +} + +func handleTestData(conn net.Conn) { + res := make([]byte, 0) + for { + buf := make([]byte, 512) + l, err := conn.Read(buf) + if err != nil { + continue + } + + res = append(res, buf[0:l]...) 
+ tmp := string(res) + length := len(res) + var resp []byte + switch { + case length == 85 && strings.Contains(tmp, "getSubscriptionTopic"): + resp = pollingGetSubscriptionTopicFailedResponse + case length == 86 && strings.Contains(tmp, "getSubscriptionTopic"): + resp = clientGetSubscriptionTopicFailedResponse + case length == 34 && strings.Contains(tmp, "version"): + resp = versionResponse + case isSuccessRequest(length): + resp = successResponse + case length == 80 && strings.Contains(tmp, "getSubscriptionTopic") && strings.Contains(tmp, "polling"): + resp = pollingGetSubscriptionTopicResponse + case length == 103 && strings.Contains(tmp, "publishTable") && strings.Contains(tmp, "polling"): + resp = pollingPublishTableResponse + case length == 81 && strings.Contains(tmp, "getSubscriptionTopic"): + resp = clientGetSubscriptionTopicResponse + case length == 104 && strings.Contains(tmp, "publishTable"): + resp = clientgPublishTableResponse + case length == 79 && strings.Contains(tmp, "getSubscriptionTopic") && strings.Contains(tmp, "pooled"): + resp = pooledGetSubscriptionTopicResponse + case length == 84 && strings.Contains(tmp, "getSubscriptionTopic"): + resp = pooledGetSubscriptionTopicFailedResponse + case length == 102 && strings.Contains(tmp, "publishTable"): + resp = pooledgPublishTableResponse + } + + if resp != nil { + _, err = conn.Write(resp) + if err != nil { + return + } + + res = make([]byte, 0) + } + } +} + +var successLength = []int{49, 15, 25, 254, 86, 94, 87, 95, 85, 93} + +func isSuccessRequest(l int) bool { + for _, v := range successLength { + if v == l { + return true + } + } + + return false +} + +func isExit(exit <-chan bool) bool { + select { + case <-exit: + return true + default: + return false + } +} diff --git a/streaming/reconnect_detector.go b/streaming/reconnect_detector.go new file mode 100644 index 0000000..e918d04 --- /dev/null +++ b/streaming/reconnect_detector.go @@ -0,0 +1,70 @@ +package streaming + +import ( + "fmt" + "time" +) + +type reconnectDetector struct { + AbstractClient +} + +func (r *reconnectDetector) run() { + for !r.IsClosed() { + for _, site := range getAllReconnectSites() { + err := r.handleReconnectSites(site) + if err != nil { + return + } + } + + waitReconnectTopic.Range(func(k, v interface{}) bool { + val := k.(string) + r.tryReconnect(val) + return true + }) + + time.Sleep(1 * time.Second) + } +} + +func (r *reconnectDetector) handleReconnectSites(site string) error { + if getNeedReconnect(site) == 1 { + s := getSiteByName(site) + if s == nil { + return nil + } + + err := r.activeCloseConnection(s) + if err != nil { + fmt.Printf("Failed to reconnect closed connection: %s\n", err.Error()) + return err + } + + lastTopic := "" + for _, topic := range getAllTopicBySite(site) { + r.tryReconnect(topic) + lastTopic = topic + } + + setNeedReconnect(lastTopic, 2) + } else { + ts := getReconnectTimestamp(site) + if time.Now().UnixNano()/1000000 >= ts+3000 { + s := getSiteByName(site) + err := r.activeCloseConnection(s) + if err != nil { + fmt.Printf("Failed to reconnect closed connection: %s\n", err.Error()) + return err + } + + for _, v := range getAllTopicBySite(site) { + r.tryReconnect(v) + } + + setReconnectTimestamp(site, time.Now().UnixNano()/1000000) + } + } + + return nil +} diff --git a/streaming/reconnect_detector_test.go b/streaming/reconnect_detector_test.go new file mode 100644 index 0000000..05e985e --- /dev/null +++ b/streaming/reconnect_detector_test.go @@ -0,0 +1 @@ +package streaming diff --git a/streaming/reconnect_item.go 
b/streaming/reconnect_item.go new file mode 100644 index 0000000..68fa44f --- /dev/null +++ b/streaming/reconnect_item.go @@ -0,0 +1,33 @@ +package streaming + +type reconnectItem struct { + reconnectState int + lastReconnectTimestamp int64 + topics []string +} + +func (r *reconnectItem) getState() int { + return r.reconnectState +} + +func (r *reconnectItem) setState(state int) *reconnectItem { + r.reconnectState = state + return r +} + +func (r *reconnectItem) getTimeStamp() int64 { + return r.lastReconnectTimestamp +} + +func (r *reconnectItem) setTimeStamp(stamp int64) { + r.lastReconnectTimestamp = stamp +} + +func (r *reconnectItem) putTopic(topic string) { + if r.topics == nil { + r.topics = make([]string, 1) + r.topics[0] = topic + } else if !contains(r.topics, topic) { + r.topics = append(r.topics, topic) + } +} diff --git a/streaming/reconnect_item_test.go b/streaming/reconnect_item_test.go new file mode 100644 index 0000000..05e985e --- /dev/null +++ b/streaming/reconnect_item_test.go @@ -0,0 +1 @@ +package streaming diff --git a/streaming/subscriber.go b/streaming/subscriber.go new file mode 100644 index 0000000..17f183e --- /dev/null +++ b/streaming/subscriber.go @@ -0,0 +1,311 @@ +package streaming + +import ( + "fmt" + "strconv" + "strings" + "time" + + "github.com/dolphindb/api-go/dialer" + "github.com/dolphindb/api-go/model" + + "github.com/smallnest/chanx" +) + +type subscriber struct { + listeningHost string + listeningPort int32 +} + +// SubscribeRequest is used for subscribing. +type SubscribeRequest struct { + // Server address + Address string + // Name of the table to be subscribed + TableName string + // Name of the subscription task + ActionName string + // The amount of data processed at one time + BatchSize *int + // Offset of subscription + Offset int64 + // When AllowExists=true, if the topic already exists before subscribing, + // the server will not throw an exception. + AllowExists bool + // timeout. unit: millisecond + Throttle *int + // whether to allow reconnection + Reconnect bool + + // Specify parameter Filter with function setStreamTableFilterColumn. + // SetStreamTableFilterColumn specifies the filtering column of a stream table. + // Only the messages with filtering column values in filter are subscribed. + Filter *model.Vector + // handle subscription information + Handler MessageHandler +} + +type site struct { + address string + tableName string + actionName string + msgID int64 + reconnect bool + AllowExists bool + closed bool + + filter *model.Vector + handler MessageHandler +} + +// SetBatchSize sets the batch size. +func (s *SubscribeRequest) SetBatchSize(bs int) *SubscribeRequest { + s.BatchSize = &bs + + return s +} + +// SetThrottle sets the throttle. 
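+// The value is interpreted in milliseconds (see the Throttle field above).
+// SetBatchSize and SetThrottle return the request itself, so they are usually
+// chained when building a request; a sketch with illustrative values:
+//
+//	req := (&SubscribeRequest{
+//		Address:    "127.0.0.1:8848",
+//		TableName:  "trades",
+//		ActionName: "action1",
+//	}).SetBatchSize(1024).SetThrottle(100)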
+func (s *SubscribeRequest) SetThrottle(th int) *SubscribeRequest { + s.Throttle = &th + + return s +} + +func newSubscriber(subscribeHost string, subscribePort int) *subscriber { + return &subscriber{ + listeningHost: subscribeHost, + listeningPort: int32(subscribePort), + } +} + +func (s *subscriber) subscribeInternal(req *SubscribeRequest) (*chanx.UnboundedChan, error) { + conn, err := newConnectedConn(req.Address) + if err != nil { + fmt.Printf("Failed to connect to server: %s\n", err.Error()) + return nil, err + } + + defer conn.Close() + + topic, err := s.getTopicFromServer(req.TableName, req.ActionName, conn) + if err != nil { + fmt.Printf("Failed to get topic from server: %s\n", err.Error()) + return nil, err + } + + if s.listeningHost == "" || strings.ToLower(s.listeningHost) == localhost { + s.listeningHost = conn.GetLocalAddress() + } + + pubReq, err := generatePublishTableParams(req, s.listeningHost, s.listeningPort) + if err != nil { + fmt.Printf("Failed to generate the params of PublishTable: %s\n", err.Error()) + return nil, err + } + + df, err := conn.RunFunc("publishTable", pubReq) + if err != nil { + fmt.Printf("Failed to publish table: %s\n", err.Error()) + return nil, err + } + + if df.GetDataForm() == model.DfVector && df.GetDataType() == model.DtAny { + err = s.handleAnyVector(topic, df, req) + if err != nil { + fmt.Printf("Failed to handle vector: %s\n", err.Error()) + return nil, err + } + } else { + s.packSite(topic, req) + } + + return addQueue(topic) +} + +func (s *subscriber) packSite(topic string, req *SubscribeRequest) { + si := &site{ + address: req.Address, + tableName: req.TableName, + actionName: req.ActionName, + handler: req.Handler, + msgID: req.Offset - 1, + reconnect: req.Reconnect, + filter: req.Filter, + AllowExists: req.AllowExists, + } + + haTopicToTrueTopic.Store(topic, topic) + trueTopicToSites.Store(topic, []*site{si}) +} + +func (s *subscriber) getTopicFromServer(tableName, actionName string, conn dialer.Conn) (string, error) { + params, err := generatorGetSubscriptionTopicParams(tableName, actionName) + if err != nil { + fmt.Printf("Failed to generate the params of GetSubscriptionTopic: %s\n", err.Error()) + return "", err + } + + df, err := conn.RunFunc("getSubscriptionTopic", params) + if err != nil { + fmt.Printf("Failed to call getSubscriptionTopic: %s\n", err.Error()) + return "", err + } + + vct := df.(*model.Vector) + sca := vct.Data.ElementValue(0).(*model.Scalar) + return sca.DataType.String(), nil +} + +func (s *subscriber) handleAnyVector(topic string, df model.DataForm, req *SubscribeRequest) error { + vct := df.(*model.Vector) + v := vct.Data.ElementValue(1).(*model.Vector) + HASiteStrings := v.Data.StringList() + sites := make([]*site, len(HASiteStrings)) + for k, v := range HASiteStrings { + str := strings.Split(v, ":") + host := str[0] + port, err := strconv.Atoi(str[1]) + if err != nil { + fmt.Printf("Failed to parse server port: %s\n", err.Error()) + return err + } + + alias := str[2] + + sites[k] = &site{ + address: fmt.Sprintf("%s:%d", host, port), + tableName: req.TableName, + actionName: req.ActionName, + msgID: req.Offset - 1, + handler: req.Handler, + reconnect: true, + filter: req.Filter, + AllowExists: req.AllowExists, + } + + haTopicToTrueTopic.Store(fmt.Sprintf("%s:%d:%s/%s/%s", host, port, alias, req.TableName, req.ActionName), topic) + } + + trueTopicToSites.Store(topic, sites) + + return nil +} + +func (s *subscriber) activeCloseConnection(si *site) error { + conn, err := newConnectedConn(si.address) + if err != 
nil {
+		fmt.Printf("Failed to create a connected connection: %s\n", err.Error())
+		return err
+	}
+
+	defer conn.Close()
+
+	df, err := conn.RunScript("version()")
+	if err != nil {
+		fmt.Printf("Failed to call version(): %s\n", err.Error())
+		return err
+	}
+
+	sca := df.(*model.Scalar)
+	verStr := sca.DataType.String()
+	verNum := getVersionNum(verStr)
+	if s.listeningHost == "" || strings.ToLower(s.listeningHost) == localhost {
+		s.listeningHost = conn.GetLocalAddress()
+	}
+
+	params, err := s.packParams(verNum)
+	if err != nil {
+		fmt.Printf("Failed to pack params: %s\n", err.Error())
+		return err
+	}
+
+	_, err = conn.RunFunc("activeClosePublishConnection", params)
+	if err != nil {
+		fmt.Printf("Failed to call activeClosePublishConnection: %s\n", err.Error())
+		return err
+	}
+
+	time.Sleep(1 * time.Second)
+	return nil
+}
+
+func (s *subscriber) packParams(verNum int) ([]model.DataForm, error) {
+	params := make([]model.DataForm, 3)
+
+	localIP, err := model.NewDataType(model.DtString, s.listeningHost)
+	if err != nil {
+		fmt.Printf("Failed to instantiate DataType with listeningHost: %s\n", err.Error())
+		return nil, err
+	}
+
+	params[0] = model.NewScalar(localIP)
+
+	port, err := model.NewDataType(model.DtInt, s.listeningPort)
+	if err != nil {
+		fmt.Printf("Failed to instantiate DataType with listeningPort: %s\n", err.Error())
+		return nil, err
+	}
+
+	params[1] = model.NewScalar(port)
+	// only servers whose version number reaches 955 once the dots are stripped
+	// (see getVersionNum) accept the extra boolean argument; older servers get
+	// just the host and port rather than a nil third element
+	if verNum < 955 {
+		return params[:2], nil
+	}
+
+	tmp, err := model.NewDataType(model.DtBool, byte(1))
+	if err != nil {
+		fmt.Printf("Failed to instantiate DataType with bool value: %s\n", err.Error())
+		return nil, err
+	}
+
+	params[2] = model.NewScalar(tmp)
+
+	return params, nil
+}
+
+func (s *subscriber) unSubscribe(req *SubscribeRequest) error {
+	conn, err := newConnectedConn(req.Address)
+	if err != nil {
+		fmt.Printf("Failed to create a connected connection: %s\n", err.Error())
+		return err
+	}
+
+	defer conn.Close()
+
+	if s.listeningHost == "" || strings.ToLower(s.listeningHost) == localhost {
+		s.listeningHost = conn.GetLocalAddress()
+	}
+
+	stopReq, err := generateStopPublishTableParams(req, s.listeningHost, s.listeningPort)
+	if err != nil {
+		fmt.Printf("Failed to generate the params of stopPublishTable: %s\n", err.Error())
+		return err
+	}
+
+	_, err = conn.RunFunc("stopPublishTable", stopReq)
+	if err != nil {
+		fmt.Printf("Failed to call stopPublishTable: %s\n", err.Error())
+		return err
+	}
+
+	topic, err := s.getTopicFromServer(req.TableName, req.ActionName, conn)
+	if err != nil {
+		fmt.Printf("Failed to get topic from server: %s\n", err.Error())
+		return err
+	}
+
+	queueMap.Delete(topic)
+
+	fmt.Println("Successfully unsubscribed from the table ", topic)
+
+	raw, ok := trueTopicToSites.Load(topic)
+	if !ok {
+		return nil
+	}
+
+	sites := raw.([]*site)
+
+	for _, v := range sites {
+		v.closed = true
+	}
+
+	return nil
+}
diff --git a/streaming/subscriber_test.go b/streaming/subscriber_test.go
new file mode 100644
index 0000000..05e985e
--- /dev/null
+++ b/streaming/subscriber_test.go
@@ -0,0 +1 @@
+package streaming
diff --git a/streaming/topic_poller.go b/streaming/topic_poller.go
new file mode 100644
index 0000000..dddd8f8
--- /dev/null
+++ b/streaming/topic_poller.go
@@ -0,0 +1,59 @@
+package streaming
+
+import (
+	"time"
+
+	"github.com/smallnest/chanx"
+)
+
+// TopicPoller is used to take one or more subscription messages for the polling client.
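+//
+// A minimal polling loop (a sketch; the address and table name below are
+// illustrative, not part of this patch):
+//
+//	client := NewPollingClient("localhost", DefaultPort)
+//	poller, err := client.Subscribe(&SubscribeRequest{
+//		Address:    "127.0.0.1:8848",
+//		TableName:  "trades",
+//		ActionName: "action1",
+//		Offset:     0,
+//		Reconnect:  true,
+//	})
+//	if err != nil {
+//		panic(err)
+//	}
+//	for !client.IsClosed() {
+//		for _, msg := range poller.Poll(1000, 1000) {
+//			_ = msg.GetTopic() // process each IMessage
+//		}
+//	}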
+type TopicPoller struct {
+	queue *chanx.UnboundedChan
+	cache []IMessage
+}
+
+// Poll retrieves and removes the head of this queue, waiting up to the specified
+// wait time if necessary for an element to become available.
+func (t *TopicPoller) Poll(timeout, size int) []IMessage {
+	// drain the local cache first; the destination must be allocated with the
+	// cache's length, otherwise copy moves nothing into a zero-length slice
+	l := make([]IMessage, len(t.cache))
+	copy(l, t.cache)
+	t.cache = make([]IMessage, 0)
+	end := time.Now().Add(time.Duration(timeout) * time.Millisecond)
+	for len(l) < size && time.Now().Before(end) {
+		select {
+		case v := <-t.queue.Out:
+			if v != nil {
+				l = append(l, v.(IMessage))
+			}
+		default:
+			// busy-wait until the deadline; the unbounded channel offers no
+			// blocking read with timeout
+			continue
+		}
+	}
+
+	return l
+}
+
+// Take retrieves and removes the head of this queue, waiting if necessary until an element becomes available.
+func (t *TopicPoller) Take() IMessage {
+	for {
+		if len(t.cache) > 0 {
+			msg := t.cache[0]
+			t.cache = t.cache[1:]
+			return msg
+		}
+
+	loop:
+		for {
+			select {
+			case val := <-t.queue.Out:
+				if val != nil {
+					t.cache = append(t.cache, val.(IMessage))
+				}
+			default:
+				if len(t.cache) > 0 {
+					break loop
+				}
+			}
+		}
+	}
+}
diff --git a/streaming/topic_poller_test.go b/streaming/topic_poller_test.go
new file mode 100644
index 0000000..0fd502d
--- /dev/null
+++ b/streaming/topic_poller_test.go
@@ -0,0 +1,49 @@
+package streaming
+
+import (
+	"testing"
+
+	"github.com/smallnest/chanx"
+	"github.com/stretchr/testify/assert"
+)
+
+func TestTopicPoller(t *testing.T) {
+	tp := &TopicPoller{
+		queue: chanx.NewUnboundedChan(1),
+		cache: make([]IMessage, 0),
+	}
+	var msg IMessage
+	go func() {
+		msg = tp.Take()
+	}()
+
+	tp.queue.In <- &Message{
+		offset: 0,
+		topic:  "topic",
+	}
+	//nolint
+	for msg == nil {
+		// loop
+	}
+
+	assert.Equal(t, msg.GetOffset(), int64(0))
+	assert.Equal(t, msg.GetTopic(), "topic")
+
+	var ms []IMessage
+	go func() {
+		ms = tp.Poll(1, 1)
+	}()
+
+	tp.queue.In <- &Message{
+		offset: 1,
+		topic:  "topic1",
+	}
+
+	//nolint
+	for ms == nil {
+		// loop
+	}
+
+	assert.Equal(t, ms[0].GetOffset(), int64(1))
+	assert.Equal(t, ms[0].GetTopic(), "topic1")
+}
diff --git a/streaming/util.go b/streaming/util.go
new file mode 100644
index 0000000..36c5e8e
--- /dev/null
+++ b/streaming/util.go
@@ -0,0 +1,379 @@
+package streaming
+
+import (
+	"context"
+	"fmt"
+	"strconv"
+	"strings"
+	"sync"
+	"time"
+
+	"github.com/dolphindb/api-go/dialer"
+	"github.com/dolphindb/api-go/model"
+
+	"github.com/smallnest/chanx"
+)
+
+func getReconnectTimestamp(site string) int64 {
+	raw, ok := reconnectTable.Load(site)
+	if ok && raw != nil {
+		item := raw.(*reconnectItem)
+		return item.getTimeStamp()
+	}
+
+	return 0
+}
+
+func contains(src []string, sub string) bool {
+	for _, v := range src {
+		if v == sub {
+			return true
+		}
+	}
+
+	return false
+}
+
+func addQueue(topic string) (*chanx.UnboundedChan, error) {
+	if _, ok := queueMap.Load(topic); ok {
+		return nil, fmt.Errorf("topic %s already subscribed", topic)
+	}
+
+	q := chanx.NewUnboundedChan(4096)
+	queueMap.Store(topic, q)
+	return q, nil
+}
+
+func getAllReconnectSites() []string {
+	res := make([]string, 0)
+	reconnectTable.Range(func(k, v interface{}) bool {
+		key := k.(string)
+		val := v.(*reconnectItem)
+		if val.getState() > 0 {
+			res = append(res, key)
+		}
+
+		return true
+	})
+
+	return res
+}
+
+func dispatch(msg IMessage) {
+	topicStr := msg.GetTopic()
+	topics := strings.Split(topicStr, ",")
+	for _, v := range topics {
+		topic, ok := haTopicToTrueTopic.Load(v)
+		if !ok {
+			continue
+		}
+
+		raw, ok := queueMap.Load(topic)
+		if ok && raw != nil {
+			q := raw.(*chanx.UnboundedChan)
+			q.In <- msg
+		}
+	}
+} + +func batchDispatch(msg []IMessage) { + for _, v := range msg { + addMessageToCache(v) + } + + flushToQueue() +} + +func addMessageToCache(msg IMessage) { + topicStr := msg.GetTopic() + topics := strings.Split(topicStr, ",") + for _, v := range topics { + topic, ok := haTopicToTrueTopic.Load(v) + if !ok { + continue + } + + cache := make([]IMessage, 0) + + raw, ok := messageCache.Load(topic) + if ok { + cache = raw.([]IMessage) + } + + cache = append(cache, msg) + messageCache.Store(topic, cache) + } +} + +func flushToQueue() { + messageCache.Range(func(k, v interface{}) bool { + val := v.([]IMessage) + + raw, ok := queueMap.Load(k) + if ok && raw != nil { + q := raw.(*chanx.UnboundedChan) + for _, m := range val { + q.In <- m + } + } + return true + }) + + messageCache = sync.Map{} +} + +func getAllTopicBySite(site string) []string { + res := make([]string, 0) + trueTopicToSites.Range(func(k, v interface{}) bool { + key := k.(string) + + s := key[0:strings.Index(key, "/")] + if s == site { + res = append(res, key) + } + + return true + }) + + return res +} + +func getNeedReconnect(site string) int { + raw, ok := reconnectTable.Load(site) + if ok && raw != nil { + item := raw.(*reconnectItem) + return item.getState() + } + + return 0 +} + +func newConnectedConn(address string) (dialer.Conn, error) { + conn, err := dialer.NewConn(context.TODO(), address, nil) + if err != nil { + fmt.Printf("Failed to new a conn: %s\n", err.Error()) + return nil, err + } + + err = conn.Connect() + if err != nil { + fmt.Printf("Failed to connect to server: %s\n", err.Error()) + return nil, err + } + + return conn, err +} + +func getActiveSite(sites []*site) *site { + ind := 0 + siteNum := len(sites) + for ind < siteNum { + si := sites[ind] + ind = (ind + 1) % siteNum + + conn, err := newConnectedConn(si.address) + if err != nil { + fmt.Printf("Failed to instantiate a connected conn: %s\n", err.Error()) + continue + } + + _, err = conn.RunScript("1") + if err != nil { + fmt.Printf("Failed to call 1: %s\n", err.Error()) + continue + } + + conn.Close() + + return si + } + + return nil +} + +func setReconnectTimestamp(site string, v int64) { + raw, ok := reconnectTable.Load(site) + if ok && raw != nil { + s := raw.(*reconnectItem) + s.setTimeStamp(v) + } +} + +func setNeedReconnect(topic string, v int) { + if topic == "" { + return + } + + site := topic[0:strings.Index(topic, "/")] + if raw, ok := reconnectTable.Load(site); ok { + item := raw.(*reconnectItem) + item.setState(v).setTimeStamp(time.Now().UnixNano() / 1000000) + } else { + item := &reconnectItem{ + reconnectState: v, + lastReconnectTimestamp: time.Now().UnixNano() / 1000000, + } + + item.putTopic(topic) + reconnectTable.Store(site, item) + } +} + +func getSiteByName(si string) *site { + topics := getAllTopicBySite(si) + if len(topics) > 0 { + raw, ok := trueTopicToSites.Load(topics[0]) + if !ok { + return nil + } + + sites := raw.([]*site) + if len(sites) > 0 { + return getActiveSite(sites) + } + } + + return nil +} + +// IsClosed checks whether the topic is closed. 
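+// It reports true when the topic is unknown to this client or when the first
+// site recorded for the topic has been marked closed by unSubscribe.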
+func IsClosed(topic string) bool { + raw, ok := haTopicToTrueTopic.Load(topic) + if !ok { + return true + } + + topic = raw.(string) + + raw, ok = trueTopicToSites.Load(topic) + if !ok { + return true + } + + sites := raw.([]*site) + if len(sites) == 0 { + return true + } + + return sites[0].closed +} + +func generatorGetSubscriptionTopicParams(tableName, actionName string) ([]model.DataForm, error) { + l, err := model.NewDataTypeListWithRaw(model.DtString, []string{tableName, actionName}) + if err != nil { + fmt.Printf("Failed to instantiate DataTypeList: %s\n", err.Error()) + return nil, err + } + + dfl := make([]model.DataForm, 2) + dfl[0] = model.NewScalar(l.Get(0)) + dfl[1] = model.NewScalar(l.Get(1)) + + return dfl, nil +} + +func generatePublishTableParams(s *SubscribeRequest, listenHost string, listenPort int32) ([]model.DataForm, error) { + pubReq := make([]model.DataForm, 0, 7) + r, err := packListeningHostAndPort(listenHost, listenPort) + if err != nil { + return nil, err + } + + pubReq = append(pubReq, r...) + dfl, err := generatorGetSubscriptionTopicParams(s.TableName, s.ActionName) + if err != nil { + fmt.Printf("Failed to generate the params of GetSubscriptionTopic:%s\n", err.Error()) + return nil, err + } + + pubReq = append(pubReq, dfl...) + offset, err := model.NewDataType(model.DtLong, s.Offset) + if err != nil { + fmt.Printf("Failed to instantiate DataType with offset: %s\n", err.Error()) + return nil, err + } + + pubReq = append(pubReq, model.NewScalar(offset)) + if s.Filter != nil { + pubReq = append(pubReq, s.Filter) + } else { + void, err := model.NewDataType(model.DtVoid, "") + if err != nil { + fmt.Printf("Failed to instantiate DataType with void: %s\n", err.Error()) + return nil, err + } + + pubReq = append(pubReq, model.NewScalar(void)) + } + + if s.AllowExists { + al, err := model.NewDataType(model.DtBool, byte(1)) + if err != nil { + fmt.Printf("Failed to instantiate DataType with AllowExists: %s\n", err.Error()) + return nil, err + } + + pubReq = append(pubReq, model.NewScalar(al)) + return pubReq, nil + } + + return pubReq[:6], nil +} + +func packListeningHostAndPort(listeningHost string, listeningPort int32) ([]model.DataForm, error) { + localIP, err := model.NewDataType(model.DtString, listeningHost) + if err != nil { + fmt.Printf("Failed to instantiate DataType with listeningHost: %s\n", err.Error()) + return nil, err + } + + port, err := model.NewDataType(model.DtInt, listeningPort) + if err != nil { + fmt.Printf("Failed to instantiate DataType with listeningPort: %s\n", err.Error()) + return nil, err + } + + return []model.DataForm{model.NewScalar(localIP), model.NewScalar(port)}, nil +} + +func generateStopPublishTableParams(s *SubscribeRequest, listenHost string, listenPort int32) ([]model.DataForm, error) { + pubReq := make([]model.DataForm, 0) + + localIP, err := model.NewDataType(model.DtString, listenHost) + if err != nil { + fmt.Printf("Failed to instantiate DataType with listeningHost: %s\n", err.Error()) + return nil, err + } + + pubReq = append(pubReq, model.NewScalar(localIP)) + + port, err := model.NewDataType(model.DtInt, listenPort) + if err != nil { + fmt.Printf("Failed to instantiate DataType with listeningPort: %s\n", err.Error()) + return nil, err + } + + pubReq = append(pubReq, model.NewScalar(port)) + + dfl, err := generatorGetSubscriptionTopicParams(s.TableName, s.ActionName) + if err != nil { + fmt.Printf("Failed to generate the params of GetSubscriptionTopic: %s\n", err.Error()) + return nil, err + } + + pubReq = append(pubReq, 
dfl...) + + return pubReq, nil +} + +func getVersionNum(ver string) int { + if str := strings.Split(ver, " "); len(str) >= 2 { + verStr := strings.ReplaceAll(str[0], ".", "") + verNum, err := strconv.Atoi(verStr) + if err == nil { + return verNum + } + } + + return 0 +} diff --git a/streaming/util_test.go b/streaming/util_test.go new file mode 100644 index 0000000..f1ec227 --- /dev/null +++ b/streaming/util_test.go @@ -0,0 +1,67 @@ +package streaming + +import ( + "testing" + + "github.com/smallnest/chanx" + "github.com/stretchr/testify/assert" +) + +func TestUtil(t *testing.T) { + src := []string{"sample", "test"} + assert.Equal(t, contains(src, "sample"), true) + assert.Equal(t, contains(src, "example"), false) + + msg := &Message{ + topic: "topic,sample", + offset: -1, + } + + haTopicToTrueTopic.Store("topic", "topic") + queueMap.Store("topic", chanx.NewUnboundedChan(2)) + + dispatch(msg) + raw, ok := queueMap.Load("topic") + assert.Equal(t, ok, true) + + q := raw.(*chanx.UnboundedChan) + r := <-q.Out + m := r.(IMessage) + assert.Equal(t, m.GetOffset(), int64(-1)) + assert.Equal(t, m.GetTopic(), "topic,sample") + + batchDispatch([]IMessage{msg}) + r = <-q.Out + m = r.(IMessage) + assert.Equal(t, m.GetOffset(), int64(-1)) + assert.Equal(t, m.GetTopic(), "topic,sample") + + item := &reconnectItem{ + reconnectState: 1, + } + + item.putTopic("127.0.0.1:3000:local3000/sub/action1") + reconnectTable.Store("127.0.0.1:3000:local3000", item) + setReconnectTimestamp("127.0.0.1:3000:local3000", 10) + raw, ok = reconnectTable.Load("127.0.0.1:3000:local3000") + assert.Equal(t, ok, true) + + ri := raw.(*reconnectItem) + assert.Equal(t, ri.getTimeStamp(), int64(10)) + assert.Equal(t, ri.getState(), 1) + + b := IsClosed("topic") + assert.Equal(t, b, true) + + sites := []*site{ + { + tableName: "util", + closed: false, + }, + } + + trueTopicToSites.Store("topic", sites) + + b = IsClosed("topic") + assert.Equal(t, b, false) +} diff --git a/streaming/var.go b/streaming/var.go new file mode 100644 index 0000000..7be064d --- /dev/null +++ b/streaming/var.go @@ -0,0 +1,19 @@ +package streaming + +import "sync" + +const ( + // DefaultPort is default listening port. + DefaultPort = 8849 + localhost = "localhost" +) + +var ( + haTopicToTrueTopic = sync.Map{} + waitReconnectTopic = sync.Map{} + + messageCache = sync.Map{} + trueTopicToSites = sync.Map{} + queueMap = sync.Map{} + reconnectTable = sync.Map{} +) diff --git a/streaming/var_test.go b/streaming/var_test.go new file mode 100644 index 0000000..05e985e --- /dev/null +++ b/streaming/var_test.go @@ -0,0 +1 @@ +package streaming diff --git a/test/basicTypeTest/basicChart_test.go b/test/basicTypeTest/basicChart_test.go new file mode 100644 index 0000000..0870180 --- /dev/null +++ b/test/basicTypeTest/basicChart_test.go @@ -0,0 +1,106 @@ +package test + +import ( + "bytes" + "context" + "fmt" + "testing" + + "github.com/dolphindb/api-go/api" + "github.com/dolphindb/api-go/dialer/protocol" + "github.com/dolphindb/api-go/model" + "github.com/dolphindb/api-go/test/setup" + . 
"github.com/smartystreets/goconvey/convey" +) + +func Test_Chart_DownLoad_DataType(t *testing.T) { + Convey("Test_Chart:", t, func() { + db, err := api.NewSimpleDolphinDBClient(context.TODO(), setup.Address, setup.UserName, setup.Password) + So(err, ShouldBeNil) + Convey("Test_chart_plot:", func() { + s, err := db.RunScript("x=1*(1..5);t=table(x);plot(t,x)") + So(err, ShouldBeNil) + result := s.(*model.Chart) + re := result.String() + So(re, ShouldNotBeNil) + reType := result.GetDataType() + So(reType, ShouldEqual, 25) + reTypeString := result.GetDataTypeString() + So(reTypeString, ShouldEqual, "any") + form := result.GetDataForm() + So(form, ShouldEqual, model.DfChart) + row := result.Rows() + So(row, ShouldEqual, model.DtInt) + title := result.GetTitle() + So(title, ShouldEqual, "") + ctype := result.GetChartType() + So(ctype, ShouldEqual, "CT_LINE") + xna := result.GetXAxisName() + yna := result.GetYAxisName() + So(xna, ShouldEqual, "x") + So(yna, ShouldEqual, "") + by := bytes.NewBufferString("") + w := protocol.NewWriter(by) + err = result.Render(w, protocol.LittleEndian) + So(err, ShouldBeNil) + w.Flush() + by.Reset() + }) + So(db.Close(), ShouldBeNil) + }) +} +func Test_Chart_UpLoad_DataType(t *testing.T) { + Convey("Test_Chart_upload:", t, func() { + db, err := api.NewSimpleDolphinDBClient(context.TODO(), setup.Address, setup.UserName, setup.Password) + So(err, ShouldBeNil) + Convey("Test_chart_upload:", func() { + dtl, err := model.NewDataTypeListWithRaw(model.DtString, []string{"chart", "xaxis", "yaxis"}) + So(err, ShouldBeNil) + So(dtl.DataType(), ShouldEqual, model.DtString) + dl, err := model.NewDataTypeListWithRaw(model.DtString, []string{"chart", "xaxis", "yaxis"}) + So(err, ShouldBeNil) + ti := model.NewVector(dl) + dt, err := model.NewDataType(model.DtInt, int32(4)) + So(err, ShouldBeNil) + ct := model.NewScalar(dt) + dt, err = model.NewDataType(model.DtBool, byte(0)) + So(err, ShouldBeNil) + st := model.NewScalar(dt) + d, err := model.NewDataTypeListWithRaw(model.DtInt, []int32{1, 2, 3, 4, 5}) + So(err, ShouldBeNil) + rl, err := model.NewDataTypeListWithRaw(model.DtInt, []int32{1, 2, 3, 4, 5}) + So(err, ShouldBeNil) + cl, err := model.NewDataTypeListWithRaw(model.DtInt, []int32{1}) + So(err, ShouldBeNil) + data := model.NewMatrix(model.NewVector(d), model.NewVector(rl), model.NewVector(cl)) + ch := model.NewChart(map[string]model.DataForm{ + "title": ti, + "chartType": ct, + "stacking": st, + "data": data, + }) + fmt.Print(ch) + So(ch.GetDataForm(), ShouldEqual, model.DfChart) + So(ch.GetDataType(), ShouldEqual, model.DtAny) + So(ch.GetDataTypeString(), ShouldEqual, "any") + So(ch.GetTitle(), ShouldEqual, "chart") + So(ch.GetXAxisName(), ShouldEqual, "xaxis") + So(ch.GetYAxisName(), ShouldEqual, "yaxis") + So(ch.GetChartType(), ShouldEqual, "CT_LINE") + _, err = db.Upload(map[string]model.DataForm{"s": ch}) + res, _ := db.RunScript("s") + ty, _ := db.RunScript("typestr(s)") + fmt.Print(res) + re := res.(*model.Dictionary).Values + So(re.Get(0).String(), ShouldEqual, data.String()) + So(re.Get(1).String(), ShouldEqual, st.String()) + So(re.Get(2).String(), ShouldEqual, ct.String()) + So(re.Get(3).String(), ShouldEqual, ti.String()) + fmt.Print(re) + So(err, ShouldBeNil) + So(ty.String(), ShouldEqual, "string(STRING->ANY DICTIONARY)") + So(res.GetDataType(), ShouldEqual, model.DtAny) + }) + So(db.Close(), ShouldBeNil) + }) +} diff --git a/test/basicTypeTest/basicDictionary_test.go b/test/basicTypeTest/basicDictionary_test.go new file mode 100644 index 0000000..c684230 --- 
/dev/null +++ b/test/basicTypeTest/basicDictionary_test.go @@ -0,0 +1,1149 @@ +package test + +import ( + "context" + "testing" + "time" + + "github.com/dolphindb/api-go/api" + "github.com/dolphindb/api-go/model" + "github.com/dolphindb/api-go/test/setup" + . "github.com/smartystreets/goconvey/convey" +) + +func Test_Dictionary_DownLoad_int(t *testing.T) { + Convey("Test_dictionary_int:", t, func() { + db, err := api.NewSimpleDolphinDBClient(context.TODO(), setup.Address, setup.UserName, setup.Password) + So(err, ShouldBeNil) + Convey("Test_dictionary_int_not_null:", func() { + s, err := db.RunScript("x=2 -6 1024 1048576 -2019;y=4875 -23 1048576 666 -2205;z=dict(x, y);z") + So(err, ShouldBeNil) + result := s.(*model.Dictionary) + key := [5]string{"2", "-6", "1024", "1048576", "-2019"} + val := [5]int32{4875, -23, 1048576, 666, -2205} + var k int + for i := 0; i < 5; i++ { + get, _ := result.Get(key[i]) + zx := get.Value() + if zx == val[i] { + k++ + } + } + So(k, ShouldEqual, result.Values.RowCount) + reType := result.GetDataType() + So(reType, ShouldEqual, 4) + reTypeString := result.GetDataTypeString() + So(reTypeString, ShouldEqual, "int") + form := result.GetDataForm() + So(form, ShouldEqual, 5) + }) + Convey("Test_dictionary_int_null_values:", func() { + s, err := db.RunScript("x=2 6 1 5 9;y=take(00i,5);z=dict(x, y);z") + So(err, ShouldBeNil) + result := s.(*model.Dictionary) + key := [5]string{"2", "6", "1", "5", "9"} + var k int + for i := 0; i < 5; i++ { + get, _ := result.Get(key[i]) + if get.IsNull() == true { + k++ + } + } + So(k, ShouldEqual, result.Values.RowCount) + reType := result.GetDataType() + So(reType, ShouldEqual, 4) + reTypeString := result.GetDataTypeString() + So(reTypeString, ShouldEqual, "int") + }) + Convey("Test_dictionary_int_all_null:", func() { + s, err := db.RunScript("x = take(00i,6);y= take(00i,6);z = dict(x,y);z") + So(err, ShouldBeNil) + result := s.(*model.Dictionary) + reType := result.GetDataType() + So(reType, ShouldEqual, 4) + reTypeString := result.GetDataTypeString() + So(reTypeString, ShouldEqual, "int") + }) + So(db.Close(), ShouldBeNil) + }) +} +func Test_Dictionary_DownLoad_short(t *testing.T) { + Convey("Test_dictionary_short:", t, func() { + db, err := api.NewSimpleDolphinDBClient(context.TODO(), setup.Address, setup.UserName, setup.Password) + So(err, ShouldBeNil) + Convey("Test_dictionary_short_not_null:", func() { + s, err := db.RunScript("x=2h -6h 1024h 4875h -2019h;y=4h 3333h 6666h 8888h -5h;z=dict(x, y);z") + So(err, ShouldBeNil) + result := s.(*model.Dictionary) + key := [5]string{"2", "-6", "1024", "4875", "-2019"} + val := [5]int16{4, 3333, 6666, 8888, -5} + var k int + for i := 0; i < 5; i++ { + get, _ := result.Get(key[i]) + zx := get.Value() + if zx == val[i] { + k++ + } + } + So(k, ShouldEqual, result.Values.RowCount) + reType := result.GetDataType() + So(reType, ShouldEqual, 3) + reTypeString := result.GetDataTypeString() + So(reTypeString, ShouldEqual, "short") + }) + Convey("Test_dictionary_short_null_values:", func() { + s, err := db.RunScript("x=2h 6h 1h 5h 9h;y=take(00h,5);z=dict(x, y);z") + So(err, ShouldBeNil) + result := s.(*model.Dictionary) + key := [5]string{"2", "6", "1", "5", "9"} + var k int + for i := 0; i < 5; i++ { + get, _ := result.Get(key[i]) + if get.IsNull() == true { + k++ + } + } + So(k, ShouldEqual, result.Values.RowCount) + reType := result.GetDataType() + So(reType, ShouldEqual, 3) + reTypeString := result.GetDataTypeString() + So(reTypeString, ShouldEqual, "short") + }) + So(db.Close(), ShouldBeNil) 
+ }) +} +func Test_Dictionary_DownLoad_char(t *testing.T) { + Convey("Test_dictionary_char:", t, func() { + db, err := api.NewSimpleDolphinDBClient(context.TODO(), setup.Address, setup.UserName, setup.Password) + So(err, ShouldBeNil) + Convey("Test_dictionary_char_not_null:", func() { + s, err := db.RunScript("x=97c 98c 99c 100c 101c ;y=102c 103c 104c 105c 106c ;z=dict(x, y);z") + So(err, ShouldBeNil) + result := s.(*model.Dictionary) + re := result.KeyStrings() + key := [5]string{"97", "98", "99", "100", "101"} + val := [5]uint{102, 103, 104, 105, 106} + for i := 0; i < 5; i++ { + get, _ := result.Get(key[i]) + zx := get.Value() + So(zx, ShouldEqual, val[i]) + So(re[i], ShouldBeIn, key) + get1, _ := result.Get(re[i]) + zx1 := get1.Value() + if v, ok := zx1.(int); ok { + So(val, ShouldContain, v) + } + reType := result.GetDataType() + So(reType, ShouldEqual, 2) + reTypeString := result.GetDataTypeString() + So(reTypeString, ShouldEqual, "char") + } + }) + Convey("Test_dictionary_char_null_values:", func() { + s, err := db.RunScript("x=97c 98c 99c 100c 101c;y=take(char(['','','','','']),5);z=dict(x, y);z") + So(err, ShouldBeNil) + result := s.(*model.Dictionary) + re := result.KeyStrings() + key := [5]string{"97", "98", "99", "100", "101"} + for i := 0; i < 5; i++ { + get, _ := result.Get(key[i]) + So(get.IsNull(), ShouldEqual, true) + So(re[i], ShouldBeIn, key) + } + reType := result.GetDataType() + So(reType, ShouldEqual, 2) + reTypeString := result.GetDataTypeString() + So(reTypeString, ShouldEqual, "char") + }) + So(db.Close(), ShouldBeNil) + }) +} +func Test_Dictionary_DownLoad_symbol(t *testing.T) { + Convey("Test_dictionary_symbol:", t, func() { + db, err := api.NewSimpleDolphinDBClient(context.TODO(), setup.Address, setup.UserName, setup.Password) + So(err, ShouldBeNil) + Convey("Test_dictionary_symbol_not_null:", func() { + s, err := db.RunScript("x=symbol(`A`B`C`D`E) ;y=symbol(`Z`X`C`V`B) ;z=dict(x, y);z") + So(err, ShouldBeNil) + result := s.(*model.Dictionary) + key := [5]string{"A", "B", "C", "D", "E"} + val := [5]string{"Z", "X", "C", "V", "B"} + var k int + for i := 0; i < 5; i++ { + get, _ := result.Get(key[i]) + zx := get.Value() + if zx == val[i] { + k++ + } + } + So(k, ShouldEqual, result.Values.RowCount) + reType := result.GetDataType() + So(reType, ShouldEqual, 17) + reTypeString := result.GetDataTypeString() + So(reTypeString, ShouldEqual, "symbol") + }) + Convey("Test_dictionary_symbol_null_values:", func() { + s, err := db.RunScript("x=symbol(`A`B`C`D`E) ;y=take(symbol(['','','','','']),5);z=dict(x, y);z") + So(err, ShouldBeNil) + result := s.(*model.Dictionary) + re := result.KeyStrings() + key := [5]string{"A", "B", "C", "D", "E"} + for i := 0; i < 5; i++ { + get, _ := result.Get(key[i]) + So(get.IsNull(), ShouldEqual, false) + So(re[i], ShouldBeIn, key) + } + reType := result.GetDataType() + So(reType, ShouldEqual, 145) + reTypeString := result.GetDataTypeString() + So(reTypeString, ShouldEqual, "symbolExtend") + }) + So(db.Close(), ShouldBeNil) + }) +} +func Test_Dictionary_DownLoad_string(t *testing.T) { + Convey("Test_dictionary_string:", t, func() { + db, err := api.NewSimpleDolphinDBClient(context.TODO(), setup.Address, setup.UserName, setup.Password) + So(err, ShouldBeNil) + Convey("Test_dictionary_string_not_null:", func() { + s, err := db.RunScript("x= `IBM`C`MS`MSFT`JPM;y=`C`MS`MSFT`JPM`ORCL ;z=dict(x, y);z") + So(err, ShouldBeNil) + result := s.(*model.Dictionary) + + key := [5]string{"IBM", "C", "MS", "MSFT", "JPM"} + val := [5]string{"C", "MS", "MSFT", 
"JPM", "ORCL"} + var k int + for i := 0; i < 5; i++ { + get, _ := result.Get(key[i]) + zx := get.Value() + if zx == val[i] { + k++ + } + } + So(k, ShouldEqual, result.Values.RowCount) + reType := result.GetDataType() + So(reType, ShouldEqual, 18) + reTypeString := result.GetDataTypeString() + So(reTypeString, ShouldEqual, "string") + }) + Convey("Test_dictionary_string_null_values:", func() { + s, err := db.RunScript("x= `IBM`C`MS`MSFT`JPM ;y=take((['','','','','']),5);z=dict(x, y);z") + So(err, ShouldBeNil) + result := s.(*model.Dictionary) + key := [5]string{"IBM", "C", "MS", "MSFT", "JPM"} + var k int + for i := 0; i < 5; i++ { + get, _ := result.Get(key[i]) + if get.IsNull() == true { + k++ + } + } + So(k, ShouldEqual, result.Values.RowCount) + reType := result.GetDataType() + So(reType, ShouldEqual, 18) + reTypeString := result.GetDataTypeString() + So(reTypeString, ShouldEqual, "string") + }) + So(db.Close(), ShouldBeNil) + }) +} +func Test_Dictionary_DownLoad_long(t *testing.T) { + Convey("Test_dictionary_long:", t, func() { + db, err := api.NewSimpleDolphinDBClient(context.TODO(), setup.Address, setup.UserName, setup.Password) + So(err, ShouldBeNil) + Convey("Test_dictionary_long:", func() { + s, err := db.RunScript("x=97l -98l 1024l 1048576l -101110l ;y=-1102l 110103l 1024l -112105l 1048576l ;z=dict(x, y);z") + So(err, ShouldBeNil) + result := s.(*model.Dictionary) + key := [5]string{"97", "-98", "1024", "1048576", "-101110"} + val := [5]int64{-1102, 110103, 1024, -112105, 1048576} + var k int + for i := 0; i < 5; i++ { + get, _ := result.Get(key[i]) + zx := get.Value() + if zx == val[i] { + k++ + } + } + So(k, ShouldEqual, result.Values.RowCount) + reType := result.GetDataType() + So(reType, ShouldEqual, 5) + reTypeString := result.GetDataTypeString() + So(reTypeString, ShouldEqual, "long") + }) + Convey("Test_dictionary_long_null_values:", func() { + s, err := db.RunScript("x=97l 98l 99l 100l 101l;y=take(00l,5);z=dict(x, y);z") + So(err, ShouldBeNil) + result := s.(*model.Dictionary) + key := [5]string{"97", "98", "99", "100", "101"} + var k int + for i := 0; i < 5; i++ { + get, _ := result.Get(key[i]) + if get.IsNull() == true { + k++ + } + } + So(k, ShouldEqual, result.Values.RowCount) + reType := result.GetDataType() + So(reType, ShouldEqual, 5) + reTypeString := result.GetDataTypeString() + So(reTypeString, ShouldEqual, "long") + }) + So(db.Close(), ShouldBeNil) + }) +} +func Test_Dictionary_DownLoad_double(t *testing.T) { + Convey("Test_dictionary_double:", t, func() { + db, err := api.NewSimpleDolphinDBClient(context.TODO(), setup.Address, setup.UserName, setup.Password) + So(err, ShouldBeNil) + Convey("Test_dictionary_double_not_null:", func() { + s, err := db.RunScript("x=97.5 -98.5 1099.5 148576.5 101111.5 ;y=102.5 103.5 104.5 105.5 106.5 ;z=dict(x, y);z") + So(err, ShouldBeNil) + result := s.(*model.Dictionary) + key := [5]string{"97.5", "-98.5", "1099.5", "148576.5", "101111.5"} + val := [5]float64{102.5, 103.5, 104.5, 105.5, 106.5} + var k int + for i := 0; i < 5; i++ { + get, _ := result.Get(key[i]) + zx := get.Value() + if zx == val[i] { + k++ + } + } + So(k, ShouldEqual, result.Values.RowCount) + reType := result.GetDataType() + So(reType, ShouldEqual, 16) + reTypeString := result.GetDataTypeString() + So(reTypeString, ShouldEqual, "double") + }) + Convey("Test_dictionary_double_null_values:", func() { + s, err := db.RunScript("x=97.5 98.5 99.5 100.5 101.5 ;y=take(double(['','','','','']),5);z=dict(x, y);z") + So(err, ShouldBeNil) + result := s.(*model.Dictionary) 
+ key := [5]string{"97.5", "98.5", "99.5", "100.5", "101.5"} + var k int + for i := 0; i < 5; i++ { + get, _ := result.Get(key[i]) + if get.IsNull() == true { + k++ + } + } + So(k, ShouldEqual, result.Values.RowCount) + reType := result.GetDataType() + So(reType, ShouldEqual, 16) + reTypeString := result.GetDataTypeString() + So(reTypeString, ShouldEqual, "double") + }) + So(db.Close(), ShouldBeNil) + }) +} +func Test_Dictionary_DownLoad_float(t *testing.T) { + Convey("Test_dictionary_float:", t, func() { + db, err := api.NewSimpleDolphinDBClient(context.TODO(), setup.Address, setup.UserName, setup.Password) + So(err, ShouldBeNil) + Convey("Test_dictionary_float_not_null:", func() { + s, err := db.RunScript("x=97f -98f 99f 1024f 104857f ;y=102f 103f 104f 105f 106f ;z=dict(x, y);z") + So(err, ShouldBeNil) + result := s.(*model.Dictionary) + key := [5]string{"97", "-98", "99", "1024", "104857"} + val := [5]float32{102, 103, 104, 105, 106} + var k int + for i := 0; i < 5; i++ { + get, _ := result.Get(key[i]) + zx := get.Value() + if zx == val[i] { + k++ + } + } + So(k, ShouldEqual, result.Values.RowCount) + reType := result.GetDataType() + So(reType, ShouldEqual, 15) + reTypeString := result.GetDataTypeString() + So(reTypeString, ShouldEqual, "float") + }) + Convey("Test_dictionary_float_null_values:", func() { + s, err := db.RunScript("x=97f 98f 99f 100f 101f;y=take(00f,5);z=dict(x, y);z") + So(err, ShouldBeNil) + result := s.(*model.Dictionary) + key := [5]string{"97", "98", "99", "100", "101"} + var k int + for i := 0; i < 5; i++ { + get, _ := result.Get(key[i]) + if get.IsNull() == true { + k++ + } + } + So(k, ShouldEqual, result.Values.RowCount) + reType := result.GetDataType() + So(reType, ShouldEqual, 15) + reTypeString := result.GetDataTypeString() + So(reTypeString, ShouldEqual, "float") + }) + So(db.Close(), ShouldBeNil) + }) +} +func Test_Dictionary_DownLoad_date(t *testing.T) { + Convey("Test_dictionary_date:", t, func() { + db, err := api.NewSimpleDolphinDBClient(context.TODO(), setup.Address, setup.UserName, setup.Password) + So(err, ShouldBeNil) + Convey("Test_dictionary_date_not_null:", func() { + s, err := db.RunScript("x=1969.12.31 1970.01.01 1970.01.02 2006.01.02 2006.01.03 2022.08.03 ;y=1969.12.31 1970.01.01 1970.01.02 2006.01.02 2006.01.03 2022.08.03;z=dict(x, y);z") + So(err, ShouldBeNil) + result := s.(*model.Dictionary) + key := [6]string{"1969.12.31", "1970.01.01", "1970.01.02", "2006.01.02", "2006.01.03", "2022.08.03"} + val := []time.Time{time.Date(1969, 12, 31, 0, 0, 0, 0, time.UTC), time.Date(1970, 1, 1, 0, 0, 0, 0, time.UTC), time.Date(1970, 1, 2, 0, 0, 0, 0, time.UTC), time.Date(2006, 1, 2, 0, 0, 0, 0, time.UTC), time.Date(2006, 1, 3, 0, 0, 0, 0, time.UTC), time.Date(2022, 8, 3, 0, 0, 0, 0, time.UTC)} + var k int + for i := 0; i < 6; i++ { + get, _ := result.Get(key[i]) + zx := get.Value() + if zx == val[i] { + k++ + } + } + So(k, ShouldEqual, result.Values.RowCount) + reType := result.GetDataType() + So(reType, ShouldEqual, 6) + reTypeString := result.GetDataTypeString() + So(reTypeString, ShouldEqual, "date") + }) + Convey("Test_dictionary_date_null_values:", func() { + s, err := db.RunScript("x=2022.08.03+take(100..105,6);y=take(00d,6);z=dict(x, y);z") + So(err, ShouldBeNil) + result := s.(*model.Dictionary) + key := [6]string{"2022.11.11", "2022.11.12", "2022.11.13", "2022.11.14", "2022.11.15", "2022.11.16"} + var k int + for i := 0; i < 6; i++ { + get, _ := result.Get(key[i]) + if get.IsNull() == true { + k++ + } + } + So(k, ShouldEqual, 
result.Values.RowCount) + reType := result.GetDataType() + So(reType, ShouldEqual, 6) + reTypeString := result.GetDataTypeString() + So(reTypeString, ShouldEqual, "date") + }) + So(db.Close(), ShouldBeNil) + }) +} +func Test_Dictionary_DownLoad_month(t *testing.T) { + Convey("Test_dictionary_month:", t, func() { + db, err := api.NewSimpleDolphinDBClient(context.TODO(), setup.Address, setup.UserName, setup.Password) + So(err, ShouldBeNil) + Convey("Test_dictionary_month_not_null:", func() { + s, err := db.RunScript("x=1969.12M 1970.01M 1970.02M 2006.01M 2006.02M 2022.08M ;y=1969.12M 1970.01M 1970.02M 2006.01M 2006.02M 2022.08M;z=dict(x, y);z") + So(err, ShouldBeNil) + result := s.(*model.Dictionary) + key := [6]string{"1969.12M", "1970.01M", "1970.02M", "2006.01M", "2006.02M", "2022.08M"} + val := []time.Time{time.Date(1969, 12, 1, 0, 0, 0, 0, time.UTC), time.Date(1970, 1, 1, 0, 0, 0, 0, time.UTC), time.Date(1970, 2, 1, 0, 0, 0, 0, time.UTC), time.Date(2006, 1, 1, 0, 0, 0, 0, time.UTC), time.Date(2006, 2, 1, 0, 0, 0, 0, time.UTC), time.Date(2022, 8, 1, 0, 0, 0, 0, time.UTC)} + var k int + for i := 0; i < 6; i++ { + get, _ := result.Get(key[i]) + zx := get.Value() + if zx == val[i] { + k++ + } + } + So(k, ShouldEqual, result.Values.RowCount) + reType := result.GetDataType() + So(reType, ShouldEqual, 7) + reTypeString := result.GetDataTypeString() + So(reTypeString, ShouldEqual, "month") + }) + Convey("Test_dictionary_month_null_values:", func() { + s, err := db.RunScript("x=2012.08M+take(100..105,6);y=take(10+month(['','','','','','']),6);z=dict(x, y);z") + So(err, ShouldBeNil) + result := s.(*model.Dictionary) + key := [6]string{"2020.12M", "2021.01M", "2021.02M", "2021.03M", "2021.04M", "2021.05M"} + var k int + for i := 0; i < 6; i++ { + get, _ := result.Get(key[i]) + if get.IsNull() == true { + k++ + } + } + So(k, ShouldEqual, result.Values.RowCount) + reType := result.GetDataType() + So(reType, ShouldEqual, 7) + reTypeString := result.GetDataTypeString() + So(reTypeString, ShouldEqual, "month") + }) + So(db.Close(), ShouldBeNil) + }) +} +func Test_Dictionary_DownLoad_date_time(t *testing.T) { + Convey("Test_dictionary_time:", t, func() { + db, err := api.NewSimpleDolphinDBClient(context.TODO(), setup.Address, setup.UserName, setup.Password) + So(err, ShouldBeNil) + Convey("Test_dictionary_time_not_null:", func() { + s, err := db.RunScript("x=23:59:59.999 00:00:00.000 00:00:01.999 15:04:04.999 15:04:05.000 15:00:15.000 ;y=23:59:59.999 00:00:00.000 00:00:01.999 15:04:04.999 15:04:05.000 15:00:15.000;z=dict(x, y);z") + So(err, ShouldBeNil) + result := s.(*model.Dictionary) + key := [6]string{"23:59:59.999", "00:00:00.000", "00:00:01.999", "15:04:04.999", "15:04:05.000", "15:00:15.000"} + val := []time.Time{time.Date(1970, 1, 1, 23, 59, 59, 999000000, time.UTC), time.Date(1970, 1, 1, 0, 0, 0, 0, time.UTC), time.Date(1970, 1, 1, 0, 0, 1, 999000000, time.UTC), time.Date(1970, 1, 1, 15, 4, 4, 999000000, time.UTC), time.Date(1970, 1, 1, 15, 4, 5, 0, time.UTC), time.Date(1970, 1, 1, 15, 0, 15, 0, time.UTC)} + var k int + for i := 0; i < 6; i++ { + get, _ := result.Get(key[i]) + zx := get.Value() + if zx == val[i] { + k++ + } + } + So(k, ShouldEqual, result.Values.RowCount) + reType := result.GetDataType() + So(reType, ShouldEqual, 8) + reTypeString := result.GetDataTypeString() + So(reTypeString, ShouldEqual, "time") + }) + Convey("Test_dictionary_time_null_values:", func() { + s, err := db.RunScript("x=09:30:00.000+take(100..105,6);y=take(10+time(['','','','','','']),6);z=dict(x, y);z") + 
So(err, ShouldBeNil) + result := s.(*model.Dictionary) + key := [6]string{"09:30:00.100", "09:30:00.101", "09:30:00.102", "09:30:00.103", "09:30:00.104", "09:30:00.105"} + var k int + for i := 0; i < 6; i++ { + get, _ := result.Get(key[i]) + if get.IsNull() == true { + k++ + } + } + So(k, ShouldEqual, result.Values.RowCount) + reType := result.GetDataType() + So(reType, ShouldEqual, 8) + reTypeString := result.GetDataTypeString() + So(reTypeString, ShouldEqual, "time") + }) + So(db.Close(), ShouldBeNil) + }) +} +func Test_Dictionary_DownLoad_minute(t *testing.T) { + Convey("Test_dictionary_minute:", t, func() { + db, err := api.NewSimpleDolphinDBClient(context.TODO(), setup.Address, setup.UserName, setup.Password) + So(err, ShouldBeNil) + Convey("Test_dictionary_minute_not_null:", func() { + s, err := db.RunScript("x=23:59m 00:00m 00:01m 15:04m 15:05m 15:15m ;y=23:59m 00:00m 00:01m 15:04m 15:05m 15:15m;z=dict(x, y);z") + So(err, ShouldBeNil) + result := s.(*model.Dictionary) + key := [6]string{"23:59m", "00:00m", "00:01m", "15:04m", "15:05m", "15:15m"} + val := []time.Time{time.Date(1970, 1, 1, 23, 59, 0, 0, time.UTC), time.Date(1970, 1, 1, 0, 0, 0, 0, time.UTC), time.Date(1970, 1, 1, 0, 1, 0, 0, time.UTC), time.Date(1970, 1, 1, 15, 4, 0, 0, time.UTC), time.Date(1970, 1, 1, 15, 5, 0, 0, time.UTC), time.Date(1970, 1, 1, 15, 15, 0, 0, time.UTC)} + var k int + for i := 0; i < 6; i++ { + get, _ := result.Get(key[i]) + zx := get.Value() + if zx == val[i] { + k++ + } + } + So(k, ShouldEqual, result.Values.RowCount) + reType := result.GetDataType() + So(reType, ShouldEqual, 9) + reTypeString := result.GetDataTypeString() + So(reTypeString, ShouldEqual, "minute") + }) + Convey("Test_dictionary_minute_null_values:", func() { + s, err := db.RunScript("x=09:30m+take(100..105,6);y=take(00m,6);z=dict(x, y);z") + So(err, ShouldBeNil) + result := s.(*model.Dictionary) + key := [6]string{"11:10m", "11:11m", "11:12m", "11:13m", "11:14m", "11:15m"} + var k int + for i := 0; i < 6; i++ { + get, _ := result.Get(key[i]) + if get.IsNull() == true { + k++ + } + } + So(k, ShouldEqual, result.Values.RowCount) + reType := result.GetDataType() + So(reType, ShouldEqual, 9) + reTypeString := result.GetDataTypeString() + So(reTypeString, ShouldEqual, "minute") + }) + So(db.Close(), ShouldBeNil) + }) +} +func Test_Dictionary_DownLoad_second(t *testing.T) { + Convey("Test_dictionary_second:", t, func() { + db, err := api.NewSimpleDolphinDBClient(context.TODO(), setup.Address, setup.UserName, setup.Password) + So(err, ShouldBeNil) + Convey("Test_dictionary_second_not_null:", func() { + s, err := db.RunScript("x=23:59:59 00:00:00 00:00:01 15:04:04 15:04:05 15:00:15 ;y=23:59:59 00:00:00 00:00:01 15:04:04 15:04:05 15:00:15;z=dict(x, y);z") + So(err, ShouldBeNil) + result := s.(*model.Dictionary) + key := [6]string{"23:59:59", "00:00:00", "00:00:01", "15:04:04", "15:04:05", "15:00:15"} + val := []time.Time{time.Date(1970, 1, 1, 23, 59, 59, 0, time.UTC), time.Date(1970, 1, 1, 0, 0, 0, 0, time.UTC), time.Date(1970, 1, 1, 0, 0, 1, 0, time.UTC), time.Date(1970, 1, 1, 15, 4, 4, 0, time.UTC), time.Date(1970, 1, 1, 15, 4, 5, 0, time.UTC), time.Date(1970, 1, 1, 15, 0, 15, 0, time.UTC)} + var k int + for i := 0; i < 6; i++ { + get, _ := result.Get(key[i]) + zx := get.Value() + if zx == val[i] { + k++ + } + } + So(k, ShouldEqual, result.Values.RowCount) + reType := result.GetDataType() + So(reType, ShouldEqual, 10) + reTypeString := result.GetDataTypeString() + So(reTypeString, ShouldEqual, "second") + }) + 
Convey("Test_dictionary_second_null_values:", func() { + s, err := db.RunScript("x=09:30:00+take(100..105,6);y=take(00s,6);z=dict(x, y);z") + So(err, ShouldBeNil) + result := s.(*model.Dictionary) + key := [6]string{"09:31:40", "09:31:41", "09:31:42", "09:31:43", "09:31:44", "09:31:45"} + var k int + for i := 0; i < 6; i++ { + get, _ := result.Get(key[i]) + if get.IsNull() == true { + k++ + } + } + So(k, ShouldEqual, result.Values.RowCount) + reType := result.GetDataType() + So(reType, ShouldEqual, 10) + reTypeString := result.GetDataTypeString() + So(reTypeString, ShouldEqual, "second") + }) + So(db.Close(), ShouldBeNil) + }) +} +func Test_Dictionary_DownLoad_datetime(t *testing.T) { + Convey("Test_dictionary_datetime:", t, func() { + db, err := api.NewSimpleDolphinDBClient(context.TODO(), setup.Address, setup.UserName, setup.Password) + So(err, ShouldBeNil) + Convey("Test_dictionary_datetime_not_null:", func() { + s, err := db.RunScript("x = 1969.12.31T23:59:59 1970.01.01T00:00:00 1970.01.01T00:00:01 2006.01.02T15:04:04 2006.01.02T15:04:05 2022.08.03T15:00:15 ;y=1969.12.31T23:59:59 1970.01.01T00:00:00 1970.01.01T00:00:01 2006.01.02T15:04:04 2006.01.02T15:04:05 2022.08.03T15:00:15;z=dict(x, y);z") + So(err, ShouldBeNil) + result := s.(*model.Dictionary) + key := [6]string{"1969.12.31T23:59:59", "1970.01.01T00:00:00", "1970.01.01T00:00:01", "2006.01.02T15:04:04", "2006.01.02T15:04:05", "2022.08.03T15:00:15"} + val := []time.Time{time.Date(1969, 12, 31, 23, 59, 59, 0, time.UTC), time.Date(1970, 1, 1, 0, 0, 0, 0, time.UTC), time.Date(1970, 1, 1, 0, 0, 1, 0, time.UTC), time.Date(2006, 1, 2, 15, 4, 4, 0, time.UTC), time.Date(2006, 1, 2, 15, 4, 5, 0, time.UTC), time.Date(2022, 8, 3, 15, 0, 15, 0, time.UTC)} + var k int + for i := 0; i < 6; i++ { + get, _ := result.Get(key[i]) + zx := get.Value() + if zx == val[i] { + k++ + } + } + So(k, ShouldEqual, result.Values.RowCount) + reType := result.GetDataType() + So(reType, ShouldEqual, 11) + reTypeString := result.GetDataTypeString() + So(reTypeString, ShouldEqual, "datetime") + }) + Convey("Test_dictionary_datetime_null_values:", func() { + s, err := db.RunScript("x=2022.08.03 11:00:00+take(10..15,6);y=take(datetime(['','','','','','']),6);z=dict(x, y);z") + So(err, ShouldBeNil) + result := s.(*model.Dictionary) + key := [6]string{"2022.08.03T11:00:10", "2022.08.03T11:00:11", "2022.08.03T11:00:12", "2022.08.03T11:00:13", "2022.08.03T11:00:14", "2022.08.03T11:00:15"} + var k int + for i := 0; i < 6; i++ { + get, _ := result.Get(key[i]) + if get.IsNull() == true { + k++ + } + } + So(k, ShouldEqual, result.Values.RowCount) + reType := result.GetDataType() + So(reType, ShouldEqual, 11) + reTypeString := result.GetDataTypeString() + So(reTypeString, ShouldEqual, "datetime") + }) + So(db.Close(), ShouldBeNil) + }) +} +func Test_Dictionary_DownLoad_timestamp(t *testing.T) { + Convey("Test_dictionary_timestamp:", t, func() { + db, err := api.NewSimpleDolphinDBClient(context.TODO(), setup.Address, setup.UserName, setup.Password) + So(err, ShouldBeNil) + Convey("Test_dictionary_timestamp_not_null:", func() { + s, err := db.RunScript("x=1969.12.31T23:59:59.999 1970.01.01T00:00:00.000 1970.01.01T00:00:01.999 2006.01.02T15:04:04.999 2006.01.02T15:04:05.000 2022.08.03T15:00:15.000;y=1969.12.31T23:59:59.999 1970.01.01T00:00:00.000 1970.01.01T00:00:01.999 2006.01.02T15:04:04.999 2006.01.02T15:04:05.000 2022.08.03T15:00:15.000;z=dict(x, y);z") + So(err, ShouldBeNil) + result := s.(*model.Dictionary) + key := [6]string{"1969.12.31T23:59:59.999", 
"1970.01.01T00:00:00.000", "1970.01.01T00:00:01.999", "2006.01.02T15:04:04.999", "2006.01.02T15:04:05.000", "2022.08.03T15:00:15.000"} + val := []time.Time{time.Date(1969, 12, 31, 23, 59, 59, 999000000, time.UTC), time.Date(1970, 1, 1, 0, 0, 0, 0, time.UTC), time.Date(1970, 1, 1, 0, 0, 1, 999000000, time.UTC), time.Date(2006, 1, 2, 15, 4, 4, 999000000, time.UTC), time.Date(2006, 1, 2, 15, 4, 5, 0, time.UTC), time.Date(2022, 8, 3, 15, 0, 15, 0, time.UTC)} + var k int + for i := 0; i < 6; i++ { + get, _ := result.Get(key[i]) + zx := get.Value() + if zx == val[i] { + k++ + } + } + So(k, ShouldEqual, result.Values.RowCount) + reType := result.GetDataType() + So(reType, ShouldEqual, 12) + reTypeString := result.GetDataTypeString() + So(reTypeString, ShouldEqual, "timestamp") + }) + Convey("Test_dictionary_timestamp_null_values:", func() { + s, err := db.RunScript("x=2012.08.03 11:00:00.000+take(100..105,6);y=take(timestamp(['','','','','','']),6);z=dict(x, y);z") + So(err, ShouldBeNil) + result := s.(*model.Dictionary) + key := [6]string{"2012.08.03T11:00:00.100", "2012.08.03T11:00:00.101", "2012.08.03T11:00:00.102", "2012.08.03T11:00:00.103", "2012.08.03T11:00:00.104", "2012.08.03T11:00:00.105"} + var k int + for i := 0; i < 6; i++ { + get, _ := result.Get(key[i]) + if get.IsNull() == true { + k++ + } + } + So(k, ShouldEqual, result.Values.RowCount) + reType := result.GetDataType() + So(reType, ShouldEqual, 12) + reTypeString := result.GetDataTypeString() + So(reTypeString, ShouldEqual, "timestamp") + }) + So(db.Close(), ShouldBeNil) + }) +} +func Test_Dictionary_DownLoad_nanotime(t *testing.T) { + Convey("Test_dictionary_nanotime:", t, func() { + db, err := api.NewSimpleDolphinDBClient(context.TODO(), setup.Address, setup.UserName, setup.Password) + So(err, ShouldBeNil) + Convey("Test_dictionary_nanotime_not_time:", func() { + s, err := db.RunScript("x=23:59:59.999999999 00:00:00.000000000 00:00:01.999999999 15:04:04.999999999 15:04:05.000000000 15:00:15.000000000 ;y=23:59:59.999999999 00:00:00.000000000 00:00:01.999999999 15:04:04.999999999 15:04:05.000000000 15:00:15.000000000;z=dict(x, y);z") + So(err, ShouldBeNil) + result := s.(*model.Dictionary) + key := [6]string{"23:59:59.999999999", "00:00:00.000000000", "00:00:01.999999999", "15:04:04.999999999", "15:04:05.000000000", "15:00:15.000000000"} + val := []time.Time{time.Date(1970, 1, 1, 23, 59, 59, 999999999, time.UTC), time.Date(1970, 1, 1, 0, 0, 0, 0, time.UTC), time.Date(1970, 1, 1, 0, 0, 1, 999999999, time.UTC), time.Date(1970, 1, 1, 15, 4, 4, 999999999, time.UTC), time.Date(1970, 1, 1, 15, 4, 5, 0, time.UTC), time.Date(1970, 1, 1, 15, 0, 15, 0, time.UTC)} + var k int + for i := 0; i < 6; i++ { + get, _ := result.Get(key[i]) + zx := get.Value() + if zx == val[i] { + k++ + } + } + So(k, ShouldEqual, result.Values.RowCount) + reType := result.GetDataType() + So(reType, ShouldEqual, 13) + reTypeString := result.GetDataTypeString() + So(reTypeString, ShouldEqual, "nanotime") + }) + Convey("Test_dictionary_nanotime_null_values:", func() { + s, err := db.RunScript("x=11:00:00.000000000+take(100..105,6);y=take(nanotime(['','','','','','']),6);z=dict(x, y);z") + So(err, ShouldBeNil) + result := s.(*model.Dictionary) + key := [6]string{"11:00:00.000000100", "11:00:00.000000101", "11:00:00.000000102", "11:00:00.000000103", "11:00:00.000000104", "11:00:00.000000105"} + var k int + for i := 0; i < 6; i++ { + get, _ := result.Get(key[i]) + if get.IsNull() == true { + k++ + } + } + So(k, ShouldEqual, result.Values.RowCount) + reType := 
result.GetDataType() + So(reType, ShouldEqual, 13) + reTypeString := result.GetDataTypeString() + So(reTypeString, ShouldEqual, "nanotime") + }) + So(db.Close(), ShouldBeNil) + }) +} +func Test_Dictionary_DownLoad_nanotimestamp(t *testing.T) { + Convey("Test_dictionary_nanotimestamp:", t, func() { + db, err := api.NewSimpleDolphinDBClient(context.TODO(), setup.Address, setup.UserName, setup.Password) + So(err, ShouldBeNil) + Convey("Test_dictionary_nanotimestamp_not_null:", func() { + s, err := db.RunScript("x=1969.12.31T23:59:59.999999999 1970.01.01T00:00:00.000000000 1970.01.01T00:00:01.999999999 2006.01.02T15:04:04.999999999 2006.01.02T15:04:05.000000000 2022.08.03T15:00:15.000000000 ;y=1969.12.31T23:59:59.999999999 1970.01.01T00:00:00.000000000 1970.01.01T00:00:01.999999999 2006.01.02T15:04:04.999999999 2006.01.02T15:04:05.000000000 2022.08.03T15:00:15.000000000;z=dict(x, y);z") + So(err, ShouldBeNil) + result := s.(*model.Dictionary) + key := [6]string{"1969.12.31T23:59:59.999999999", "1970.01.01T00:00:00.000000000", "1970.01.01T00:00:01.999999999", "2006.01.02T15:04:04.999999999", "2006.01.02T15:04:05.000000000", "2022.08.03T15:00:15.000000000"} + val := []time.Time{time.Date(1969, 12, 31, 23, 59, 59, 999999999, time.UTC), time.Date(1970, 1, 1, 0, 0, 0, 0, time.UTC), time.Date(1970, 1, 1, 0, 0, 1, 999999999, time.UTC), time.Date(2006, 1, 2, 15, 4, 4, 999999999, time.UTC), time.Date(2006, 1, 2, 15, 4, 5, 0, time.UTC), time.Date(2022, 8, 3, 15, 0, 15, 0, time.UTC)} + var k int + for i := 0; i < 6; i++ { + get, _ := result.Get(key[i]) + zx := get.Value() + if zx == val[i] { + k++ + } + } + So(k, ShouldEqual, result.Values.RowCount) + reType := result.GetDataType() + So(reType, ShouldEqual, 14) + reTypeString := result.GetDataTypeString() + So(reTypeString, ShouldEqual, "nanotimestamp") + }) + Convey("Test_dictionary_nanotimestamp_null_values:", func() { + s, err := db.RunScript("x=2022.08.03 11:00:00.000000000+take(100..105,6);y=take(nanotimestamp(['','','','','','']),6);z=dict(x, y);z") + So(err, ShouldBeNil) + result := s.(*model.Dictionary) + key := [6]string{"2022.08.03T11:00:00.000000100", "2022.08.03T11:00:00.000000101", "2022.08.03T11:00:00.000000102", "2022.08.03T11:00:00.000000103", "2022.08.03T11:00:00.000000104", "2022.08.03T11:00:00.000000105"} + var k int + for i := 0; i < 6; i++ { + get, _ := result.Get(key[i]) + if get.IsNull() == true { + k++ + } + } + So(k, ShouldEqual, result.Values.RowCount) + reType := result.GetDataType() + So(reType, ShouldEqual, 14) + reTypeString := result.GetDataTypeString() + So(reTypeString, ShouldEqual, "nanotimestamp") + }) + So(db.Close(), ShouldBeNil) + }) +} +func Test_Dictionary_DownLoad_datehour(t *testing.T) { + Convey("Test_dictionary_datehour:", t, func() { + db, err := api.NewSimpleDolphinDBClient(context.TODO(), setup.Address, setup.UserName, setup.Password) + So(err, ShouldBeNil) + Convey("Test_dictionary_datehour_not_null:", func() { + s, err := db.RunScript("x = datehour[1969.12.31T23:59:59.999, 1970.01.01T00:00:00.000, 2006.01.02T15:04:04.999] ;y=datehour[1969.12.31T23:59:59.999, 1970.01.01T00:00:00.000, 2006.01.02T15:04:04.999] ;z=dict(x, y);z") + So(err, ShouldBeNil) + result := s.(*model.Dictionary) + key := [3]string{"1969.12.31T23", "1970.01.01T00", "2006.01.02T15"} + val := []time.Time{time.Date(1969, 12, 31, 23, 0, 0, 0, time.UTC), time.Date(1970, 1, 1, 0, 0, 0, 0, time.UTC), time.Date(2006, 1, 2, 15, 0, 0, 0, time.UTC)} + var k int + for i := 0; i < 3; i++ { + get, _ := result.Get(key[i]) + zx := get.Value() + if zx ==
val[i] { + k++ + } + } + So(k, ShouldEqual, result.Values.RowCount) + reType := result.GetDataType() + So(reType, ShouldEqual, 28) + reTypeString := result.GetDataTypeString() + So(reTypeString, ShouldEqual, "dateHour") + }) + Convey("Test_dictionary_datehour_null_values:", func() { + s, err := db.RunScript("x=datehour([2022.07.29 15:00:00.000, 2022.07.29 16:00:00.000, 2022.07.29 17:00:00.000]);y=take(datehour(['','','']),3);z=dict(x, y);z") + So(err, ShouldBeNil) + result := s.(*model.Dictionary) + key := [3]string{"2022.07.29T15", "2022.07.29T16", "2022.07.29T17"} + var k int + for i := 0; i < 3; i++ { + get, _ := result.Get(key[i]) + if get.IsNull() == true { + k++ + } + } + So(k, ShouldEqual, result.Values.RowCount) + reType := result.GetDataType() + So(reType, ShouldEqual, 28) + reTypeString := result.GetDataTypeString() + So(reTypeString, ShouldEqual, "dateHour") + }) + So(db.Close(), ShouldBeNil) + }) +} +func Test_Dictionary_UpLoad_int_and_long(t *testing.T) { + Convey("Test_dictionary_int->long_upload:", t, func() { + db, err := api.NewSimpleDolphinDBClient(context.TODO(), setup.Address, setup.UserName, setup.Password) + So(err, ShouldBeNil) + Convey("Test_dictionary_int->long:", func() { + keys, err := model.NewDataTypeListWithRaw(model.DtInt, []int32{2, -6, 1024, 1048576, -2019}) + So(err, ShouldBeNil) + values, err := model.NewDataTypeListWithRaw(model.DtLong, []int64{4875, -23, 1048576, 666, -2205}) + So(err, ShouldBeNil) + dict := model.NewDictionary(model.NewVector(keys), model.NewVector(values)) + _, err = db.Upload(map[string]model.DataForm{"s": dict}) + So(err, ShouldBeNil) + res, _ := db.RunScript("s") + result := res.(*model.Dictionary) + ty, _ := db.RunScript("typestr(s)") + key := [5]string{"2", "-6", "1024", "1048576", "-2019"} + val := [5]int64{4875, -23, 1048576, 666, -2205} + var k int + for i := 0; i < 5; i++ { + get, _ := result.Get(key[i]) + zx := get.Value() + if zx == val[i] { + k++ + } + } + So(k, ShouldEqual, result.Values.RowCount) + So(ty.String(), ShouldEqual, "string(INT->LONG DICTIONARY)") + }) + So(db.Close(), ShouldBeNil) + }) +} +func Test_Dictionary_UpLoad_short_and_char(t *testing.T) { + Convey("Test_dictionary_short->char_upload:", t, func() { + db, err := api.NewSimpleDolphinDBClient(context.TODO(), setup.Address, setup.UserName, setup.Password) + So(err, ShouldBeNil) + Convey("Test_dictionary_short->char:", func() { + keys, err := model.NewDataTypeListWithRaw(model.DtShort, []int16{2, -6, 102}) + So(err, ShouldBeNil) + values, err := model.NewDataTypeListWithRaw(model.DtChar, []byte{48, 23, 10}) + So(err, ShouldBeNil) + dict := model.NewDictionary(model.NewVector(keys), model.NewVector(values)) + _, err = db.Upload(map[string]model.DataForm{"s": dict}) + So(err, ShouldBeNil) + res, _ := db.RunScript("s") + result := res.(*model.Dictionary) + re := result.KeyStrings() + ty, _ := db.RunScript("typestr(s)") + key := []string{"2", "-6", "102"} + val := []byte{48, 23, 10} + for i := 0; i < len(re); i++ { + get, _ := result.Get(key[i]) + zx := get.Value() + So(zx, ShouldEqual, val[i]) + So(re[i], ShouldBeIn, key) + get1, _ := result.Get(re[i]) + zx1 := get1.Value() + So(val, ShouldContain, zx1) + } + So(ty.String(), ShouldEqual, "string(SHORT->CHAR DICTIONARY)") + }) + So(db.Close(), ShouldBeNil) + }) +} +func Test_Dictionary_UpLoad_long_and_float(t *testing.T) { + Convey("Test_dictionary_long->float_upload:", t, func() { + db, err := api.NewSimpleDolphinDBClient(context.TODO(), setup.Address, setup.UserName,
setup.Password) + So(err, ShouldBeNil) + Convey("Test_dictionary_long->float:", func() { + keys, err := model.NewDataTypeListWithRaw(model.DtLong, []int64{1522542, -1768546, 2022102}) + So(err, ShouldBeNil) + values, err := model.NewDataTypeListWithRaw(model.DtFloat, []float32{48.10485, 278953.6, 5454.1515}) + So(err, ShouldBeNil) + dict := model.NewDictionary(model.NewVector(keys), model.NewVector(values)) + _, err = db.Upload(map[string]model.DataForm{"s": dict}) + So(err, ShouldBeNil) + res, _ := db.RunScript("s") + result := res.(*model.Dictionary) + ty, _ := db.RunScript("typestr(s)") + key := []string{"1522542", "-1768546", "2022102"} + val := []float32{48.10485, 278953.6, 5454.1515} + var k int + for i := 0; i < 3; i++ { + get, _ := result.Get(key[i]) + zx := get.Value() + if zx == val[i] { + k++ + } + } + So(k, ShouldEqual, result.Values.RowCount) + So(ty.String(), ShouldEqual, "string(LONG->FLOAT DICTIONARY)") + }) + So(db.Close(), ShouldBeNil) + }) +} +func Test_Dictionary_UpLoad_double_and_date(t *testing.T) { + Convey("Test_dictionary_double->date_upload:", t, func() { + db, err := api.NewSimpleDolphinDBClient(context.TODO(), setup.Address, setup.UserName, setup.Password) + So(err, ShouldBeNil) + Convey("Test_dictionary_double->date:", func() { + keys, err := model.NewDataTypeListWithRaw(model.DtDouble, []float64{1522.12, -1766.321, 2102.5454}) + So(err, ShouldBeNil) + values, err := model.NewDataTypeListWithRaw(model.DtDate, []time.Time{time.Date(2022, 12, 31, 23, 59, 59, 999999999, time.UTC), time.Date(1969, 12, 31, 23, 59, 59, 999999999, time.UTC), time.Date(2006, 1, 2, 15, 4, 4, 999999999, time.UTC)}) + So(err, ShouldBeNil) + dict := model.NewDictionary(model.NewVector(keys), model.NewVector(values)) + _, err = db.Upload(map[string]model.DataForm{"s": dict}) + So(err, ShouldBeNil) + res, _ := db.RunScript("s") + result := res.(*model.Dictionary) + ty, _ := db.RunScript("typestr(s)") + key := []string{"1522.12", "-1766.321", "2102.5454"} + val := []time.Time{time.Date(2022, 12, 31, 0, 0, 0, 0, time.UTC), time.Date(1969, 12, 31, 0, 0, 0, 0, time.UTC), time.Date(2006, 1, 2, 0, 0, 0, 0, time.UTC)} + var k int + for i := 0; i < 3; i++ { + get, _ := result.Get(key[i]) + zx := get.Value() + if zx == val[i] { + k++ + } + } + So(k, ShouldEqual, result.Values.RowCount) + So(ty.String(), ShouldEqual, "string(DOUBLE->DATE DICTIONARY)") + }) + So(db.Close(), ShouldBeNil) + }) +} +func Test_Dictionary_UpLoad_month_and_time(t *testing.T) { + Convey("Test_dictionary_month->time_upload:", t, func() { + db, err := api.NewSimpleDolphinDBClient(context.TODO(), setup.Address, setup.UserName, setup.Password) + So(err, ShouldBeNil) + Convey("Test_dictionary_month->time:", func() { + keys, err := model.NewDataTypeListWithRaw(model.DtMonth, []time.Time{time.Date(2022, 12, 31, 23, 59, 59, 999999999, time.UTC), time.Date(1969, 12, 31, 23, 59, 59, 999999999, time.UTC), time.Date(2006, 1, 2, 15, 4, 4, 999999999, time.UTC)}) + So(err, ShouldBeNil) + values, err := model.NewDataTypeListWithRaw(model.DtTime, []time.Time{time.Date(2022, 12, 31, 23, 59, 59, 999999999, time.UTC), time.Date(1969, 12, 31, 23, 59, 59, 999999999, time.UTC), time.Date(2006, 1, 2, 15, 4, 4, 999999999, time.UTC)}) + So(err, ShouldBeNil) + dict := model.NewDictionary(model.NewVector(keys), model.NewVector(values)) + _, err = db.Upload(map[string]model.DataForm{"s": dict}) + So(err, ShouldBeNil) + res, _ := db.RunScript("s") + result := res.(*model.Dictionary) + ty, _ := db.RunScript("typestr(s)") + key := []string{"2022.12M",
"1969.12M", "2006.01M"} + val := []time.Time{time.Date(1970, 1, 1, 23, 59, 59, 999000000, time.UTC), time.Date(1970, 1, 1, 23, 59, 59, 999000000, time.UTC), time.Date(1970, 1, 1, 15, 4, 4, 999000000, time.UTC)} + var k int + for i := 0; i < 3; i++ { + get, _ := result.Get(key[i]) + zx := get.Value() + if zx == val[i] { + k++ + } + } + So(k, ShouldEqual, result.Values.RowCount) + So(ty.String(), ShouldEqual, "string(MONTH->TIME DICTIONARY)") + }) + So(db.Close(), ShouldBeNil) + }) +} +func Test_Dictionary_UpLoad_minute_and_second(t *testing.T) { + Convey("Test_dictionary_minute->second_upload:", t, func() { + db, err := api.NewSimpleDolphinDBClient(context.TODO(), setup.Address, setup.UserName, setup.Password) + So(err, ShouldBeNil) + Convey("Test_dictionary_minute->second:", func() { + keys, err := model.NewDataTypeListWithRaw(model.DtMinute, []time.Time{time.Date(2022, 12, 31, 23, 59, 59, 999999999, time.UTC), time.Date(1969, 12, 31, 23, 59, 59, 999999999, time.UTC), time.Date(2006, 1, 2, 15, 4, 4, 999999999, time.UTC)}) + So(err, ShouldBeNil) + values, err := model.NewDataTypeListWithRaw(model.DtSecond, []time.Time{time.Date(2022, 12, 31, 23, 59, 59, 999999999, time.UTC), time.Date(1969, 12, 31, 23, 59, 59, 999999999, time.UTC), time.Date(2006, 1, 2, 15, 4, 4, 999999999, time.UTC)}) + So(err, ShouldBeNil) + dict := model.NewDictionary(model.NewVector(keys), model.NewVector(values)) + _, err = db.Upload(map[string]model.DataForm{"s": dict}) + So(err, ShouldBeNil) + res, _ := db.RunScript("s") + result := res.(*model.Dictionary) + ty, _ := db.RunScript("typestr(s)") + key := []string{"23:59m", "23:59m", "15:04m"} + val := []time.Time{time.Date(1970, 1, 1, 23, 59, 59, 0, time.UTC), time.Date(1970, 1, 1, 23, 59, 59, 0, time.UTC), time.Date(1970, 1, 1, 15, 4, 4, 0, time.UTC)} + var k int + for i := 0; i < 3; i++ { + get, _ := result.Get(key[i]) + zx := get.Value() + if zx == val[i] { + k++ + } + } + So(k, ShouldEqual, 3) + So(ty.String(), ShouldEqual, "string(MINUTE->SECOND DICTIONARY)") + }) + So(db.Close(), ShouldBeNil) + }) +} +func Test_Dictionary_UpLoad_datetime_and_timestamp(t *testing.T) { + Convey("Test_dictionary_datetime->timestamp_upload:", t, func() { + db, err := api.NewSimpleDolphinDBClient(context.TODO(), setup.Address, setup.UserName, setup.Password) + So(err, ShouldBeNil) + Convey("Test_dictionary_datetime->timestamp:", func() { + keys, err := model.NewDataTypeListWithRaw(model.DtDatetime, []time.Time{time.Date(2022, 12, 31, 23, 59, 59, 999999999, time.UTC), time.Date(1969, 12, 31, 23, 59, 59, 999999999, time.UTC), time.Date(2006, 1, 2, 15, 4, 4, 999999999, time.UTC)}) + So(err, ShouldBeNil) + values, err := model.NewDataTypeListWithRaw(model.DtTimestamp, []time.Time{time.Date(2022, 12, 31, 23, 59, 59, 999999999, time.UTC), time.Date(1969, 12, 31, 23, 59, 59, 999999999, time.UTC), time.Date(2006, 1, 2, 15, 4, 4, 999999999, time.UTC)}) + So(err, ShouldBeNil) + dict := model.NewDictionary(model.NewVector(keys), model.NewVector(values)) + _, err = db.Upload(map[string]model.DataForm{"s": dict}) + So(err, ShouldBeNil) + res, _ := db.RunScript("s") + result := res.(*model.Dictionary) + ty, _ := db.RunScript("typestr(s)") + key := []string{"2022.12.31T23:59:59", "1969.12.31T23:59:59", "2006.01.02T15:04:04"} + val := []time.Time{time.Date(2022, 12, 31, 23, 59, 59, 999000000, time.UTC), time.Date(1969, 12, 31, 23, 59, 59, 999000000, time.UTC), time.Date(2006, 1, 2, 15, 4, 4, 999000000, time.UTC)} + var k int + for i := 0; i < 3; i++ { + get, _ := result.Get(key[i]) + zx := 
get.Value() + if zx == val[i] { + k++ + } + } + So(k, ShouldEqual, result.Values.RowCount) + So(ty.String(), ShouldEqual, "string(DATETIME->TIMESTAMP DICTIONARY)") + }) + So(db.Close(), ShouldBeNil) + }) +} +func Test_Dictionary_UpLoad_nanotime_and_nanotimestamp(t *testing.T) { + Convey("Test_dictionary_nanotime->nanotimestamp_upload:", t, func() { + db, err := api.NewSimpleDolphinDBClient(context.TODO(), setup.Address, setup.UserName, setup.Password) + So(err, ShouldBeNil) + Convey("Test_dictionary_nanotime->nanotimestamp:", func() { + keys, err := model.NewDataTypeListWithRaw(model.DtNanoTime, []time.Time{time.Date(2022, 12, 31, 23, 59, 59, 999999999, time.UTC), time.Date(1969, 12, 31, 23, 59, 59, 999999999, time.UTC), time.Date(2006, 1, 2, 15, 4, 4, 999999999, time.UTC)}) + So(err, ShouldBeNil) + values, err := model.NewDataTypeListWithRaw(model.DtNanoTimestamp, []time.Time{time.Date(2022, 12, 31, 23, 59, 59, 999999999, time.UTC), time.Date(1969, 12, 31, 23, 59, 59, 999999999, time.UTC), time.Date(2006, 1, 2, 15, 4, 4, 999999999, time.UTC)}) + So(err, ShouldBeNil) + dict := model.NewDictionary(model.NewVector(keys), model.NewVector(values)) + _, err = db.Upload(map[string]model.DataForm{"s": dict}) + So(err, ShouldBeNil) + res, _ := db.RunScript("s") + result := res.(*model.Dictionary) + ty, _ := db.RunScript("typestr(s)") + key := []string{"23:59:59.999999999", "23:59:59.999999999", "15:04:04.999999999"} + val := []time.Time{time.Date(1969, 12, 31, 23, 59, 59, 999999999, time.UTC), time.Date(1969, 12, 31, 23, 59, 59, 999999999, time.UTC), time.Date(2006, 1, 2, 15, 4, 4, 999999999, time.UTC)} + var k int + for i := 0; i < 3; i++ { + get, _ := result.Get(key[i]) + zx := get.Value() + if zx == val[i] { + k++ + } + } + So(k, ShouldEqual, 3) + So(ty.String(), ShouldEqual, "string(NANOTIME->NANOTIMESTAMP DICTIONARY)") + }) + So(db.Close(), ShouldBeNil) + }) +} +func Test_Dictionary_UpLoad_string_and_datehour(t *testing.T) { + Convey("Test_dictionary_string->datehour_upload:", t, func() { + db, err := api.NewSimpleDolphinDBClient(context.TODO(), setup.Address, setup.UserName, setup.Password) + So(err, ShouldBeNil) + Convey("Test_dictionary_string->datehour:", func() { + keys, err := model.NewDataTypeListWithRaw(model.DtString, []string{"hello", "%^*", "数据类型"}) + So(err, ShouldBeNil) + values, err := model.NewDataTypeListWithRaw(model.DtDateHour, []time.Time{time.Date(2022, 12, 31, 23, 59, 59, 999999999, time.UTC), time.Date(1969, 12, 31, 23, 59, 59, 999999999, time.UTC), time.Date(2006, 1, 2, 15, 4, 4, 999999999, time.UTC)}) + So(err, ShouldBeNil) + dict := model.NewDictionary(model.NewVector(keys), model.NewVector(values)) + _, err = db.Upload(map[string]model.DataForm{"s": dict}) + So(err, ShouldBeNil) + res, _ := db.RunScript("s") + result := res.(*model.Dictionary) + ty, _ := db.RunScript("typestr(s)") + key := []string{"hello", "%^*", "数据类型"} + val := []time.Time{time.Date(2022, 12, 31, 23, 0, 0, 0, time.UTC), time.Date(1969, 12, 31, 23, 0, 0, 0, time.UTC), time.Date(2006, 1, 2, 15, 0, 0, 0, time.UTC)} + var k int + for i := 0; i < 3; i++ { + get, _ := result.Get(key[i]) + zx := get.Value() + if zx == val[i] { + k++ + } + } + So(k, ShouldEqual, result.Values.RowCount) + So(ty.String(), ShouldEqual, "string(STRING->DATEHOUR DICTIONARY)") + }) + So(db.Close(), ShouldBeNil) + }) +} +func Test_Dictionary_UpLoad_big_array_int_and_string(t *testing.T) { + Convey("Test_dictionary_big_array_int_and_string:", t, func() { + db, err := api.NewSimpleDolphinDBClient(context.TODO(), setup.Address, 
setup.UserName, setup.Password) + So(err, ShouldBeNil) + Convey("Test_dictionary_int->string:", func() { + var i int32 + intv := []int32{} + for i = 0; i < 3000000*12; i += 12 { + intv = append(intv, i) + } + intv = append(intv, model.NullInt) + keys, err := model.NewDataTypeListWithRaw(model.DtInt, intv) + So(err, ShouldBeNil) + stringv := []string{} + for i = 0; i < 3000000*12; i += 12 { + stringv = append(stringv, string("hello")) + } + stringv = append(stringv, model.NullString) + values, err := model.NewDataTypeListWithRaw(model.DtString, stringv) + So(err, ShouldBeNil) + dict := model.NewDictionary(model.NewVector(keys), model.NewVector(values)) + _, err = db.Upload(map[string]model.DataForm{"s": dict}) + So(err, ShouldBeNil) + res, _ := db.RunScript("s") + result := res.(*model.Dictionary) + So(result.Keys.RowCount, ShouldEqual, 3000001) + So(result.Values.RowCount, ShouldEqual, 3000001) + ty, _ := db.RunScript("typestr(s)") + So(ty.String(), ShouldEqual, "string(INT->STRING DICTIONARY)") + }) + So(db.Close(), ShouldBeNil) + }) +} diff --git a/test/basicTypeTest/basicMatrix_test.go b/test/basicTypeTest/basicMatrix_test.go new file mode 100644 index 0000000..5e1ef4b --- /dev/null +++ b/test/basicTypeTest/basicMatrix_test.go @@ -0,0 +1,1485 @@ +package test + +import ( + "bytes" + "context" + "math" + "testing" + "time" + + "github.com/dolphindb/api-go/api" + "github.com/dolphindb/api-go/dialer/protocol" + "github.com/dolphindb/api-go/model" + "github.com/dolphindb/api-go/test/setup" + . "github.com/smartystreets/goconvey/convey" +) + +func Test_Matrix_DownLoad_int(t *testing.T) { + Convey("Test_matrix_int:", t, func() { + db, err := api.NewSimpleDolphinDBClient(context.TODO(), setup.Address, setup.UserName, setup.Password) + So(err, ShouldBeNil) + Convey("Test_matrix_int_not_null:", func() { + s, err := db.RunScript("[1, -2, 93, 1024, -2025, 1048576]$3:2") + So(err, ShouldBeNil) + result := s.(*model.Matrix) + zx := [3][2]int32{{1, 1024}, {-2, -2025}, {93, 1048576}} + var k int + for i := 0; i < result.Data.Rows(); i++ { + for j := 0; j < int(result.Data.ColumnCount); j++ { + if result.Get(i, j).Value() == zx[i][j] { + k++ + } + } + } + So(result.Data.ColumnCount, ShouldEqual, 2) + So(result.Data.RowCount, ShouldEqual, 3) + So(k, ShouldEqual, result.Data.ColumnCount*result.Data.RowCount) + reType := result.GetDataType() + So(reType, ShouldEqual, 4) + reTypeString := result.GetDataTypeString() + So(reTypeString, ShouldEqual, "int") + form := result.GetDataForm() + So(form, ShouldEqual, 3) + }) + Convey("Test_matrix_int_null:", func() { + s, err := db.RunScript("matrix(INT, 3, 2)") + So(err, ShouldBeNil) + result := s.(*model.Matrix) + zx := [3][2]int{} + for i := 0; i < 3; i++ { + for j := 0; j < 2; j++ { + re := result.Get(i, j).Value() + So(re, ShouldEqual, zx[i][j]) + } + } + reType := result.GetDataType() + So(reType, ShouldEqual, 4) + reTypeString := result.GetDataTypeString() + So(reTypeString, ShouldEqual, "int") + }) + So(db.Close(), ShouldBeNil) + }) +} +func Test_Matrix_DownLoad_short(t *testing.T) { + Convey("Test_matrix_short:", t, func() { + db, err := api.NewSimpleDolphinDBClient(context.TODO(), setup.Address, setup.UserName, setup.Password) + So(err, ShouldBeNil) + Convey("Test_matrix_short_not_null:", func() { + s, err := db.RunScript("[1h, -2h, 93h, 1024h, -2025h, 32766h]$3:2") + So(err, ShouldBeNil) + result := s.(*model.Matrix) + zx := [3][2]int16{{1, 1024}, {-2, -2025}, {93, 32766}} + var k int + for i := 0; i < result.Data.Rows(); i++ { + for j := 0; j < 
int(result.Data.ColumnCount); j++ { + if result.Get(i, j).Value() == zx[i][j] { + k++ + } + } + } + So(result.Data.ColumnCount, ShouldEqual, 2) + So(result.Data.RowCount, ShouldEqual, 3) + So(k, ShouldEqual, result.Data.ColumnCount*result.Data.RowCount) + reType := result.GetDataType() + So(reType, ShouldEqual, 3) + reTypeString := result.GetDataTypeString() + So(reTypeString, ShouldEqual, "short") + }) + Convey("Test_matrix_short_null:", func() { + s, err := db.RunScript("matrix(SHORT, 3, 2)") + So(err, ShouldBeNil) + result := s.(*model.Matrix) + zx := [3][2]int16{} + for i := 0; i < 3; i++ { + for j := 0; j < 2; j++ { + re := result.Get(i, j).Value() + So(re, ShouldEqual, zx[i][j]) + } + } + reType := result.GetDataType() + So(reType, ShouldEqual, 3) + reTypeString := result.GetDataTypeString() + So(reTypeString, ShouldEqual, "short") + }) + So(db.Close(), ShouldBeNil) + }) +} +func Test_Matrix_DownLoad_long(t *testing.T) { + Convey("Test_matrix_long:", t, func() { + db, err := api.NewSimpleDolphinDBClient(context.TODO(), setup.Address, setup.UserName, setup.Password) + So(err, ShouldBeNil) + Convey("Test_matrix_long_not_null:", func() { + s, err := db.RunScript("[1l, 12l, -15l,1024l, 1048576l, 24l]$3:2") + So(err, ShouldBeNil) + result := s.(*model.Matrix) + zx := [3][2]int64{{1, 1024}, {12, 1048576}, {-15, 24}} + var k int + for i := 0; i < result.Data.Rows(); i++ { + for j := 0; j < int(result.Data.ColumnCount); j++ { + if result.Get(i, j).Value() == zx[i][j] { + k++ + } + } + } + So(result.Data.ColumnCount, ShouldEqual, 2) + So(result.Data.RowCount, ShouldEqual, 3) + So(k, ShouldEqual, result.Data.ColumnCount*result.Data.RowCount) + reType := result.GetDataType() + So(reType, ShouldEqual, 5) + reTypeString := result.GetDataTypeString() + So(reTypeString, ShouldEqual, "long") + by := bytes.NewBufferString("") + w := protocol.NewWriter(by) + err = result.Render(w, protocol.LittleEndian) + So(err, ShouldBeNil) + w.Flush() + by.Reset() + }) + Convey("Test_matrix_long_null:", func() { + s, err := db.RunScript("matrix(LONG, 3, 2)") + So(err, ShouldBeNil) + result := s.(*model.Matrix) + zx := [3][2]int64{} + for i := 0; i < 3; i++ { + for j := 0; j < 2; j++ { + re := result.Get(i, j).Value() + So(re, ShouldEqual, zx[i][j]) + } + } + reType := result.GetDataType() + So(reType, ShouldEqual, 5) + reTypeString := result.GetDataTypeString() + So(reTypeString, ShouldEqual, "long") + }) + So(db.Close(), ShouldBeNil) + }) +} +func Test_Matrix_DownLoad_double(t *testing.T) { + Convey("Test_matrix_double:", t, func() { + db, err := api.NewSimpleDolphinDBClient(context.TODO(), setup.Address, setup.UserName, setup.Password) + So(err, ShouldBeNil) + Convey("Test_matrix_double_not_null:", func() { + s, err := db.RunScript("[1.1, -1.2, 1300.0, 1024.0, 1.5, 1048576.0]$3:2") + So(err, ShouldBeNil) + result := s.(*model.Matrix) + zx := [3][2]float64{{1.1, 1024}, {-1.2, 1.5}, {1300, 1048576}} + var k int + for i := 0; i < result.Data.Rows(); i++ { + for j := 0; j < int(result.Data.ColumnCount); j++ { + if result.Get(i, j).Value() == zx[i][j] { + k++ + } + } + } + So(result.Data.ColumnCount, ShouldEqual, 2) + So(result.Data.RowCount, ShouldEqual, 3) + So(k, ShouldEqual, result.Data.ColumnCount*result.Data.RowCount) + reType := result.GetDataType() + So(reType, ShouldEqual, 16) + reTypeString := result.GetDataTypeString() + So(reTypeString, ShouldEqual, "double") + }) + Convey("Test_matrix_double_null:", func() { + s, err := db.RunScript("matrix(DOUBLE, 3, 2)") + So(err, ShouldBeNil) + result := 
s.(*model.Matrix) + zx := [3][2]float64{} + for i := 0; i < 3; i++ { + for j := 0; j < 2; j++ { + re := result.Get(i, j).Value() + So(re, ShouldEqual, zx[i][j]) + } + } + reType := result.GetDataType() + So(reType, ShouldEqual, 16) + reTypeString := result.GetDataTypeString() + So(reTypeString, ShouldEqual, "double") + }) + So(db.Close(), ShouldBeNil) + }) +} +func Test_Matrix_DownLoad_float(t *testing.T) { + Convey("Test_matrix_float:", t, func() { + db, err := api.NewSimpleDolphinDBClient(context.TODO(), setup.Address, setup.UserName, setup.Password) + So(err, ShouldBeNil) + Convey("Test_matrix_float_not_null:", func() { + s, err := db.RunScript("[1.1f, -1.2f, 1024.3f, -2025.4f, 1048576.5f, 5201314.6f]$3:2") + So(err, ShouldBeNil) + result := s.(*model.Matrix) + zx := [3][2]float32{{1.1, -2025.4}, {-1.2, 1048576.5}, {1024.3, 5201314.6}} + var k int + for i := 0; i < result.Data.Rows(); i++ { + for j := 0; j < int(result.Data.ColumnCount); j++ { + if result.Get(i, j).Value() == zx[i][j] { + k++ + } + } + } + So(result.Data.ColumnCount, ShouldEqual, 2) + So(result.Data.RowCount, ShouldEqual, 3) + So(k, ShouldEqual, result.Data.ColumnCount*result.Data.RowCount) + reType := result.GetDataType() + So(reType, ShouldEqual, 15) + reTypeString := result.GetDataTypeString() + So(reTypeString, ShouldEqual, "float") + }) + Convey("Test_matrix_float_null:", func() { + s, err := db.RunScript("matrix(FLOAT, 3, 2)") + So(err, ShouldBeNil) + result := s.(*model.Matrix) + zx := [3][2]float32{} + for i := 0; i < 3; i++ { + for j := 0; j < 2; j++ { + re := result.Get(i, j).Value() + So(re, ShouldEqual, zx[i][j]) + } + } + reType := result.GetDataType() + So(reType, ShouldEqual, 15) + reTypeString := result.GetDataTypeString() + So(reTypeString, ShouldEqual, "float") + }) + So(db.Close(), ShouldBeNil) + }) +} +func Test_Matrix_DownLoad_symbol(t *testing.T) { + Convey("Test_matrix_symbol:", t, func() { + db, err := api.NewSimpleDolphinDBClient(context.TODO(), setup.Address, setup.UserName, setup.Password) + So(err, ShouldBeNil) + Convey("Test_matrix_symbol_not_null:", func() { + s, err := db.RunScript("symbol(`A +string(1..9))$3:3") + So(err, ShouldBeNil) + result := s.(*model.Matrix) + zx := [3][3]string{{"A1", "A4", "A7"}, {"A2", "A5", "A8"}, {"A3", "A6", "A9"}} + var k int + for i := 0; i < result.Data.Rows(); i++ { + for j := 0; j < int(result.Data.ColumnCount); j++ { + if result.Get(i, j).Value() == zx[i][j] { + k++ + } + } + } + So(result.Data.ColumnCount, ShouldEqual, 3) + So(result.Data.RowCount, ShouldEqual, 3) + So(k, ShouldEqual, result.Data.ColumnCount*result.Data.RowCount) + reType := result.GetDataType() + So(reType, ShouldEqual, 17) + reTypeString := result.GetDataTypeString() + So(reTypeString, ShouldEqual, "symbol") + }) + Convey("Test_matrix_symbol_null:", func() { + s, err := db.RunScript("matrix(SYMBOL, 3, 2)") + So(err, ShouldBeNil) + result := s.(*model.Matrix) + for i := 0; i < 3; i++ { + for j := 0; j < 2; j++ { + So(result.Get(i, j).IsNull(), ShouldEqual, true) + } + } + reType := result.GetDataType() + So(reType, ShouldEqual, 145) + reTypeString := result.GetDataTypeString() + So(reTypeString, ShouldEqual, "symbolExtend") + }) + Convey("Test_matrix_symbol_all_null:", func() { + s, err := db.RunScript("symbol(take(string(), 12))$3:4") + So(err, ShouldBeNil) + result := s.(*model.Matrix) + for i := 0; i < 3; i++ { + for j := 0; j < 4; j++ { + re := result.Get(i, j).Value() + So(re, ShouldEqual, "") + } + } + }) + Convey("Test_matrix_symbol_some_null:", func() { + s, err := 
db.RunScript("symbol(['AA', 'BB',NULL, 'CC',NULL, 'DD'])$2:3") + So(err, ShouldBeNil) + result := s.(*model.Matrix) + zx := [2][3]string{{"AA"}, {"BB", "CC", "DD"}} + for i := 0; i < 2; i++ { + for j := 0; j < 3; j++ { + re := result.Get(i, j).Value() + So(re, ShouldEqual, zx[i][j]) + } + } + }) + So(db.Close(), ShouldBeNil) + }) +} +func Test_Matrix_DownLoad_date(t *testing.T) { + Convey("Test_matrix_date:", t, func() { + db, err := api.NewSimpleDolphinDBClient(context.TODO(), setup.Address, setup.UserName, setup.Password) + So(err, ShouldBeNil) + Convey("Test_matrix_date_not_null:", func() { + s, err := db.RunScript("a = 1969.12.31 1970.01.01 1970.01.02 2006.01.02 2006.01.03 2022.08.03 $3:2;a") + So(err, ShouldBeNil) + result := s.(*model.Matrix) + time1 := [][]time.Time{{time.Date(1969, 12, 31, 0, 0, 0, 0, time.UTC), time.Date(2006, 1, 2, 0, 0, 0, 0, time.UTC)}, {time.Date(1970, 1, 1, 0, 0, 0, 0, time.UTC), time.Date(2006, 1, 3, 0, 0, 0, 0, time.UTC)}, {time.Date(1970, 1, 2, 0, 0, 0, 0, time.UTC), time.Date(2022, 8, 3, 0, 0, 0, 0, time.UTC)}} + var k int + for i := 0; i < result.Data.Rows(); i++ { + for j := 0; j < int(result.Data.ColumnCount); j++ { + if result.Get(i, j).Value() == time1[i][j] { + k++ + } + } + } + So(result.Data.ColumnCount, ShouldEqual, 2) + So(result.Data.RowCount, ShouldEqual, 3) + So(k, ShouldEqual, result.Data.ColumnCount*result.Data.RowCount) + reType := result.GetDataType() + So(reType, ShouldEqual, 6) + reTypeString := result.GetDataTypeString() + So(reTypeString, ShouldEqual, "date") + }) + Convey("Test_matrix_date_null:", func() { + s, err := db.RunScript("a = take(00d,6)$3:2;a") + So(err, ShouldBeNil) + result := s.(*model.Matrix) + for i := 0; i < 3; i++ { + for j := 0; j < 2; j++ { + So(result.Get(i, j).IsNull(), ShouldEqual, true) + } + } + reType := result.GetDataType() + So(reType, ShouldEqual, 6) + reTypeString := result.GetDataTypeString() + So(reTypeString, ShouldEqual, "date") + }) + So(db.Close(), ShouldBeNil) + }) +} +func Test_Matrix_DownLoad_month(t *testing.T) { + Convey("Test_matrix_month:", t, func() { + db, err := api.NewSimpleDolphinDBClient(context.TODO(), setup.Address, setup.UserName, setup.Password) + So(err, ShouldBeNil) + Convey("Test_matrix_month_not_null:", func() { + s, err := db.RunScript("a = 1969.12M 1970.01M 1970.02M 2006.01M 2006.02M 2022.08M $3:2;a") + So(err, ShouldBeNil) + result := s.(*model.Matrix) + time1 := [][]time.Time{{time.Date(1969, 12, 1, 0, 0, 0, 0, time.UTC), time.Date(2006, 1, 1, 0, 0, 0, 0, time.UTC)}, {time.Date(1970, 1, 1, 0, 0, 0, 0, time.UTC), time.Date(2006, 2, 1, 0, 0, 0, 0, time.UTC)}, {time.Date(1970, 2, 1, 0, 0, 0, 0, time.UTC), time.Date(2022, 8, 1, 0, 0, 0, 0, time.UTC)}} + var k int + for i := 0; i < result.Data.Rows(); i++ { + for j := 0; j < int(result.Data.ColumnCount); j++ { + if result.Get(i, j).Value() == time1[i][j] { + k++ + } + } + } + So(result.Data.ColumnCount, ShouldEqual, 2) + So(result.Data.RowCount, ShouldEqual, 3) + So(k, ShouldEqual, result.Data.ColumnCount*result.Data.RowCount) + reType := result.GetDataType() + So(reType, ShouldEqual, 7) + reTypeString := result.GetDataTypeString() + So(reTypeString, ShouldEqual, "month") + }) + Convey("Test_matrix_month_early_1970:", func() { + s, err := db.RunScript("a = take(1922.06M+1..6,6)$3:2;a") + So(err, ShouldBeNil) + result := s.(*model.Matrix) + time1 := [3][2]string{{"1922-07", "1922-10"}, {"1922-08", "1922-11"}, {"1922-09", "1922-12"}} + t0, _ := time.Parse("2006-01", time1[0][0]) + t1, _ := time.Parse("2006-01", time1[0][1]) + t2, 
_ := time.Parse("2006-01", time1[1][0]) + t3, _ := time.Parse("2006-01", time1[1][1]) + t4, _ := time.Parse("2006-01", time1[2][0]) + t5, _ := time.Parse("2006-01", time1[2][1]) + So(result.Get(0, 0).Value(), ShouldEqual, t0) + So(result.Get(0, 1).Value(), ShouldEqual, t1) + So(result.Get(1, 0).Value(), ShouldEqual, t2) + So(result.Get(1, 1).Value(), ShouldEqual, t3) + So(result.Get(2, 0).Value(), ShouldEqual, t4) + So(result.Get(2, 1).Value(), ShouldEqual, t5) + reType := result.GetDataType() + So(reType, ShouldEqual, 7) + reTypeString := result.GetDataTypeString() + So(reTypeString, ShouldEqual, "month") + }) + Convey("Test_matrix_month_null:", func() { + s, err := db.RunScript("a = take(month(['','','','','',''])+1..6,6)$3:2;a") + So(err, ShouldBeNil) + result := s.(*model.Matrix) + for i := 0; i < 3; i++ { + for j := 0; j < 2; j++ { + So(result.Get(i, j).IsNull(), ShouldEqual, true) + } + } + reType := result.GetDataType() + So(reType, ShouldEqual, 7) + reTypeString := result.GetDataTypeString() + So(reTypeString, ShouldEqual, "month") + }) + So(db.Close(), ShouldBeNil) + }) +} +func Test_Matrix_DownLoad_time(t *testing.T) { + Convey("Test_matrix_time:", t, func() { + db, err := api.NewSimpleDolphinDBClient(context.TODO(), setup.Address, setup.UserName, setup.Password) + So(err, ShouldBeNil) + Convey("Test_matrix_time_not_null:", func() { + s, err := db.RunScript("a = 23:59:59.999 00:00:00.000 00:00:01.999 15:04:04.999 15:04:05.000 15:00:15.000 $3:2;a") + So(err, ShouldBeNil) + result := s.(*model.Matrix) + time1 := [][]time.Time{{time.Date(1970, 1, 1, 23, 59, 59, 999000000, time.UTC), time.Date(1970, 1, 1, 15, 4, 4, 999000000, time.UTC)}, {time.Date(1970, 1, 1, 0, 0, 0, 0, time.UTC), time.Date(1970, 1, 1, 15, 4, 5, 0, time.UTC)}, {time.Date(1970, 1, 1, 0, 0, 1, 999000000, time.UTC), time.Date(1970, 1, 1, 15, 0, 15, 0, time.UTC)}} + var k int + for i := 0; i < result.Data.Rows(); i++ { + for j := 0; j < int(result.Data.ColumnCount); j++ { + if result.Get(i, j).Value() == time1[i][j] { + k++ + } + } + } + So(result.Data.ColumnCount, ShouldEqual, 2) + So(result.Data.RowCount, ShouldEqual, 3) + So(k, ShouldEqual, result.Data.ColumnCount*result.Data.RowCount) + reType := result.GetDataType() + So(reType, ShouldEqual, 8) + reTypeString := result.GetDataTypeString() + So(reTypeString, ShouldEqual, "time") + }) + Convey("Test_matrix_time_null:", func() { + s, err := db.RunScript("a = take(time(['','','','','',''])+1..6,6)$3:2;a") + So(err, ShouldBeNil) + result := s.(*model.Matrix) + for i := 0; i < 3; i++ { + for j := 0; j < 2; j++ { + So(result.Get(i, j).IsNull(), ShouldEqual, true) + } + } + reType := result.GetDataType() + So(reType, ShouldEqual, 8) + reTypeString := result.GetDataTypeString() + So(reTypeString, ShouldEqual, "time") + }) + So(db.Close(), ShouldBeNil) + }) +} +func Test_Matrix_DownLoad_minute(t *testing.T) { + Convey("Test_matrix_minute:", t, func() { + db, err := api.NewSimpleDolphinDBClient(context.TODO(), setup.Address, setup.UserName, setup.Password) + So(err, ShouldBeNil) + Convey("Test_matrix_minute_not_null:", func() { + s, err := db.RunScript("a = 23:59m 00:00m 00:01m 15:04m 15:05m 15:15m $3:2;a") + So(err, ShouldBeNil) + result := s.(*model.Matrix) + time1 := [][]time.Time{{time.Date(1970, 1, 1, 23, 59, 0, 0, time.UTC), time.Date(1970, 1, 1, 15, 4, 0, 0, time.UTC)}, {time.Date(1970, 1, 1, 0, 0, 0, 0, time.UTC), time.Date(1970, 1, 1, 15, 5, 0, 0, time.UTC)}, {time.Date(1970, 1, 1, 0, 1, 0, 0, time.UTC), time.Date(1970, 1, 1, 15, 15, 0, 0, time.UTC)}} + var k int + 
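// --- illustrative sketch, not part of the patch -----------------------------
// Matrix cells are addressed with Get(row, column), and DolphinDB's $r:c
// reshape fills column by column, which is why the expected arrays in these
// tests look transposed relative to the script literal: [1, 2, 3, 4, 5, 6]$3:2
// puts 1 2 3 in the first column and 4 5 6 in the second. A standalone sketch,
// using only calls that appear in this patch (placeholder server details):

package main

import (
	"context"
	"fmt"

	"github.com/dolphindb/api-go/api"
	"github.com/dolphindb/api-go/model"
)

func main() {
	db, err := api.NewSimpleDolphinDBClient(context.TODO(), "127.0.0.1:8848", "admin", "123456")
	if err != nil {
		panic(err)
	}
	defer db.Close()
	s, err := db.RunScript("[1, 2, 3, 4, 5, 6]$3:2")
	if err != nil {
		panic(err)
	}
	m := s.(*model.Matrix)
	for i := 0; i < m.Data.Rows(); i++ {
		for j := 0; j < int(m.Data.ColumnCount); j++ {
			fmt.Print(m.Get(i, j).Value(), " ") // prints rows: 1 4 / 2 5 / 3 6
		}
		fmt.Println()
	}
}
// ----------------------------------------------------------------------------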
for i := 0; i < result.Data.Rows(); i++ { + for j := 0; j < int(result.Data.ColumnCount); j++ { + if result.Get(i, j).Value() == time1[i][j] { + k++ + } + } + } + So(result.Data.ColumnCount, ShouldEqual, 2) + So(result.Data.RowCount, ShouldEqual, 3) + So(k, ShouldEqual, result.Data.ColumnCount*result.Data.RowCount) + reType := result.GetDataType() + So(reType, ShouldEqual, 9) + reTypeString := result.GetDataTypeString() + So(reTypeString, ShouldEqual, "minute") + result.SetNull(1, 0) + }) + Convey("Test_matrix_minute_null:", func() { + s, err := db.RunScript("a = take(00m,6)$3:2;a") + So(err, ShouldBeNil) + result := s.(*model.Matrix) + for i := 0; i < 3; i++ { + for j := 0; j < 2; j++ { + So(result.Get(i, j).IsNull(), ShouldEqual, true) + } + } + reType := result.GetDataType() + So(reType, ShouldEqual, 9) + reTypeString := result.GetDataTypeString() + So(reTypeString, ShouldEqual, "minute") + }) + So(db.Close(), ShouldBeNil) + }) +} +func Test_Matrix_DownLoad_second(t *testing.T) { + Convey("Test_matrix_second:", t, func() { + db, err := api.NewSimpleDolphinDBClient(context.TODO(), setup.Address, setup.UserName, setup.Password) + So(err, ShouldBeNil) + Convey("Test_matrix_second_not_null:", func() { + s, err := db.RunScript("a = 23:59:59 00:00:00 00:00:01 15:04:04 15:04:05 15:00:15 $3:2;a") + So(err, ShouldBeNil) + result := s.(*model.Matrix) + time1 := [][]time.Time{{time.Date(1970, 1, 1, 23, 59, 59, 0, time.UTC), time.Date(1970, 1, 1, 15, 4, 4, 0, time.UTC)}, {time.Date(1970, 1, 1, 0, 0, 0, 0, time.UTC), time.Date(1970, 1, 1, 15, 4, 5, 0, time.UTC)}, {time.Date(1970, 1, 1, 0, 0, 1, 0, time.UTC), time.Date(1970, 1, 1, 15, 0, 15, 0, time.UTC)}} + var k int + for i := 0; i < result.Data.Rows(); i++ { + for j := 0; j < int(result.Data.ColumnCount); j++ { + if result.Get(i, j).Value() == time1[i][j] { + k++ + } + } + } + So(result.Data.ColumnCount, ShouldEqual, 2) + So(result.Data.RowCount, ShouldEqual, 3) + So(k, ShouldEqual, result.Data.ColumnCount*result.Data.RowCount) + reType := result.GetDataType() + So(reType, ShouldEqual, 10) + reTypeString := result.GetDataTypeString() + So(reTypeString, ShouldEqual, "second") + }) + Convey("Test_matrix_second_null:", func() { + s, err := db.RunScript("a = take(00s,6)$3:2;a") + So(err, ShouldBeNil) + result := s.(*model.Matrix) + for i := 0; i < 3; i++ { + for j := 0; j < 2; j++ { + So(result.Get(i, j).IsNull(), ShouldEqual, true) + } + } + reType := result.GetDataType() + So(reType, ShouldEqual, 10) + reTypeString := result.GetDataTypeString() + So(reTypeString, ShouldEqual, "second") + }) + So(db.Close(), ShouldBeNil) + }) +} +func Test_Matrix_DownLoad_datetime(t *testing.T) { + Convey("Test_matrix_datetime:", t, func() { + db, err := api.NewSimpleDolphinDBClient(context.TODO(), setup.Address, setup.UserName, setup.Password) + So(err, ShouldBeNil) + Convey("Test_matrix_datetime_not_null:", func() { + s, err := db.RunScript("a = 1969.12.31T23:59:59 1970.01.01T00:00:00 1970.01.01T00:00:01 2006.01.02T15:04:04 2006.01.02T15:04:05 2022.08.03T15:00:15 $3:2;a") + So(err, ShouldBeNil) + result := s.(*model.Matrix) + time1 := [][]time.Time{{time.Date(1969, 12, 31, 23, 59, 59, 0, time.UTC), time.Date(2006, 1, 2, 15, 4, 4, 0, time.UTC)}, {time.Date(1970, 1, 1, 0, 0, 0, 0, time.UTC), time.Date(2006, 1, 2, 15, 4, 5, 0, time.UTC)}, {time.Date(1970, 1, 1, 0, 0, 1, 0, time.UTC), time.Date(2022, 8, 3, 15, 0, 15, 0, time.UTC)}} + var k int + for i := 0; i < result.Data.Rows(); i++ { + for j := 0; j < int(result.Data.ColumnCount); j++ { + if result.Get(i, j).Value() 
== time1[i][j] { + k++ + } + } + } + So(result.Data.ColumnCount, ShouldEqual, 2) + So(result.Data.RowCount, ShouldEqual, 3) + So(k, ShouldEqual, result.Data.ColumnCount*result.Data.RowCount) + reType := result.GetDataType() + So(reType, ShouldEqual, 11) + reTypeString := result.GetDataTypeString() + So(reTypeString, ShouldEqual, "datetime") + }) + Convey("Test_matrix_datetime_null:", func() { + s, err := db.RunScript("a = take(datetime(['','','','','',''])+1..6,6)$3:2;a") + So(err, ShouldBeNil) + result := s.(*model.Matrix) + for i := 0; i < 3; i++ { + for j := 0; j < 2; j++ { + So(result.Get(i, j).IsNull(), ShouldEqual, true) + } + } + reType := result.GetDataType() + So(reType, ShouldEqual, 11) + reTypeString := result.GetDataTypeString() + So(reTypeString, ShouldEqual, "datetime") + }) + So(db.Close(), ShouldBeNil) + }) +} +func Test_Matrix_DownLoad_timestamp(t *testing.T) { + Convey("Test_matrix_timestamp:", t, func() { + db, err := api.NewSimpleDolphinDBClient(context.TODO(), setup.Address, setup.UserName, setup.Password) + So(err, ShouldBeNil) + Convey("Test_matrix_timestamp_not_null:", func() { + s, err := db.RunScript("a = 1969.12.31T23:59:59.999 1970.01.01T00:00:00.000 1970.01.01T00:00:01.999 2006.01.02T15:04:04.999 2006.01.02T15:04:05.000 2022.08.03T15:00:15.000 $3:2;a") + So(err, ShouldBeNil) + result := s.(*model.Matrix) + time1 := [][]time.Time{{time.Date(1969, 12, 31, 23, 59, 59, 999000000, time.UTC), time.Date(2006, 1, 2, 15, 4, 4, 999000000, time.UTC)}, {time.Date(1970, 1, 1, 0, 0, 0, 0, time.UTC), time.Date(2006, 1, 2, 15, 4, 5, 0, time.UTC)}, {time.Date(1970, 1, 1, 0, 0, 1, 999000000, time.UTC), time.Date(2022, 8, 3, 15, 0, 15, 0, time.UTC)}} + var k int + for i := 0; i < result.Data.Rows(); i++ { + for j := 0; j < int(result.Data.ColumnCount); j++ { + if result.Get(i, j).Value() == time1[i][j] { + k++ + } + } + } + So(result.Data.ColumnCount, ShouldEqual, 2) + So(result.Data.RowCount, ShouldEqual, 3) + So(k, ShouldEqual, result.Data.ColumnCount*result.Data.RowCount) + reType := result.GetDataType() + So(reType, ShouldEqual, 12) + reTypeString := result.GetDataTypeString() + So(reTypeString, ShouldEqual, "timestamp") + }) + Convey("Test_matrix_timestamp_null:", func() { + s, err := db.RunScript("a = take(timestamp(['','','','','',''])+1..6,6)$3:2;a") + So(err, ShouldBeNil) + result := s.(*model.Matrix) + for i := 0; i < 3; i++ { + for j := 0; j < 2; j++ { + So(result.Get(i, j).IsNull(), ShouldEqual, true) + } + } + reType := result.GetDataType() + So(reType, ShouldEqual, 12) + reTypeString := result.GetDataTypeString() + So(reTypeString, ShouldEqual, "timestamp") + }) + So(db.Close(), ShouldBeNil) + }) +} +func Test_Matrix_DownLoad_nanotime(t *testing.T) { + Convey("Test_matrix_nanotime:", t, func() { + db, err := api.NewSimpleDolphinDBClient(context.TODO(), setup.Address, setup.UserName, setup.Password) + So(err, ShouldBeNil) + Convey("Test_matrix_nanotime_not_null:", func() { + s, err := db.RunScript("a = 23:59:59.999999999 00:00:00.000000000 00:00:01.999999999 15:04:04.999999999 15:04:05.000000000 15:00:15.000000000 $3:2;a") + So(err, ShouldBeNil) + result := s.(*model.Matrix) + time1 := [][]time.Time{{time.Date(1970, 1, 1, 23, 59, 59, 999999999, time.UTC), time.Date(1970, 1, 1, 15, 4, 4, 999999999, time.UTC)}, {time.Date(1970, 1, 1, 0, 0, 0, 0, time.UTC), time.Date(1970, 1, 1, 15, 4, 5, 0, time.UTC)}, {time.Date(1970, 1, 1, 0, 0, 1, 999999999, time.UTC), time.Date(1970, 1, 1, 15, 0, 15, 0, time.UTC)}} + var k int + for i := 0; i < result.Data.Rows(); i++ { + for j := 
0; j < int(result.Data.ColumnCount); j++ { + if result.Get(i, j).Value() == time1[i][j] { + k++ + } + } + } + So(result.Data.ColumnCount, ShouldEqual, 2) + So(result.Data.RowCount, ShouldEqual, 3) + So(k, ShouldEqual, result.Data.ColumnCount*result.Data.RowCount) + reType := result.GetDataType() + So(reType, ShouldEqual, 13) + reTypeString := result.GetDataTypeString() + So(reTypeString, ShouldEqual, "nanotime") + }) + Convey("Test_matrix_nanotime_null:", func() { + s, err := db.RunScript("a = take(nanotime(['','','','','',''])+1..6,6)$3:2;a") + So(err, ShouldBeNil) + result := s.(*model.Matrix) + for i := 0; i < 3; i++ { + for j := 0; j < 2; j++ { + So(result.Get(i, j).IsNull(), ShouldEqual, true) + } + } + reType := result.GetDataType() + So(reType, ShouldEqual, 13) + reTypeString := result.GetDataTypeString() + So(reTypeString, ShouldEqual, "nanotime") + }) + So(db.Close(), ShouldBeNil) + }) +} +func Test_Matrix_DownLoad_nanotimestamp(t *testing.T) { + Convey("Test_matrix_nanotimestamp:", t, func() { + db, err := api.NewSimpleDolphinDBClient(context.TODO(), setup.Address, setup.UserName, setup.Password) + So(err, ShouldBeNil) + Convey("Test_matrix_nanotimestamp_not_null:", func() { + s, err := db.RunScript("a = 1969.12.31T23:59:59.999999999 1970.01.01T00:00:00.000000000 1970.01.01T00:00:01.999999999 2006.01.02T15:04:04.999999999 2006.01.02T15:04:05.000000000 2022.08.03T15:00:15.000000000 $3:2;a") + So(err, ShouldBeNil) + result := s.(*model.Matrix) + time1 := [][]time.Time{{time.Date(1969, 12, 31, 23, 59, 59, 999999999, time.UTC), time.Date(2006, 1, 2, 15, 4, 4, 999999999, time.UTC)}, {time.Date(1970, 1, 1, 0, 0, 0, 0, time.UTC), time.Date(2006, 1, 2, 15, 4, 5, 0, time.UTC)}, {time.Date(1970, 1, 1, 0, 0, 1, 999999999, time.UTC), time.Date(2022, 8, 3, 15, 0, 15, 0, time.UTC)}} + var k int + for i := 0; i < result.Data.Rows(); i++ { + for j := 0; j < int(result.Data.ColumnCount); j++ { + if result.Get(i, j).Value() == time1[i][j] { + k++ + } + } + } + So(result.Data.ColumnCount, ShouldEqual, 2) + So(result.Data.RowCount, ShouldEqual, 3) + So(k, ShouldEqual, result.Data.ColumnCount*result.Data.RowCount) + reType := result.GetDataType() + So(reType, ShouldEqual, 14) + reTypeString := result.GetDataTypeString() + So(reTypeString, ShouldEqual, "nanotimestamp") + }) + Convey("Test_matrix_nanotimestamp_null:", func() { + s, err := db.RunScript("a = take(nanotimestamp(['','','','','',''])+1..6,6)$3:2;a") + So(err, ShouldBeNil) + result := s.(*model.Matrix) + for i := 0; i < 3; i++ { + for j := 0; j < 2; j++ { + So(result.Get(i, j).IsNull(), ShouldEqual, true) + } + } + reType := result.GetDataType() + So(reType, ShouldEqual, 14) + reTypeString := result.GetDataTypeString() + So(reTypeString, ShouldEqual, "nanotimestamp") + }) + So(db.Close(), ShouldBeNil) + }) +} +func Test_Matrix_DownLoad_datehour(t *testing.T) { + Convey("Test_matrix_datehour:", t, func() { + db, err := api.NewSimpleDolphinDBClient(context.TODO(), setup.Address, setup.UserName, setup.Password) + So(err, ShouldBeNil) + Convey("Test_matrix_datehour_not_null:", func() { + s, err := db.RunScript("a = datehour[1969.12.31T23:59:59.999, 1970.01.01T00:00:00.000, 1970.01.01T00:00:01.999, 2006.01.02T15:04:04.999, 2006.01.02T15:04:05.000, 2022.08.03T15:00:15.000]$3:2;a") + So(err, ShouldBeNil) + result := s.(*model.Matrix) + time1 := [][]time.Time{{time.Date(1969, 12, 31, 23, 0, 0, 0, time.UTC), time.Date(2006, 1, 2, 15, 0, 0, 0, time.UTC)}, {time.Date(1970, 1, 1, 0, 0, 0, 0, time.UTC), time.Date(2006, 1, 2, 15, 0, 0, 0, time.UTC)}, 
{time.Date(1970, 1, 1, 0, 0, 0, 0, time.UTC), time.Date(2022, 8, 3, 15, 0, 0, 0, time.UTC)}} + var k int + for i := 0; i < result.Data.Rows(); i++ { + for j := 0; j < int(result.Data.ColumnCount); j++ { + if result.Get(i, j).Value() == time1[i][j] { + k++ + } + } + } + So(result.Data.ColumnCount, ShouldEqual, 2) + So(result.Data.RowCount, ShouldEqual, 3) + So(k, ShouldEqual, result.Data.ColumnCount*result.Data.RowCount) + reType := result.GetDataType() + So(reType, ShouldEqual, 28) + reTypeString := result.GetDataTypeString() + So(reTypeString, ShouldEqual, "dateHour") + }) + Convey("Test_matrix_datehour_null:", func() { + s, err := db.RunScript("a = take(datehour(['','','','','',''])+1..6,6)$3:2;a") + So(err, ShouldBeNil) + result := s.(*model.Matrix) + for i := 0; i < 3; i++ { + for j := 0; j < 2; j++ { + So(result.Get(i, j).IsNull(), ShouldEqual, true) + } + } + reType := result.GetDataType() + So(reType, ShouldEqual, 28) + reTypeString := result.GetDataTypeString() + So(reTypeString, ShouldEqual, "dateHour") + }) + So(db.Close(), ShouldBeNil) + }) +} +func Test_Matrix_DownLoad_only_one_column(t *testing.T) { + Convey("Test_matrix_only_one_column:", t, func() { + db, err := api.NewSimpleDolphinDBClient(context.TODO(), setup.Address, setup.UserName, setup.Password) + So(err, ShouldBeNil) + Convey("Test_matrix_one_column:", func() { + s, err := db.RunScript("matrix(1..6)") + So(err, ShouldBeNil) + result := s.(*model.Matrix) + zx := [6][1]int{} + for i := 0; i < 6; i++ { + for j := 0; j < 1; j++ { + re := result.Get(i, j).Value() + zx[i][j] = i + 1 + So(re, ShouldEqual, zx[i][j]) + } + } + reType := result.GetDataType() + So(reType, ShouldEqual, 4) + reTypeString := result.GetDataTypeString() + So(reTypeString, ShouldEqual, "int") + }) + So(db.Close(), ShouldBeNil) + }) +} +func Test_Matrix_DownLoad_only_one_row(t *testing.T) { + Convey("Test_matrix_only_one_row:", t, func() { + db, err := api.NewSimpleDolphinDBClient(context.TODO(), setup.Address, setup.UserName, setup.Password) + So(err, ShouldBeNil) + Convey("Test_matrix_one_row:", func() { + s, err := db.RunScript("matrix(take(1, 6)).transpose()") + So(err, ShouldBeNil) + result := s.(*model.Matrix) + zx := [1][6]int{} + for i := 0; i < 1; i++ { + for j := 0; j < 6; j++ { + re := result.Get(i, j).Value() + zx[i][j] = i + 1 + So(re, ShouldEqual, zx[i][j]) + } + } + reType := result.GetDataType() + So(reType, ShouldEqual, 4) + reTypeString := result.GetDataTypeString() + So(reTypeString, ShouldEqual, "int") + }) + So(db.Close(), ShouldBeNil) + }) +} +func Test_Matrix_DownLoad_with_int_label(t *testing.T) { + Convey("Test_matrix_with_int_label:", t, func() { + db, err := api.NewSimpleDolphinDBClient(context.TODO(), setup.Address, setup.UserName, setup.Password) + So(err, ShouldBeNil) + Convey("Test_matrix_int_with_label:", func() { + s, err := db.RunScript("cross(add,1..3,1..4)") + So(err, ShouldBeNil) + result := s.(*model.Matrix) + row := result.RowLabels.Data.Value() + col := result.ColumnLabels.Data.Value() + rerow := [3]int{1, 2, 3} + recol := [4]int{1, 2, 3, 4} + for i := 0; i < 3; i++ { + So(row[i], ShouldEqual, rerow[i]) + } + for i := 0; i < 4; i++ { + So(col[i], ShouldEqual, recol[i]) + } + zx := [3][4]int{} + for i := 0; i < 3; i++ { + for j := 0; j < 4; j++ { + re := result.Get(i, j).Value() + zx[i][j] = i + j + 2 + So(re, ShouldEqual, zx[i][j]) + } + } + }) + Convey("Test_matrix_int_only_with_row_label :", func() { + s, err := db.RunScript("m=1..6$3:2;m.rename!([0, 1, 2],);m") + So(err, ShouldBeNil) + result :=
s.(*model.Matrix) + row := result.RowLabels.Data.Value() + rerow := [3]int{0, 1, 2} + for i := 0; i < 3; i++ { + So(row[i], ShouldEqual, rerow[i]) + } + zx := [3][2]int{{1, 4}, {2, 5}, {3, 6}} + for i := 0; i < 3; i++ { + for j := 0; j < 2; j++ { + re := result.Get(i, j).Value() + So(re, ShouldEqual, zx[i][j]) + } + } + }) + Convey("Test_matrix_int_only_with_col_label :", func() { + s, err := db.RunScript("m=1..6$3:2;m.rename!([0, 1]);m") + So(err, ShouldBeNil) + result := s.(*model.Matrix) + col := result.ColumnLabels.Data.Value() + recol := [2]int{0, 1} + for i := 0; i < 2; i++ { + So(col[i], ShouldEqual, recol[i]) + } + zx := [3][2]int{{1, 4}, {2, 5}, {3, 6}} + for i := 0; i < 3; i++ { + for j := 0; j < 2; j++ { + re := result.Get(i, j).Value() + So(re, ShouldEqual, zx[i][j]) + } + } + }) + So(db.Close(), ShouldBeNil) + }) +} +func Test_Matrix_DownLoad_with_time_label(t *testing.T) { + Convey("Test_matrix_with_time_label:", t, func() { + db, err := api.NewSimpleDolphinDBClient(context.TODO(), setup.Address, setup.UserName, setup.Password) + So(err, ShouldBeNil) + Convey("Test_matrix_label_date_symbol:", func() { + s, err := db.RunScript("m=matrix([2200, 1300, 2500, 8800], [6800, 5400,NULL,NULL], [1900, 2100, 3200,NULL]).rename!(2012.01.01..2012.01.04, symbol(`C`IBM`MS));m") + So(err, ShouldBeNil) + result := s.(*model.Matrix) + row := result.RowLabels.Data.Value() + col := result.ColumnLabels.Data.Value() + time1 := [4]string{"2012-01-01 00:00:00", "2012-01-02 00:00:00", "2012-01-03 00:00:00", "2012-01-04 00:00:00"} + t0, _ := time.Parse("2006-01-02 15:04:05", time1[0]) + t1, _ := time.Parse("2006-01-02 15:04:05", time1[1]) + t2, _ := time.Parse("2006-01-02 15:04:05", time1[2]) + t3, _ := time.Parse("2006-01-02 15:04:05", time1[3]) + So(row[0], ShouldEqual, t0) + So(row[1], ShouldEqual, t1) + So(row[2], ShouldEqual, t2) + So(row[3], ShouldEqual, t3) + recol := [3]string{"C", "IBM", "MS"} + for i := 0; i < 3; i++ { + So(col[i], ShouldEqual, recol[i]) + } + zx := [4][3]int{{2200, 6800, 1900}, {1300, 5400, 2100}, {2500, math.MinInt32, 3200}, {8800, math.MinInt32, math.MinInt32}} + for i := 0; i < 4; i++ { + for j := 0; j < 3; j++ { + re := result.Get(i, j).Value() + So(re, ShouldEqual, zx[i][j]) + } + } + }) + Convey("Test_matrix_label_second_symbol:", func() { + s, err := db.RunScript("m=matrix([2200, 1300, 2500, 8800], [6800, 5400,NULL,NULL], [1900, 2100, 3200,NULL]).rename!([09:30:00, 10:00:00, 10:30:00, 11:00:00], symbol(`C`IBM`MS));m") + So(err, ShouldBeNil) + result := s.(*model.Matrix) + row := result.RowLabels.Data.Value() + col := result.ColumnLabels.Data.Value() + time1 := [4]string{"1970-01-01T09:30:00", "1970-01-01T10:00:00", "1970-01-01T10:30:00", "1970-01-01T11:00:00"} + t0, _ := time.Parse("2006-01-02T15:04:05", time1[0]) + t1, _ := time.Parse("2006-01-02T15:04:05", time1[1]) + t2, _ := time.Parse("2006-01-02T15:04:05", time1[2]) + t3, _ := time.Parse("2006-01-02T15:04:05", time1[3]) + So(row[0], ShouldEqual, t0) + So(row[1], ShouldEqual, t1) + So(row[2], ShouldEqual, t2) + So(row[3], ShouldEqual, t3) + recol := [3]string{"C", "IBM", "MS"} + for i := 0; i < 3; i++ { + So(col[i], ShouldEqual, recol[i]) + } + zx := [4][3]int{{2200, 6800, 1900}, {1300, 5400, 2100}, {2500, math.MinInt32, 3200}, {8800, math.MinInt32, math.MinInt32}} + for i := 0; i < 4; i++ { + for j := 0; j < 3; j++ { + re := result.Get(i, j).Value() + So(re, ShouldEqual, zx[i][j]) + } + } + }) + Convey("Test_matrix_label_symbol_date:", func() { + s, err := db.RunScript("m=matrix([2200, 1300, 2500, 8800], [6800, 
5400,NULL,NULL], [1900, 2100, 3200,NULL]).rename!(`C`IBM`MS`ZZ, 2012.01.01..2012.01.03);m") + So(err, ShouldBeNil) + result := s.(*model.Matrix) + row := result.RowLabels.Data.Value() + col := result.ColumnLabels.Data.Value() + rerow := [4]string{"C", "IBM", "MS", "ZZ"} + for i := 0; i < 4; i++ { + So(row[i], ShouldEqual, rerow[i]) + } + time1 := [3]string{"2012-01-01 00:00:00", "2012-01-02 00:00:00", "2012-01-03 00:00:00"} + t0, _ := time.Parse("2006-01-02 15:04:05", time1[0]) + t1, _ := time.Parse("2006-01-02 15:04:05", time1[1]) + t2, _ := time.Parse("2006-01-02 15:04:05", time1[2]) + So(col[0], ShouldEqual, t0) + So(col[1], ShouldEqual, t1) + So(col[2], ShouldEqual, t2) + zx := [4][3]int{{2200, 6800, 1900}, {1300, 5400, 2100}, {2500, math.MinInt32, 3200}, {8800, math.MinInt32, math.MinInt32}} + for i := 0; i < 4; i++ { + for j := 0; j < 3; j++ { + re := result.Get(i, j).Value() + So(re, ShouldEqual, zx[i][j]) + } + } + }) + Convey("Test_matrix_label_symbol_second:", func() { + s, err := db.RunScript("m=matrix([2200, 1300, 2500, 8800], [6800, 5400,NULL,NULL], [1900, 2100, 3200,NULL]).rename!(`C`IBM`MS`ZZ, [09:30:00, 10:00:00, 10:30:00]);m") + So(err, ShouldBeNil) + result := s.(*model.Matrix) + row := result.RowLabels.Data.Value() + col := result.ColumnLabels.Data.Value() + rerow := [4]string{"C", "IBM", "MS", "ZZ"} + for i := 0; i < 4; i++ { + So(row[i], ShouldEqual, rerow[i]) + } + time1 := [3]string{"1970-01-01T09:30:00", "1970-01-01T10:00:00", "1970-01-01T10:30:00"} + t0, _ := time.Parse("2006-01-02T15:04:05", time1[0]) + t1, _ := time.Parse("2006-01-02T15:04:05", time1[1]) + t2, _ := time.Parse("2006-01-02T15:04:05", time1[2]) + So(col[0], ShouldEqual, t0) + So(col[1], ShouldEqual, t1) + So(col[2], ShouldEqual, t2) + zx := [4][3]int{{2200, 6800, 1900}, {1300, 5400, 2100}, {2500, math.MinInt32, 3200}, {8800, math.MinInt32, math.MinInt32}} + for i := 0; i < 4; i++ { + for j := 0; j < 3; j++ { + re := result.Get(i, j).Value() + So(re, ShouldEqual, zx[i][j]) + } + } + }) + So(db.Close(), ShouldBeNil) + }) +} +func Test_Matrix_UpLoad_DataType_int(t *testing.T) { + Convey("Test_matrix_int_upload:", t, func() { + db, err := api.NewSimpleDolphinDBClient(context.TODO(), setup.Address, setup.UserName, setup.Password) + So(err, ShouldBeNil) + Convey("Test_matrix_int_upload:", func() { + data, err := model.NewDataTypeListWithRaw(model.DtInt, []int32{1, 2, 3, 4, 5, 6, 7, 8, 9}) + So(err, ShouldBeNil) + mtx := model.NewMatrix(model.NewVector(data), nil, nil) + _, err = db.Upload(map[string]model.DataForm{"s": mtx}) + res, _ := db.RunScript("s") + ty, _ := db.RunScript("typestr(s)") + result := res.(*model.Matrix) + re := result.Data.Data.Value() + zx := []int32{1, 2, 3, 4, 5, 6, 7, 8, 9} + var j int + for i := 0; i < len(re); i++ { + if re[i] == zx[i] { + j++ + } + } + So(j, ShouldEqual, len(re)) + So(err, ShouldBeNil) + So(ty.String(), ShouldEqual, "string(FAST INT MATRIX)") + So(res.GetDataType(), ShouldEqual, model.DtInt) + }) + So(db.Close(), ShouldBeNil) + }) +} +func Test_Matrix_UpLoad_DataType_short(t *testing.T) { + Convey("Test_matrix_short_upload:", t, func() { + db, err := api.NewSimpleDolphinDBClient(context.TODO(), setup.Address, setup.UserName, setup.Password) + So(err, ShouldBeNil) + Convey("Test_matrix_short_upload:", func() { + data, err := model.NewDataTypeListWithRaw(model.DtShort, []int16{1, 2, 3, 4, 5, 6, 7, 8, 9}) + So(err, ShouldBeNil) + mtx := model.NewMatrix(model.NewVector(data), nil, nil) + _, err = db.Upload(map[string]model.DataForm{"s": mtx}) + res, _ := 
db.RunScript("s") + ty, _ := db.RunScript("typestr(s)") + result := res.(*model.Matrix) + re := result.Data.Data.Value() + zx := []int16{1, 2, 3, 4, 5, 6, 7, 8, 9} + var j int + for i := 0; i < len(re); i++ { + if re[i] == zx[i] { + j++ + } + } + So(j, ShouldEqual, len(re)) + So(err, ShouldBeNil) + So(ty.String(), ShouldEqual, "string(FAST SHORT MATRIX)") + So(res.GetDataType(), ShouldEqual, model.DtShort) + }) + So(db.Close(), ShouldBeNil) + }) +} +func Test_Matrix_UpLoad_DataType_char(t *testing.T) { + Convey("Test_matrix_char_upload:", t, func() { + db, err := api.NewSimpleDolphinDBClient(context.TODO(), setup.Address, setup.UserName, setup.Password) + So(err, ShouldBeNil) + Convey("Test_matrix_char_upload:", func() { + data, err := model.NewDataTypeListWithRaw(model.DtChar, []byte{1, 2, 3, 4, 5, 6, 7, 8, 9}) + So(err, ShouldBeNil) + mtx := model.NewMatrix(model.NewVector(data), nil, nil) + _, err = db.Upload(map[string]model.DataForm{"s": mtx}) + res, _ := db.RunScript("s") + ty, _ := db.RunScript("typestr(s)") + result := res.(*model.Matrix) + re := result.Data.Data.Value() + zx := []uint8{1, 2, 3, 4, 5, 6, 7, 8, 9} + for i := 0; i < len(re); i++ { + So(re[i], ShouldEqual, zx[i]) + } + So(err, ShouldBeNil) + So(ty.String(), ShouldEqual, "string(FAST CHAR MATRIX)") + So(res.GetDataType(), ShouldEqual, model.DtChar) + }) + So(db.Close(), ShouldBeNil) + }) +} +func Test_Matrix_UpLoad_DataType_long(t *testing.T) { + Convey("Test_matrix_long_upload:", t, func() { + db, err := api.NewSimpleDolphinDBClient(context.TODO(), setup.Address, setup.UserName, setup.Password) + So(err, ShouldBeNil) + Convey("Test_matrix_long_upload:", func() { + data, err := model.NewDataTypeListWithRaw(model.DtLong, []int64{1, 2, 3, 4, 5, 6, 7, 8, 9}) + So(err, ShouldBeNil) + mtx := model.NewMatrix(model.NewVector(data), nil, nil) + _, err = db.Upload(map[string]model.DataForm{"s": mtx}) + res, _ := db.RunScript("s") + ty, _ := db.RunScript("typestr(s)") + result := res.(*model.Matrix) + re := result.Data.Data.Value() + zx := []int64{1, 2, 3, 4, 5, 6, 7, 8, 9} + var j int + for i := 0; i < len(re); i++ { + if re[i] == zx[i] { + j++ + } + } + So(j, ShouldEqual, len(re)) + So(err, ShouldBeNil) + So(ty.String(), ShouldEqual, "string(FAST LONG MATRIX)") + So(res.GetDataType(), ShouldEqual, model.DtLong) + }) + So(db.Close(), ShouldBeNil) + }) +} +func Test_Matrix_UpLoad_DataType_float(t *testing.T) { + Convey("Test_matrix_float_upload:", t, func() { + db, err := api.NewSimpleDolphinDBClient(context.TODO(), setup.Address, setup.UserName, setup.Password) + So(err, ShouldBeNil) + Convey("Test_matrix_float_upload:", func() { + data, err := model.NewDataTypeListWithRaw(model.DtFloat, []float32{1024.2, -2.10, 36897542.233, -5454545454, 8989.12125, 6, 7, 8, 9}) + So(err, ShouldBeNil) + mtx := model.NewMatrix(model.NewVector(data), nil, nil) + _, err = db.Upload(map[string]model.DataForm{"s": mtx}) + res, _ := db.RunScript("s") + ty, _ := db.RunScript("typestr(s)") + result := res.(*model.Matrix) + re := result.Data.Data.Value() + zx := []float32{1024.2, -2.10, 36897542.233, -5454545454, 8989.12125, 6, 7, 8, 9} + var j int + for i := 0; i < len(re); i++ { + if re[i] == zx[i] { + j++ + } + } + So(j, ShouldEqual, len(re)) + So(err, ShouldBeNil) + So(ty.String(), ShouldEqual, "string(FAST FLOAT MATRIX)") + So(res.GetDataType(), ShouldEqual, model.DtFloat) + }) + So(db.Close(), ShouldBeNil) + }) +} +func Test_Matrix_UpLoad_DataType_double(t *testing.T) { + Convey("Test_matrix_double_upload:", t, func() { + db, err := 
api.NewSimpleDolphinDBClient(context.TODO(), setup.Address, setup.UserName, setup.Password) + So(err, ShouldBeNil) + Convey("Test_matrix_double_upload:", func() { + data, err := model.NewDataTypeListWithRaw(model.DtDouble, []float64{1024.2, -2.10, 36897542.233, -5454545454, 8989.12125, 6, 7, 8, 9}) + So(err, ShouldBeNil) + mtx := model.NewMatrix(model.NewVector(data), nil, nil) + _, err = db.Upload(map[string]model.DataForm{"s": mtx}) + res, _ := db.RunScript("s") + ty, _ := db.RunScript("typestr(s)") + result := res.(*model.Matrix) + re := result.Data.Data.Value() + zx := []float64{1024.2, -2.10, 36897542.233, -5454545454, 8989.12125, 6, 7, 8, 9} + var j int + for i := 0; i < len(re); i++ { + if re[i] == zx[i] { + j++ + } + } + So(j, ShouldEqual, len(re)) + So(err, ShouldBeNil) + So(ty.String(), ShouldEqual, "string(FAST DOUBLE MATRIX)") + So(res.GetDataType(), ShouldEqual, model.DtDouble) + }) + So(db.Close(), ShouldBeNil) + }) +} +func Test_Matrix_UpLoad_DataType_date(t *testing.T) { + Convey("Test_matrix_date_upload:", t, func() { + db, err := api.NewSimpleDolphinDBClient(context.TODO(), setup.Address, setup.UserName, setup.Password) + So(err, ShouldBeNil) + Convey("Test_matrix_date_upload:", func() { + data, err := model.NewDataTypeListWithRaw(model.DtDate, []time.Time{time.Date(2022, 12, 31, 23, 59, 59, 999999999, time.UTC), time.Date(1969, 12, 31, 23, 59, 59, 999999999, time.UTC), time.Date(2006, 1, 2, 15, 4, 4, 999999999, time.UTC)}) + So(err, ShouldBeNil) + mtx := model.NewMatrix(model.NewVector(data), nil, nil) + _, err = db.Upload(map[string]model.DataForm{"s": mtx}) + res, _ := db.RunScript("s") + ty, _ := db.RunScript("typestr(s)") + result := res.(*model.Matrix) + re := result.Data.Data.Value() + time1 := []time.Time{time.Date(2022, 12, 31, 0, 0, 0, 0, time.UTC), time.Date(1969, 12, 31, 0, 0, 0, 0, time.UTC), time.Date(2006, 1, 2, 0, 0, 0, 0, time.UTC)} + var j int + for i := 0; i < len(re); i++ { + if re[i] == time1[i] { + j++ + } + } + So(j, ShouldEqual, len(re)) + So(err, ShouldBeNil) + So(ty.String(), ShouldEqual, "string(FAST DATE MATRIX)") + So(res.GetDataType(), ShouldEqual, model.DtDate) + }) + So(db.Close(), ShouldBeNil) + }) +} +func Test_Matrix_UpLoad_DataType_month(t *testing.T) { + Convey("Test_matrix_month_upload:", t, func() { + db, err := api.NewSimpleDolphinDBClient(context.TODO(), setup.Address, setup.UserName, setup.Password) + So(err, ShouldBeNil) + Convey("Test_matrix_month_upload:", func() { + data, err := model.NewDataTypeListWithRaw(model.DtMonth, []time.Time{time.Date(2022, 12, 31, 23, 59, 59, 999999999, time.UTC), time.Date(1969, 12, 31, 23, 59, 59, 999999999, time.UTC), time.Date(2006, 1, 2, 15, 4, 4, 999999999, time.UTC)}) + So(err, ShouldBeNil) + mtx := model.NewMatrix(model.NewVector(data), nil, nil) + _, err = db.Upload(map[string]model.DataForm{"s": mtx}) + res, _ := db.RunScript("s") + ty, _ := db.RunScript("typestr(s)") + result := res.(*model.Matrix) + re := result.Data.Data.Value() + time1 := []time.Time{time.Date(2022, 12, 1, 0, 0, 0, 0, time.UTC), time.Date(1969, 12, 1, 0, 0, 0, 0, time.UTC), time.Date(2006, 1, 1, 0, 0, 0, 0, time.UTC)} + var j int + for i := 0; i < len(re); i++ { + if re[i] == time1[i] { + j++ + } + } + So(j, ShouldEqual, len(re)) + So(err, ShouldBeNil) + So(ty.String(), ShouldEqual, "string(FAST MONTH MATRIX)") + So(res.GetDataType(), ShouldEqual, model.DtMonth) + }) + So(db.Close(), ShouldBeNil) + }) +} +func Test_Matrix_UpLoad_DataType_time(t *testing.T) { + Convey("Test_matrix_time_upload:", t, func() { + db, err := 
api.NewSimpleDolphinDBClient(context.TODO(), setup.Address, setup.UserName, setup.Password) + So(err, ShouldBeNil) + Convey("Test_matrix_time_upload:", func() { + data, err := model.NewDataTypeListWithRaw(model.DtTime, []time.Time{time.Date(2022, 12, 31, 23, 59, 59, 999999999, time.UTC), time.Date(1969, 12, 31, 23, 59, 59, 999999999, time.UTC), time.Date(2006, 1, 2, 15, 4, 4, 999999999, time.UTC)}) + So(err, ShouldBeNil) + mtx := model.NewMatrix(model.NewVector(data), nil, nil) + _, err = db.Upload(map[string]model.DataForm{"s": mtx}) + res, _ := db.RunScript("s") + ty, _ := db.RunScript("typestr(s)") + result := res.(*model.Matrix) + re := result.Data.Data.Value() + time1 := []time.Time{time.Date(1970, 1, 1, 23, 59, 59, 999000000, time.UTC), time.Date(1970, 1, 1, 23, 59, 59, 999000000, time.UTC), time.Date(1970, 1, 1, 15, 4, 4, 999000000, time.UTC)} + var j int + for i := 0; i < len(re); i++ { + if re[i] == time1[i] { + j++ + } + } + So(j, ShouldEqual, len(re)) + So(err, ShouldBeNil) + So(ty.String(), ShouldEqual, "string(FAST TIME MATRIX)") + So(res.GetDataType(), ShouldEqual, model.DtTime) + }) + So(db.Close(), ShouldBeNil) + }) +} +func Test_Matrix_UpLoad_DataType_minute(t *testing.T) { + Convey("Test_matrix_minute_upload:", t, func() { + db, err := api.NewSimpleDolphinDBClient(context.TODO(), setup.Address, setup.UserName, setup.Password) + So(err, ShouldBeNil) + Convey("Test_matrix_minute_upload:", func() { + data, err := model.NewDataTypeListWithRaw(model.DtMinute, []time.Time{time.Date(2022, 12, 31, 23, 59, 59, 999999999, time.UTC), time.Date(1969, 12, 31, 23, 59, 59, 999999999, time.UTC), time.Date(2006, 1, 2, 15, 4, 4, 999999999, time.UTC)}) + So(err, ShouldBeNil) + mtx := model.NewMatrix(model.NewVector(data), nil, nil) + _, err = db.Upload(map[string]model.DataForm{"s": mtx}) + res, _ := db.RunScript("s") + ty, _ := db.RunScript("typestr(s)") + result := res.(*model.Matrix) + re := result.Data.Data.Value() + time1 := []time.Time{time.Date(1970, 1, 1, 23, 59, 0, 0, time.UTC), time.Date(1970, 1, 1, 23, 59, 0, 0, time.UTC), time.Date(1970, 1, 1, 15, 4, 0, 0, time.UTC)} + var j int + for i := 0; i < len(re); i++ { + if re[i] == time1[i] { + j++ + } + } + So(j, ShouldEqual, len(re)) + So(err, ShouldBeNil) + So(ty.String(), ShouldEqual, "string(FAST MINUTE MATRIX)") + So(res.GetDataType(), ShouldEqual, model.DtMinute) + }) + So(db.Close(), ShouldBeNil) + }) +} +func Test_Matrix_UpLoad_DataType_second(t *testing.T) { + Convey("Test_matrix_second_upload:", t, func() { + db, err := api.NewSimpleDolphinDBClient(context.TODO(), setup.Address, setup.UserName, setup.Password) + So(err, ShouldBeNil) + Convey("Test_matrix_second_upload:", func() { + data, err := model.NewDataTypeListWithRaw(model.DtSecond, []time.Time{time.Date(2022, 12, 31, 23, 59, 59, 999999999, time.UTC), time.Date(1969, 12, 31, 23, 59, 59, 999999999, time.UTC), time.Date(2006, 1, 2, 15, 4, 4, 999999999, time.UTC)}) + So(err, ShouldBeNil) + mtx := model.NewMatrix(model.NewVector(data), nil, nil) + _, err = db.Upload(map[string]model.DataForm{"s": mtx}) + res, _ := db.RunScript("s") + ty, _ := db.RunScript("typestr(s)") + result := res.(*model.Matrix) + re := result.Data.Data.Value() + time1 := []time.Time{time.Date(1970, 1, 1, 23, 59, 59, 0, time.UTC), time.Date(1970, 1, 1, 23, 59, 59, 0, time.UTC), time.Date(1970, 1, 1, 15, 4, 4, 0, time.UTC)} + var j int + for i := 0; i < len(re); i++ { + if re[i] == time1[i] { + j++ + } + } + So(j, ShouldEqual, len(re)) + So(err, ShouldBeNil) + So(ty.String(), ShouldEqual, "string(FAST 
SECOND MATRIX)") + So(res.GetDataType(), ShouldEqual, model.DtSecond) + }) + So(db.Close(), ShouldBeNil) + }) +} +func Test_Matrix_UpLoad_DataType_datetime(t *testing.T) { + Convey("Test_matrix_datetime_upload:", t, func() { + db, err := api.NewSimpleDolphinDBClient(context.TODO(), setup.Address, setup.UserName, setup.Password) + So(err, ShouldBeNil) + Convey("Test_matrix_datetime_upload:", func() { + data, err := model.NewDataTypeListWithRaw(model.DtDatetime, []time.Time{time.Date(2022, 12, 31, 23, 59, 59, 999999999, time.UTC), time.Date(1969, 12, 31, 23, 59, 59, 999999999, time.UTC), time.Date(2006, 1, 2, 15, 4, 4, 999999999, time.UTC)}) + So(err, ShouldBeNil) + mtx := model.NewMatrix(model.NewVector(data), nil, nil) + _, err = db.Upload(map[string]model.DataForm{"s": mtx}) + res, _ := db.RunScript("s") + ty, _ := db.RunScript("typestr(s)") + result := res.(*model.Matrix) + re := result.Data.Data.Value() + time1 := []time.Time{time.Date(2022, 12, 31, 23, 59, 59, 0, time.UTC), time.Date(1969, 12, 31, 23, 59, 59, 0, time.UTC), time.Date(2006, 1, 2, 15, 4, 4, 0, time.UTC)} + var j int + for i := 0; i < len(re); i++ { + if re[i] == time1[i] { + j++ + } + } + So(j, ShouldEqual, len(re)) + So(err, ShouldBeNil) + So(ty.String(), ShouldEqual, "string(FAST DATETIME MATRIX)") + So(res.GetDataType(), ShouldEqual, model.DtDatetime) + }) + So(db.Close(), ShouldBeNil) + }) +} +func Test_Matrix_UpLoad_DataType_timestamp(t *testing.T) { + Convey("Test_matrix_timestamp_upload:", t, func() { + db, err := api.NewSimpleDolphinDBClient(context.TODO(), setup.Address, setup.UserName, setup.Password) + So(err, ShouldBeNil) + Convey("Test_matrix_timestamp_upload:", func() { + data, err := model.NewDataTypeListWithRaw(model.DtTimestamp, []time.Time{time.Date(2022, 12, 31, 23, 59, 59, 999999999, time.UTC), time.Date(1969, 12, 31, 23, 59, 59, 999999999, time.UTC), time.Date(2006, 1, 2, 15, 4, 4, 999999999, time.UTC)}) + So(err, ShouldBeNil) + mtx := model.NewMatrix(model.NewVector(data), nil, nil) + _, err = db.Upload(map[string]model.DataForm{"s": mtx}) + res, _ := db.RunScript("s") + ty, _ := db.RunScript("typestr(s)") + result := res.(*model.Matrix) + re := result.Data.Data.Value() + time1 := []time.Time{time.Date(2022, 12, 31, 23, 59, 59, 999000000, time.UTC), time.Date(1969, 12, 31, 23, 59, 59, 999000000, time.UTC), time.Date(2006, 1, 2, 15, 4, 4, 999000000, time.UTC)} + var j int + for i := 0; i < len(re); i++ { + if re[i] == time1[i] { + j++ + } + } + So(j, ShouldEqual, len(re)) + So(err, ShouldBeNil) + So(ty.String(), ShouldEqual, "string(FAST TIMESTAMP MATRIX)") + So(res.GetDataType(), ShouldEqual, model.DtTimestamp) + }) + So(db.Close(), ShouldBeNil) + }) +} +func Test_Matrix_UpLoad_DataType_nanotime(t *testing.T) { + Convey("Test_matrix_nanotime_upload:", t, func() { + db, err := api.NewSimpleDolphinDBClient(context.TODO(), setup.Address, setup.UserName, setup.Password) + So(err, ShouldBeNil) + Convey("Test_matrix_nanotime_upload:", func() { + data, err := model.NewDataTypeListWithRaw(model.DtNanoTime, []time.Time{time.Date(2022, 12, 31, 23, 59, 59, 999999999, time.UTC), time.Date(1969, 12, 31, 23, 59, 59, 999999999, time.UTC), time.Date(2006, 1, 2, 15, 4, 4, 999999999, time.UTC)}) + So(err, ShouldBeNil) + mtx := model.NewMatrix(model.NewVector(data), nil, nil) + _, err = db.Upload(map[string]model.DataForm{"s": mtx}) + res, _ := db.RunScript("s") + ty, _ := db.RunScript("typestr(s)") + result := res.(*model.Matrix) + re := result.Data.Data.Value() + time1 := []time.Time{time.Date(1970, 1, 1, 23, 59, 59, 
999999999, time.UTC), time.Date(1970, 1, 1, 23, 59, 59, 999999999, time.UTC), time.Date(1970, 1, 1, 15, 4, 4, 999999999, time.UTC)} + var j int + for i := 0; i < len(re); i++ { + if re[i] == time1[i] { + j++ + } + } + So(j, ShouldEqual, len(re)) + So(err, ShouldBeNil) + So(ty.String(), ShouldEqual, "string(FAST NANOTIME MATRIX)") + So(res.GetDataType(), ShouldEqual, model.DtNanoTime) + }) + So(db.Close(), ShouldBeNil) + }) +} +func Test_Matrix_UpLoad_DataType_nanotimestamp(t *testing.T) { + Convey("Test_matrix_nanotimestamp_upload:", t, func() { + db, err := api.NewSimpleDolphinDBClient(context.TODO(), setup.Address, setup.UserName, setup.Password) + So(err, ShouldBeNil) + Convey("Test_matrix_nanotimestamp_upload:", func() { + data, err := model.NewDataTypeListWithRaw(model.DtNanoTimestamp, []time.Time{time.Date(2022, 12, 31, 23, 59, 59, 999999999, time.UTC), time.Date(1969, 12, 31, 23, 59, 59, 999999999, time.UTC), time.Date(2006, 1, 2, 15, 4, 4, 999999999, time.UTC)}) + So(err, ShouldBeNil) + mtx := model.NewMatrix(model.NewVector(data), nil, nil) + _, err = db.Upload(map[string]model.DataForm{"s": mtx}) + res, _ := db.RunScript("s") + ty, _ := db.RunScript("typestr(s)") + result := res.(*model.Matrix) + re := result.Data.Data.Value() + time1 := []time.Time{time.Date(2022, 12, 31, 23, 59, 59, 999999999, time.UTC), time.Date(1969, 12, 31, 23, 59, 59, 999999999, time.UTC), time.Date(2006, 1, 2, 15, 4, 4, 999999999, time.UTC)} + var j int + for i := 0; i < len(re); i++ { + if re[i] == time1[i] { + j++ + } + } + So(j, ShouldEqual, len(re)) + So(err, ShouldBeNil) + So(ty.String(), ShouldEqual, "string(FAST NANOTIMESTAMP MATRIX)") + So(res.GetDataType(), ShouldEqual, model.DtNanoTimestamp) + }) + So(db.Close(), ShouldBeNil) + }) +} +func Test_Matrix_UpLoad_DataType_datehour(t *testing.T) { + Convey("Test_matrix_datehour_upload:", t, func() { + db, err := api.NewSimpleDolphinDBClient(context.TODO(), setup.Address, setup.UserName, setup.Password) + So(err, ShouldBeNil) + Convey("Test_matrix_datehour_upload:", func() { + data, err := model.NewDataTypeListWithRaw(model.DtDateHour, []time.Time{time.Date(2022, 12, 31, 23, 59, 59, 999999999, time.UTC), time.Date(1969, 12, 31, 23, 59, 59, 999999999, time.UTC), time.Date(2006, 1, 2, 15, 4, 4, 999999999, time.UTC)}) + So(err, ShouldBeNil) + mtx := model.NewMatrix(model.NewVector(data), nil, nil) + _, err = db.Upload(map[string]model.DataForm{"s": mtx}) + res, _ := db.RunScript("s") + ty, _ := db.RunScript("typestr(s)") + result := res.(*model.Matrix) + re := result.Data.Data.Value() + time1 := []time.Time{time.Date(2022, 12, 31, 23, 0, 0, 0, time.UTC), time.Date(1969, 12, 31, 23, 0, 0, 0, time.UTC), time.Date(2006, 1, 2, 15, 0, 0, 0, time.UTC)} + var j int + for i := 0; i < len(re); i++ { + if re[i] == time1[i] { + j++ + } + } + So(j, ShouldEqual, len(re)) + So(err, ShouldBeNil) + So(ty.String(), ShouldEqual, "string(FAST DATEHOUR MATRIX)") + So(res.GetDataType(), ShouldEqual, model.DtDateHour) + }) + So(db.Close(), ShouldBeNil) + }) +} +func Test_Matrix_UpLoad_DataType_complex(t *testing.T) { + Convey("Test_matrix_complex_upload:", t, func() { + db, err := api.NewSimpleDolphinDBClient(context.TODO(), setup.Address, setup.UserName, setup.Password) + So(err, ShouldBeNil) + Convey("Test_matrix_complex_upload:", func() { + data, err := model.NewDataTypeListWithRaw(model.DtComplex, [][2]float64{{1, 1}, {-1, -1024.5}, {1001022.4, -30028.75}}) + So(err, ShouldBeNil) + rl, err := model.NewDataTypeListWithRaw(model.DtInt, []int32{1}) + So(err, ShouldBeNil) + 
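+ // rl above is the single row label; cl below supplies the three column
+ // labels. model.NewMatrix accepts nil for either label vector, as the
+ // unlabeled uploads earlier in this file show, and the label lengths are
+ // expected to match the 1x3 shape of the complex data vector.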
cl, err := model.NewDataTypeListWithRaw(model.DtInt, []int32{1, 2, 3}) + So(err, ShouldBeNil) + mtx := model.NewMatrix(model.NewVector(data), model.NewVector(rl), model.NewVector(cl)) + _, err = db.Upload(map[string]model.DataForm{"s": mtx}) + So(err, ShouldBeNil) + res, _ := db.RunScript("s") + ty, _ := db.RunScript("typestr(s)") + result := res.(*model.Matrix) + re := result.Data.Data.Value() + zx := []string{"1.00000+1.00000i", "-1.00000+-1024.50000i", "1001022.40000+-30028.75000i"} + var j int + for i := 0; i < len(re); i++ { + if re[i] == zx[i] { + j++ + } + } + So(j, ShouldEqual, len(re)) + So(err, ShouldBeNil) + So(ty.String(), ShouldEqual, "string(FAST COMPLEX MATRIX)") + So(res.GetDataType(), ShouldEqual, model.DtComplex) + }) + So(db.Close(), ShouldBeNil) + }) +} +func Test_Matrix_UpLoad_DataType_big_array(t *testing.T) { + Convey("Test_matrix_big_array_upload:", t, func() { + db, err := api.NewSimpleDolphinDBClient(context.TODO(), setup.Address, setup.UserName, setup.Password) + So(err, ShouldBeNil) + Convey("Test_matrix_int_big_array_upload:", func() { + var i int32 + var j int + intv := []int32{} + for i = 0; i < 3000000*12; i += 12 { + intv = append(intv, i) + } + intv = append(intv, model.NullInt) + data, err := model.NewDataTypeListWithRaw(model.DtInt, intv) + So(err, ShouldBeNil) + mtx := model.NewMatrix(model.NewVector(data), nil, nil) + _, err = db.Upload(map[string]model.DataForm{"s": mtx}) + res, _ := db.RunScript("s") + ty, _ := db.RunScript("typestr(s)") + result := res.(*model.Matrix) + re := result.Data.Data.Value() + for i := 0; i < len(re); i++ { + if re[i] == intv[i] { + j++ + } + } + So(j, ShouldEqual, 3000001) + So(err, ShouldBeNil) + So(ty.String(), ShouldEqual, "string(FAST INT MATRIX)") + So(res.GetDataType(), ShouldEqual, model.DtInt) + }) + So(db.Close(), ShouldBeNil) + }) +} diff --git a/test/basicTypeTest/basicPair_test.go b/test/basicTypeTest/basicPair_test.go new file mode 100644 index 0000000..f990025 --- /dev/null +++ b/test/basicTypeTest/basicPair_test.go @@ -0,0 +1,1580 @@ +package test + +import ( + "bytes" + "context" + "fmt" + "testing" + "time" + + "github.com/dolphindb/api-go/api" + "github.com/dolphindb/api-go/dialer/protocol" + "github.com/dolphindb/api-go/model" + "github.com/dolphindb/api-go/test/setup" + . 
"github.com/smartystreets/goconvey/convey" +) + +func Test_Pair_DownLoad_int(t *testing.T) { + Convey("Test_pair_int:", t, func() { + db, err := api.NewSimpleDolphinDBClient(context.TODO(), setup.Address, setup.UserName, setup.Password) + So(err, ShouldBeNil) + Convey("Test_pair_int_not_null:", func() { + s, err := db.RunScript("a=(-1024:1048576);a") + So(err, ShouldBeNil) + result := s.(*model.Pair) + So(result.Vector.Data.Get(0).Value(), ShouldEqual, -1024) + So(result.Vector.Data.Get(1).Value(), ShouldEqual, 1048576) + reType := result.GetDataType() + So(reType, ShouldEqual, 4) + reTypeString := result.GetDataTypeString() + So(reTypeString, ShouldEqual, "int") + form := result.GetDataForm() + So(form, ShouldEqual, 2) + row := result.Rows() + So(row, ShouldEqual, 2) + }) + Convey("Test_pair_int_pre_one_nll:", func() { + s, err := db.RunScript("a=(:1048576);a") + So(err, ShouldBeNil) + result := s.(*model.Pair) + So(result.Vector.Data.Get(0).IsNull(), ShouldBeTrue) + So(result.Vector.Data.Get(1).Value(), ShouldEqual, 1048576) + reType := result.GetDataType() + So(reType, ShouldEqual, 4) + reTypeString := result.GetDataTypeString() + So(reTypeString, ShouldEqual, "int") + }) + Convey("Test_pair_int_last_one_nll:", func() { + s, err := db.RunScript("a=(1:);a") + So(err, ShouldBeNil) + result := s.(*model.Pair) + So(result.Vector.Data.Get(1).IsNull(), ShouldBeTrue) + So(result.Vector.Data.Get(0).Value(), ShouldEqual, 1) + reType := result.GetDataType() + So(reType, ShouldEqual, 4) + reTypeString := result.GetDataTypeString() + So(reTypeString, ShouldEqual, "int") + }) + So(db.Close(), ShouldBeNil) + }) +} +func Test_Pair_DownLoad_string(t *testing.T) { + Convey("Test_pair_string:", t, func() { + db, err := api.NewSimpleDolphinDBClient(context.TODO(), setup.Address, setup.UserName, setup.Password) + So(err, ShouldBeNil) + Convey("Test_pair_string_not_null:", func() { + s, err := db.RunScript("a=(`hello:`world);a") + So(err, ShouldBeNil) + result := s.(*model.Pair) + So(result.Vector.Data.Get(0).Value(), ShouldEqual, "hello") + So(result.Vector.Data.Get(1).Value(), ShouldEqual, "world") + reType := result.GetDataType() + So(reType, ShouldEqual, 18) + reTypeString := result.GetDataTypeString() + So(reTypeString, ShouldEqual, "string") + }) + Convey("Test_pair_string_pre_one_null:", func() { + s, err := db.RunScript("a=(`:`theworld);a") + So(err, ShouldBeNil) + result := s.(*model.Pair) + So(result.Vector.Data.Get(0).IsNull(), ShouldBeTrue) + So(result.Vector.Data.Get(1).Value(), ShouldEqual, "theworld") + reType := result.GetDataType() + So(reType, ShouldEqual, 18) + reTypeString := result.GetDataTypeString() + So(reTypeString, ShouldEqual, "string") + }) + Convey("Test_pair_string_last_one_null:", func() { + s, err := db.RunScript("a=(`thehello :`);a") + So(err, ShouldBeNil) + result := s.(*model.Pair) + So(result.Vector.Data.Get(1).IsNull(), ShouldBeTrue) + So(result.Vector.Data.Get(0).Value(), ShouldEqual, "thehello") + reType := result.GetDataType() + So(reType, ShouldEqual, 18) + reTypeString := result.GetDataTypeString() + So(reTypeString, ShouldEqual, "string") + }) + So(db.Close(), ShouldBeNil) + }) +} +func Test_Pair_DownLoad_char(t *testing.T) { + Convey("Test_pair_char:", t, func() { + db, err := api.NewSimpleDolphinDBClient(context.TODO(), setup.Address, setup.UserName, setup.Password) + So(err, ShouldBeNil) + Convey("Test_pair_char_not_null:", func() { + s, err := db.RunScript("a=(1c:124c);a") + So(err, ShouldBeNil) + result := s.(*model.Pair) + 
So(result.Vector.Data.Get(0).Value(), ShouldEqual, 1) + So(result.Vector.Data.Get(1).Value(), ShouldEqual, 124) + reType := result.GetDataType() + So(reType, ShouldEqual, 2) + reTypeString := result.GetDataTypeString() + So(reTypeString, ShouldEqual, "char") + }) + Convey("Test_pair_char_pre_one_null:", func() { + s, err := db.RunScript("a=( :124c);a") + So(err, ShouldBeNil) + result := s.(*model.Pair) + So(result.Vector.Data.Get(0).IsNull(), ShouldBeTrue) + So(result.Vector.Data.Get(1).Value(), ShouldEqual, 124) + reType := result.GetDataType() + So(reType, ShouldEqual, 2) + reTypeString := result.GetDataTypeString() + So(reTypeString, ShouldEqual, "char") + }) + Convey("Test_pair_char_last_one_null:", func() { + s, err := db.RunScript("a=(16c:);a") + So(err, ShouldBeNil) + result := s.(*model.Pair) + So(result.Vector.Data.Get(1).IsNull(), ShouldBeTrue) + So(result.Vector.Data.Get(0).Value(), ShouldEqual, 16) + reType := result.GetDataType() + So(reType, ShouldEqual, 2) + reTypeString := result.GetDataTypeString() + So(reTypeString, ShouldEqual, "char") + }) + So(db.Close(), ShouldBeNil) + }) +} +func Test_Pair_DownLoad_bool(t *testing.T) { + Convey("Test_pair_bool:", t, func() { + db, err := api.NewSimpleDolphinDBClient(context.TODO(), setup.Address, setup.UserName, setup.Password) + So(err, ShouldBeNil) + Convey("Test_pair_bool_not_null:", func() { + s, err := db.RunScript("a=(true:false);a") + So(err, ShouldBeNil) + result := s.(*model.Pair) + So(result.Vector.Data.Get(0).Value(), ShouldEqual, true) + So(result.Vector.Data.Get(1).Value(), ShouldEqual, false) + reType := result.GetDataType() + So(reType, ShouldEqual, 1) + reTypeString := result.GetDataTypeString() + So(reTypeString, ShouldEqual, "bool") + }) + Convey("Test_pair_bool_pre_one_null:", func() { + s, err := db.RunScript("a=(:false);a") + So(err, ShouldBeNil) + result := s.(*model.Pair) + So(result.Vector.Data.Get(0).IsNull(), ShouldBeTrue) + So(result.Vector.Data.Get(1).Value(), ShouldEqual, false) + reType := result.GetDataType() + So(reType, ShouldEqual, 1) + reTypeString := result.GetDataTypeString() + So(reTypeString, ShouldEqual, "bool") + }) + Convey("Test_pair_bool_last_one_null:", func() { + s, err := db.RunScript("a=(true :);a") + So(err, ShouldBeNil) + result := s.(*model.Pair) + So(result.Vector.Data.Get(1).IsNull(), ShouldBeTrue) + So(result.Vector.Data.Get(0).Value(), ShouldEqual, true) + reType := result.GetDataType() + So(reType, ShouldEqual, 1) + reTypeString := result.GetDataTypeString() + So(reTypeString, ShouldEqual, "bool") + }) + So(db.Close(), ShouldBeNil) + }) +} +func Test_Pair_DownLoad_short(t *testing.T) { + Convey("Test_pair_short:", t, func() { + db, err := api.NewSimpleDolphinDBClient(context.TODO(), setup.Address, setup.UserName, setup.Password) + So(err, ShouldBeNil) + Convey("Test_pair_short_not_null:", func() { + s, err := db.RunScript("a=(-2h:1024h);a") + So(err, ShouldBeNil) + result := s.(*model.Pair) + So(result.Vector.Data.Get(0).Value(), ShouldEqual, -2) + So(result.Vector.Data.Get(1).Value(), ShouldEqual, 1024) + reType := result.GetDataType() + So(reType, ShouldEqual, 3) + reTypeString := result.GetDataTypeString() + So(reTypeString, ShouldEqual, "short") + }) + Convey("Test_pair_short_pre_one_null:", func() { + s, err := db.RunScript("a=( :31689h);a") + So(err, ShouldBeNil) + result := s.(*model.Pair) + So(result.Vector.Data.Get(0).IsNull(), ShouldBeTrue) + So(result.Vector.Data.Get(1).Value(), ShouldEqual, 31689) + reType := result.GetDataType() + So(reType, ShouldEqual, 3) + 
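+ // The bare numeric codes asserted here and throughout this file
+ // (bool=1, char=2, short=3, int=4, long=5, float=15, double=16,
+ // string=18) are the DolphinDB type IDs that GetDataType() returns;
+ // GetDataTypeString() below is the human-readable counterpart.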
reTypeString := result.GetDataTypeString() + So(reTypeString, ShouldEqual, "short") + }) + Convey("Test_pair_short_last_one_null:", func() { + s, err := db.RunScript("a=(-15225h:);a") + So(err, ShouldBeNil) + result := s.(*model.Pair) + So(result.Vector.Data.Get(1).IsNull(), ShouldBeTrue) + So(result.Vector.Data.Get(0).Value(), ShouldEqual, -15225) + reType := result.GetDataType() + So(reType, ShouldEqual, 3) + reTypeString := result.GetDataTypeString() + So(reTypeString, ShouldEqual, "short") + by := bytes.NewBufferString("") + w := protocol.NewWriter(by) + err = result.Render(w, protocol.LittleEndian) + So(err, ShouldBeNil) + w.Flush() + by.Reset() + }) + So(db.Close(), ShouldBeNil) + }) +} +func Test_Pair_DownLoad_long(t *testing.T) { + Convey("Test_pair_long:", t, func() { + db, err := api.NewSimpleDolphinDBClient(context.TODO(), setup.Address, setup.UserName, setup.Password) + So(err, ShouldBeNil) + Convey("Test_pair_long_not_null:", func() { + s, err := db.RunScript("a=(-2l:1024l);a") + So(err, ShouldBeNil) + result := s.(*model.Pair) + So(result.Vector.Data.Get(0).Value(), ShouldEqual, -2) + So(result.Vector.Data.Get(1).Value(), ShouldEqual, 1024) + reType := result.GetDataType() + So(reType, ShouldEqual, 5) + reTypeString := result.GetDataTypeString() + So(reTypeString, ShouldEqual, "long") + }) + Convey("Test_pair_long_pre_one_null:", func() { + s, err := db.RunScript("a=(:1048576l);a") + So(err, ShouldBeNil) + result := s.(*model.Pair) + So(result.Vector.Data.Get(0).IsNull(), ShouldBeTrue) + So(result.Vector.Data.Get(1).Value(), ShouldEqual, 1048576) + reType := result.GetDataType() + So(reType, ShouldEqual, 5) + reTypeString := result.GetDataTypeString() + So(reTypeString, ShouldEqual, "long") + }) + Convey("Test_pair_long_last_one_null:", func() { + s, err := db.RunScript("a=(-1048576l:);a") + So(err, ShouldBeNil) + result := s.(*model.Pair) + So(result.Vector.Data.Get(1).IsNull(), ShouldBeTrue) + So(result.Vector.Data.Get(0).Value(), ShouldEqual, -1048576) + reType := result.GetDataType() + So(reType, ShouldEqual, 5) + reTypeString := result.GetDataTypeString() + So(reTypeString, ShouldEqual, "long") + }) + So(db.Close(), ShouldBeNil) + }) +} +func Test_Pair_DownLoad_double(t *testing.T) { + Convey("Test_pair_double:", t, func() { + db, err := api.NewSimpleDolphinDBClient(context.TODO(), setup.Address, setup.UserName, setup.Password) + So(err, ShouldBeNil) + Convey("Test_pair_double_not_null:", func() { + s, err := db.RunScript("a=(-2.0:1048576.0);a") + So(err, ShouldBeNil) + result := s.(*model.Pair) + So(result.Vector.Data.Get(0).Value(), ShouldEqual, -2.0) + So(result.Vector.Data.Get(1).Value(), ShouldEqual, 1048576) + reType := result.GetDataType() + So(reType, ShouldEqual, 16) + reTypeString := result.GetDataTypeString() + So(reTypeString, ShouldEqual, "double") + }) + Convey("Test_pair_double_pre_one_null:", func() { + s, err := db.RunScript("a=(:-1048576.0);a") + So(err, ShouldBeNil) + result := s.(*model.Pair) + So(result.Vector.Data.Get(0).IsNull(), ShouldBeTrue) + So(result.Vector.Data.Get(1).Value(), ShouldEqual, -1048576) + reType := result.GetDataType() + So(reType, ShouldEqual, 16) + reTypeString := result.GetDataTypeString() + So(reTypeString, ShouldEqual, "double") + }) + Convey("Test_pair_double_last_one_null:", func() { + s, err := db.RunScript("a=(1024.0:);a") + So(err, ShouldBeNil) + result := s.(*model.Pair) + So(result.Vector.Data.Get(1).IsNull(), ShouldBeTrue) + So(result.Vector.Data.Get(0).Value(), ShouldEqual, 1024) + reType := result.GetDataType() + 
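+ // These float/double assertions rely on exact equality, which holds
+ // because the same literals appear on both sides; for computed values,
+ // goconvey's ShouldAlmostEqual with an explicit tolerance would be the
+ // safer sketch, e.g. So(result.Vector.Data.Get(0).Value(), ShouldAlmostEqual, 1024.0, 1e-6).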
So(reType, ShouldEqual, 16) + reTypeString := result.GetDataTypeString() + So(reTypeString, ShouldEqual, "double") + }) + So(db.Close(), ShouldBeNil) + }) +} +func Test_Pair_DownLoad_float(t *testing.T) { + Convey("Test_pair_float:", t, func() { + db, err := api.NewSimpleDolphinDBClient(context.TODO(), setup.Address, setup.UserName, setup.Password) + So(err, ShouldBeNil) + Convey("Test_pair_float_not_null:", func() { + s, err := db.RunScript("a=(-2.0f:1048576.0f);a") + So(err, ShouldBeNil) + result := s.(*model.Pair) + So(result.Vector.Data.Get(0).Value(), ShouldEqual, -2.0) + So(result.Vector.Data.Get(1).Value(), ShouldEqual, 1048576.0) + reType := result.GetDataType() + So(reType, ShouldEqual, 15) + reTypeString := result.GetDataTypeString() + So(reTypeString, ShouldEqual, "float") + }) + Convey("Test_pair_float_pre_one_null:", func() { + s, err := db.RunScript("a=(:-1048576.0f);a") + So(err, ShouldBeNil) + result := s.(*model.Pair) + So(result.Vector.Data.Get(0).IsNull(), ShouldBeTrue) + So(result.Vector.Data.Get(1).Value(), ShouldEqual, -1048576.0) + reType := result.GetDataType() + So(reType, ShouldEqual, 15) + reTypeString := result.GetDataTypeString() + So(reTypeString, ShouldEqual, "float") + }) + Convey("Test_pair_float_last_one_null:", func() { + s, err := db.RunScript("a=(-1024.0f:);a") + So(err, ShouldBeNil) + result := s.(*model.Pair) + So(result.Vector.Data.Get(1).IsNull(), ShouldBeTrue) + So(result.Vector.Data.Get(0).Value(), ShouldEqual, -1024) + reType := result.GetDataType() + So(reType, ShouldEqual, 15) + reTypeString := result.GetDataTypeString() + So(reTypeString, ShouldEqual, "float") + }) + So(db.Close(), ShouldBeNil) + }) +} +func Test_Pair_DownLoad_date(t *testing.T) { + Convey("Test_pair_date:", t, func() { + db, err := api.NewSimpleDolphinDBClient(context.TODO(), setup.Address, setup.UserName, setup.Password) + So(err, ShouldBeNil) + Convey("Test_pair_date_not_null:", func() { + s, err := db.RunScript("a=(1969.12.31:2006.01.02);a") + So(err, ShouldBeNil) + result := s.(*model.Pair) + zx := []time.Time{time.Date(1969, 12, 31, 0, 0, 0, 0, time.UTC), time.Date(2006, 1, 2, 0, 0, 0, 0, time.UTC)} + So(result.Vector.Data.Get(0).Value(), ShouldEqual, zx[0]) + So(result.Vector.Data.Get(1).Value(), ShouldEqual, zx[1]) + reType := result.GetDataType() + So(reType, ShouldEqual, 6) + reTypeString := result.GetDataTypeString() + So(reTypeString, ShouldEqual, "date") + }) + Convey("Test_pair_date_pre_one_null:", func() { + s, err := db.RunScript("a=(:2006.01.02);a") + So(err, ShouldBeNil) + result := s.(*model.Pair) + zx := []time.Time{time.Date(1969, 12, 31, 0, 0, 0, 0, time.UTC), time.Date(2006, 1, 2, 0, 0, 0, 0, time.UTC)} + So(result.Vector.Data.Get(0).IsNull(), ShouldBeTrue) + So(result.Vector.Data.Get(1).Value(), ShouldEqual, zx[1]) + reType := result.GetDataType() + So(reType, ShouldEqual, 6) + reTypeString := result.GetDataTypeString() + So(reTypeString, ShouldEqual, "date") + }) + Convey("Test_pair_date_last_one_null:", func() { + s, err := db.RunScript("a=(1969.12.31:);a") + So(err, ShouldBeNil) + result := s.(*model.Pair) + zx := []time.Time{time.Date(1969, 12, 31, 0, 0, 0, 0, time.UTC), time.Date(2006, 1, 2, 0, 0, 0, 0, time.UTC)} + So(result.Vector.Data.Get(1).IsNull(), ShouldBeTrue) + So(result.Vector.Data.Get(0).Value(), ShouldEqual, zx[0]) + reType := result.GetDataType() + So(reType, ShouldEqual, 6) + reTypeString := result.GetDataTypeString() + So(reTypeString, ShouldEqual, "date") + }) + So(db.Close(), ShouldBeNil) + }) +} +func Test_Pair_DownLoad_month(t 
*testing.T) { + Convey("Test_pair_month:", t, func() { + db, err := api.NewSimpleDolphinDBClient(context.TODO(), setup.Address, setup.UserName, setup.Password) + So(err, ShouldBeNil) + Convey("Test_pair_month_not_null:", func() { + s, err := db.RunScript("a=(1969.12M:2006.01M);a") + So(err, ShouldBeNil) + result := s.(*model.Pair) + zx := []time.Time{time.Date(1969, 12, 1, 0, 0, 0, 0, time.UTC), time.Date(2006, 1, 1, 0, 0, 0, 0, time.UTC)} + So(result.Vector.Data.Get(0).Value(), ShouldEqual, zx[0]) + So(result.Vector.Data.Get(1).Value(), ShouldEqual, zx[1]) + reType := result.GetDataType() + So(reType, ShouldEqual, 7) + reTypeString := result.GetDataTypeString() + So(reTypeString, ShouldEqual, "month") + }) + Convey("Test_pair_month_pre_one_null:", func() { + s, err := db.RunScript("a=(:2006.01M);a") + So(err, ShouldBeNil) + result := s.(*model.Pair) + zx := []time.Time{time.Date(1969, 12, 1, 0, 0, 0, 0, time.UTC), time.Date(2006, 1, 1, 0, 0, 0, 0, time.UTC)} + So(result.Vector.Data.Get(0).IsNull(), ShouldBeTrue) + So(result.Vector.Data.Get(1).Value(), ShouldEqual, zx[1]) + reType := result.GetDataType() + So(reType, ShouldEqual, 7) + reTypeString := result.GetDataTypeString() + So(reTypeString, ShouldEqual, "month") + }) + Convey("Test_pair_month_last_one_null:", func() { + s, err := db.RunScript("a=(1969.12M:);a") + So(err, ShouldBeNil) + result := s.(*model.Pair) + zx := []time.Time{time.Date(1969, 12, 1, 0, 0, 0, 0, time.UTC), time.Date(2006, 1, 1, 0, 0, 0, 0, time.UTC)} + So(result.Vector.Data.Get(1).IsNull(), ShouldBeTrue) + So(result.Vector.Data.Get(0).Value(), ShouldEqual, zx[0]) + reType := result.GetDataType() + So(reType, ShouldEqual, 7) + reTypeString := result.GetDataTypeString() + So(reTypeString, ShouldEqual, "month") + }) + So(db.Close(), ShouldBeNil) + }) +} +func Test_Pair_DownLoad_time(t *testing.T) { + Convey("Test_pair_time:", t, func() { + db, err := api.NewSimpleDolphinDBClient(context.TODO(), setup.Address, setup.UserName, setup.Password) + So(err, ShouldBeNil) + Convey("Test_pair_time_not_null:", func() { + s, err := db.RunScript("a=(11:11:11.000:12:12:12.222);a") + So(err, ShouldBeNil) + result := s.(*model.Pair) + zx := []time.Time{time.Date(1970, 1, 1, 11, 11, 11, 0, time.UTC), time.Date(1970, 1, 1, 12, 12, 12, 222000000, time.UTC)} + So(result.Vector.Data.Get(0).Value(), ShouldEqual, zx[0]) + So(result.Vector.Data.Get(1).Value(), ShouldEqual, zx[1]) + reType := result.GetDataType() + So(reType, ShouldEqual, 8) + reTypeString := result.GetDataTypeString() + So(reTypeString, ShouldEqual, "time") + }) + Convey("Test_pair_time_pre_one_null:", func() { + s, err := db.RunScript("a=(:12:12:12.222);a") + So(err, ShouldBeNil) + result := s.(*model.Pair) + zx := []time.Time{time.Date(1970, 1, 1, 11, 11, 11, 0, time.UTC), time.Date(1970, 1, 1, 12, 12, 12, 222000000, time.UTC)} + So(result.Vector.Data.Get(0).IsNull(), ShouldBeTrue) + So(result.Vector.Data.Get(1).Value(), ShouldEqual, zx[1]) + reType := result.GetDataType() + So(reType, ShouldEqual, 8) + reTypeString := result.GetDataTypeString() + So(reTypeString, ShouldEqual, "time") + }) + Convey("Test_pair_time_last_one_null:", func() { + s, err := db.RunScript("a=(11:11:11.000:);a") + So(err, ShouldBeNil) + result := s.(*model.Pair) + zx := []time.Time{time.Date(1970, 1, 1, 11, 11, 11, 0, time.UTC), time.Date(1970, 1, 1, 12, 12, 12, 0, time.UTC)} + So(result.Vector.Data.Get(1).IsNull(), ShouldBeTrue) + So(result.Vector.Data.Get(0).Value(), ShouldEqual, zx[0]) + reType := result.GetDataType() + So(reType, ShouldEqual, 
8) + reTypeString := result.GetDataTypeString() + So(reTypeString, ShouldEqual, "time") + }) + So(db.Close(), ShouldBeNil) + }) +} +func Test_Pair_DownLoad_minute(t *testing.T) { + Convey("Test_pair_minute:", t, func() { + db, err := api.NewSimpleDolphinDBClient(context.TODO(), setup.Address, setup.UserName, setup.Password) + So(err, ShouldBeNil) + Convey("Test_pair_minute_not_null:", func() { + s, err := db.RunScript("a=(11:11m:12:12m);a") + So(err, ShouldBeNil) + result := s.(*model.Pair) + zx := []time.Time{time.Date(1970, 1, 1, 11, 11, 0, 0, time.UTC), time.Date(1970, 1, 1, 12, 12, 0, 0, time.UTC)} + So(result.Vector.Data.Get(0).Value(), ShouldEqual, zx[0]) + So(result.Vector.Data.Get(1).Value(), ShouldEqual, zx[1]) + reType := result.GetDataType() + So(reType, ShouldEqual, 9) + reTypeString := result.GetDataTypeString() + So(reTypeString, ShouldEqual, "minute") + }) + Convey("Test_pair_minute_pre_one_null:", func() { + s, err := db.RunScript("a=( :12:12m);a") + So(err, ShouldBeNil) + result := s.(*model.Pair) + zx := []time.Time{time.Date(1970, 1, 1, 11, 11, 0, 0, time.UTC), time.Date(1970, 1, 1, 12, 12, 0, 0, time.UTC)} + So(result.Vector.Data.Get(0).IsNull(), ShouldBeTrue) + So(result.Vector.Data.Get(1).Value(), ShouldEqual, zx[1]) + reType := result.GetDataType() + So(reType, ShouldEqual, 9) + reTypeString := result.GetDataTypeString() + So(reTypeString, ShouldEqual, "minute") + }) + Convey("Test_pair_minute_last_one_null:", func() { + s, err := db.RunScript("a=(11:11m:);a") + So(err, ShouldBeNil) + result := s.(*model.Pair) + zx := []time.Time{time.Date(1970, 1, 1, 11, 11, 0, 0, time.UTC), time.Date(1970, 1, 1, 12, 12, 0, 0, time.UTC)} + So(result.Vector.Data.Get(1).IsNull(), ShouldBeTrue) + So(result.Vector.Data.Get(0).Value(), ShouldEqual, zx[0]) + reType := result.GetDataType() + So(reType, ShouldEqual, 9) + reTypeString := result.GetDataTypeString() + So(reTypeString, ShouldEqual, "minute") + }) + So(db.Close(), ShouldBeNil) + }) +} +func Test_Pair_DownLoad_second(t *testing.T) { + Convey("Test_pair_second:", t, func() { + db, err := api.NewSimpleDolphinDBClient(context.TODO(), setup.Address, setup.UserName, setup.Password) + So(err, ShouldBeNil) + Convey("Test_pair_second_not_null:", func() { + s, err := db.RunScript("a=(11:11:11:12:12:12);a") + So(err, ShouldBeNil) + result := s.(*model.Pair) + zx := []time.Time{time.Date(1970, 1, 1, 11, 11, 11, 0, time.UTC), time.Date(1970, 1, 1, 12, 12, 12, 0, time.UTC)} + So(result.Vector.Data.Get(0).Value(), ShouldEqual, zx[0]) + So(result.Vector.Data.Get(1).Value(), ShouldEqual, zx[1]) + reType := result.GetDataType() + So(reType, ShouldEqual, 10) + reTypeString := result.GetDataTypeString() + So(reTypeString, ShouldEqual, "second") + }) + Convey("Test_pair_second_pre_one_null:", func() { + s, err := db.RunScript("a=(:12:12:12);a") + So(err, ShouldBeNil) + result := s.(*model.Pair) + zx := []time.Time{time.Date(1970, 1, 1, 11, 11, 11, 0, time.UTC), time.Date(1970, 1, 1, 12, 12, 12, 0, time.UTC)} + So(result.Vector.Data.Get(0).IsNull(), ShouldBeTrue) + So(result.Vector.Data.Get(1).Value(), ShouldEqual, zx[1]) + reType := result.GetDataType() + So(reType, ShouldEqual, 10) + reTypeString := result.GetDataTypeString() + So(reTypeString, ShouldEqual, "second") + }) + Convey("Test_pair_second_last_one_null:", func() { + s, err := db.RunScript("a=(11:11:11:);a") + So(err, ShouldBeNil) + result := s.(*model.Pair) + zx := []time.Time{time.Date(1970, 1, 1, 11, 11, 11, 0, time.UTC), time.Date(1970, 1, 1, 12, 12, 12, 0, time.UTC)} + 
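+ // As with the time and minute pairs above, a SECOND value carries no
+ // calendar date, so the client anchors it to 1970-01-01 and only the
+ // clock fields of the returned time.Time are significant.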
So(result.Vector.Data.Get(1).IsNull(), ShouldBeTrue) + So(result.Vector.Data.Get(0).Value(), ShouldEqual, zx[0]) + reType := result.GetDataType() + So(reType, ShouldEqual, 10) + reTypeString := result.GetDataTypeString() + So(reTypeString, ShouldEqual, "second") + }) + So(db.Close(), ShouldBeNil) + }) +} +func Test_Pair_DownLoad_datetime(t *testing.T) { + Convey("Test_pair_datetime:", t, func() { + db, err := api.NewSimpleDolphinDBClient(context.TODO(), setup.Address, setup.UserName, setup.Password) + So(err, ShouldBeNil) + Convey("Test_pair_datetime_not_null:", func() { + s, err := db.RunScript("a=(1969.12.31 23:59:59:2006.01.02 15:04:04);a") + So(err, ShouldBeNil) + result := s.(*model.Pair) + zx := []time.Time{time.Date(1969, 12, 31, 23, 59, 59, 0, time.UTC), time.Date(2006, 1, 2, 15, 4, 4, 0, time.UTC)} + So(result.Vector.Data.Get(0).Value(), ShouldEqual, zx[0]) + So(result.Vector.Data.Get(1).Value(), ShouldEqual, zx[1]) + reType := result.GetDataType() + So(reType, ShouldEqual, 11) + reTypeString := result.GetDataTypeString() + So(reTypeString, ShouldEqual, "datetime") + }) + Convey("Test_pair_datetime_pre_one_null:", func() { + s, err := db.RunScript("a=(:2006.01.02 15:04:04);a") + So(err, ShouldBeNil) + result := s.(*model.Pair) + zx := []time.Time{time.Date(1969, 12, 31, 23, 59, 59, 0, time.UTC), time.Date(2006, 1, 2, 15, 4, 4, 0, time.UTC)} + So(result.Vector.Data.Get(0).IsNull(), ShouldBeTrue) + So(result.Vector.Data.Get(1).Value(), ShouldEqual, zx[1]) + reType := result.GetDataType() + So(reType, ShouldEqual, 11) + reTypeString := result.GetDataTypeString() + So(reTypeString, ShouldEqual, "datetime") + }) + Convey("Test_pair_datetime_last_one_null:", func() { + s, err := db.RunScript("a=(1969.12.31 23:59:59:);a") + So(err, ShouldBeNil) + result := s.(*model.Pair) + zx := []time.Time{time.Date(1969, 12, 31, 23, 59, 59, 0, time.UTC), time.Date(2006, 1, 2, 15, 4, 4, 0, time.UTC)} + So(result.Vector.Data.Get(1).IsNull(), ShouldBeTrue) + So(result.Vector.Data.Get(0).Value(), ShouldEqual, zx[0]) + reType := result.GetDataType() + So(reType, ShouldEqual, 11) + reTypeString := result.GetDataTypeString() + So(reTypeString, ShouldEqual, "datetime") + }) + So(db.Close(), ShouldBeNil) + }) +} +func Test_Pair_DownLoad_timestamp(t *testing.T) { + Convey("Test_pair_timestamp:", t, func() { + db, err := api.NewSimpleDolphinDBClient(context.TODO(), setup.Address, setup.UserName, setup.Password) + So(err, ShouldBeNil) + Convey("Test_pair_timestamp_not_null:", func() { + s, err := db.RunScript("a=(1969.12.31 23:59:59.999:2006.01.02 15:04:04.999);a") + So(err, ShouldBeNil) + result := s.(*model.Pair) + zx := []time.Time{time.Date(1969, 12, 31, 23, 59, 59, 999000000, time.UTC), time.Date(2006, 1, 2, 15, 4, 4, 999000000, time.UTC)} + So(result.Vector.Data.Get(0).Value(), ShouldEqual, zx[0]) + So(result.Vector.Data.Get(1).Value(), ShouldEqual, zx[1]) + reType := result.GetDataType() + So(reType, ShouldEqual, 12) + reTypeString := result.GetDataTypeString() + So(reTypeString, ShouldEqual, "timestamp") + }) + Convey("Test_pair_timestamp_pre_one_null:", func() { + s, err := db.RunScript("a=(:2006.01.02 15:04:04.999);a") + So(err, ShouldBeNil) + result := s.(*model.Pair) + zx := []time.Time{time.Date(1969, 12, 31, 23, 59, 59, 999000000, time.UTC), time.Date(2006, 1, 2, 15, 4, 4, 999000000, time.UTC)} + So(result.Vector.Data.Get(0).IsNull(), ShouldBeTrue) + So(result.Vector.Data.Get(1).Value(), ShouldEqual, zx[1]) + reType := result.GetDataType() + So(reType, ShouldEqual, 12) + reTypeString := 
result.GetDataTypeString() + So(reTypeString, ShouldEqual, "timestamp") + }) + Convey("Test_pair_timestamp_last_one_null:", func() { + s, err := db.RunScript("a=(1969.12.31 23:59:59.999:);a") + So(err, ShouldBeNil) + result := s.(*model.Pair) + zx := []time.Time{time.Date(1969, 12, 31, 23, 59, 59, 999000000, time.UTC), time.Date(2006, 1, 2, 15, 4, 4, 999000000, time.UTC)} + So(result.Vector.Data.Get(1).IsNull(), ShouldBeTrue) + So(result.Vector.Data.Get(0).Value(), ShouldEqual, zx[0]) + reType := result.GetDataType() + So(reType, ShouldEqual, 12) + reTypeString := result.GetDataTypeString() + So(reTypeString, ShouldEqual, "timestamp") + }) + So(db.Close(), ShouldBeNil) + }) +} +func Test_Pair_DownLoad_nanotime(t *testing.T) { + Convey("Test_pair_nanotime:", t, func() { + db, err := api.NewSimpleDolphinDBClient(context.TODO(), setup.Address, setup.UserName, setup.Password) + So(err, ShouldBeNil) + Convey("Test_pair_nanotime_not_null:", func() { + s, err := db.RunScript("a=(23:59:59.999999999:15:04:04.999999999);a") + So(err, ShouldBeNil) + result := s.(*model.Pair) + zx := []time.Time{time.Date(1970, 1, 1, 23, 59, 59, 999999999, time.UTC), time.Date(1970, 1, 1, 15, 4, 4, 999999999, time.UTC)} + So(result.Vector.Data.Get(0).Value(), ShouldEqual, zx[0]) + So(result.Vector.Data.Get(1).Value(), ShouldEqual, zx[1]) + reType := result.GetDataType() + So(reType, ShouldEqual, 13) + reTypeString := result.GetDataTypeString() + So(reTypeString, ShouldEqual, "nanotime") + }) + Convey("Test_pair_nanotime_pre_one_null:", func() { + s, err := db.RunScript("a=(:15:04:04.999999999);a") + So(err, ShouldBeNil) + result := s.(*model.Pair) + zx := []time.Time{time.Date(1970, 1, 1, 23, 59, 59, 999999999, time.UTC), time.Date(1970, 1, 1, 15, 4, 4, 999999999, time.UTC)} + So(result.Vector.Data.Get(0).IsNull(), ShouldBeTrue) + So(result.Vector.Data.Get(1).Value(), ShouldEqual, zx[1]) + reType := result.GetDataType() + So(reType, ShouldEqual, 13) + reTypeString := result.GetDataTypeString() + So(reTypeString, ShouldEqual, "nanotime") + }) + Convey("Test_pair_nanotime_last_one_null:", func() { + s, err := db.RunScript("a=(23:59:59.999999999:);a") + So(err, ShouldBeNil) + result := s.(*model.Pair) + zx := []time.Time{time.Date(1970, 1, 1, 23, 59, 59, 999999999, time.UTC), time.Date(1970, 1, 1, 15, 4, 4, 999999999, time.UTC)} + So(result.Vector.Data.Get(0).Value(), ShouldEqual, zx[0]) + So(result.Vector.Data.Get(1).IsNull(), ShouldBeTrue) + reType := result.GetDataType() + So(reType, ShouldEqual, 13) + reTypeString := result.GetDataTypeString() + So(reTypeString, ShouldEqual, "nanotime") + }) + So(db.Close(), ShouldBeNil) + }) +} +func Test_Pair_DownLoad_nanotimestamp(t *testing.T) { + Convey("Test_pair_nanotimestamp:", t, func() { + db, err := api.NewSimpleDolphinDBClient(context.TODO(), setup.Address, setup.UserName, setup.Password) + So(err, ShouldBeNil) + Convey("Test_pair_nanotimestamp_not_null:", func() { + s, err := db.RunScript("a=(1969.12.31 23:59:59.999999999:2006.01.02 15:04:04.999999999);a") + So(err, ShouldBeNil) + result := s.(*model.Pair) + zx := []time.Time{time.Date(1969, 12, 31, 23, 59, 59, 999999999, time.UTC), time.Date(2006, 1, 2, 15, 4, 4, 999999999, time.UTC)} + So(result.Vector.Data.Get(0).Value(), ShouldEqual, zx[0]) + So(result.Vector.Data.Get(1).Value(), ShouldEqual, zx[1]) + reType := result.GetDataType() + So(reType, ShouldEqual, 14) + reTypeString := result.GetDataTypeString() + So(reTypeString, ShouldEqual, "nanotimestamp") + }) + Convey("Test_pair_nanotimestamp_pre_one_null:", func() { 
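+ // The pair literal leaves one side empty to encode a null: (:y) nulls
+ // the first element and (x:) the second, which is exactly what the
+ // IsNull() assertions in these subtests exercise.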
+ s, err := db.RunScript("a=(:2006.01.02 15:04:04.999999999);a") + So(err, ShouldBeNil) + result := s.(*model.Pair) + zx := []time.Time{time.Date(1969, 12, 31, 23, 59, 59, 999999999, time.UTC), time.Date(2006, 1, 2, 15, 4, 4, 999999999, time.UTC)} + So(result.Vector.Data.Get(0).IsNull(), ShouldBeTrue) + So(result.Vector.Data.Get(1).Value(), ShouldEqual, zx[1]) + reType := result.GetDataType() + So(reType, ShouldEqual, 14) + reTypeString := result.GetDataTypeString() + So(reTypeString, ShouldEqual, "nanotimestamp") + }) + Convey("Test_pair_nanotimestamp_last_one_null :", func() { + s, err := db.RunScript("a=(1969.12.31 23:59:59.999999999:);a") + So(err, ShouldBeNil) + result := s.(*model.Pair) + zx := []time.Time{time.Date(1969, 12, 31, 23, 59, 59, 999999999, time.UTC), time.Date(2006, 1, 2, 15, 4, 4, 999999999, time.UTC)} + So(result.Vector.Data.Get(1).IsNull(), ShouldBeTrue) + So(result.Vector.Data.Get(0).Value(), ShouldEqual, zx[0]) + reType := result.GetDataType() + So(reType, ShouldEqual, 14) + reTypeString := result.GetDataTypeString() + So(reTypeString, ShouldEqual, "nanotimestamp") + }) + So(db.Close(), ShouldBeNil) + }) +} +func Test_Pair_DownLoad_datehour(t *testing.T) { + Convey("Test_pair_datehour:", t, func() { + db, err := api.NewSimpleDolphinDBClient(context.TODO(), setup.Address, setup.UserName, setup.Password) + So(err, ShouldBeNil) + Convey("Test_pair_datehour_not_null:", func() { + s, err := db.RunScript("a=(datehour(1969.12.31 23:59:59.999999999):datehour(2006.01.02 15:04:04.999999999));a") + So(err, ShouldBeNil) + result := s.(*model.Pair) + zx := []time.Time{time.Date(1969, 12, 31, 23, 0, 0, 0, time.UTC), time.Date(2006, 1, 2, 15, 0, 0, 0, time.UTC)} + So(result.Vector.Data.Get(0).Value(), ShouldEqual, zx[0]) + So(result.Vector.Data.Get(1).Value(), ShouldEqual, zx[1]) + reType := result.GetDataType() + So(reType, ShouldEqual, 28) + reTypeString := result.GetDataTypeString() + So(reTypeString, ShouldEqual, "dateHour") + }) + Convey("Test_pair_datehour_pre_one_null :", func() { + s, err := db.RunScript("a=(:datehour(2006.01.02 15:04:04.999999999));a") + So(err, ShouldBeNil) + result := s.(*model.Pair) + zx := []time.Time{time.Date(1969, 12, 31, 23, 0, 0, 0, time.UTC), time.Date(2006, 1, 2, 15, 0, 0, 0, time.UTC)} + So(result.Vector.Data.Get(0).IsNull(), ShouldBeTrue) + So(result.Vector.Data.Get(1).Value(), ShouldEqual, zx[1]) + reType := result.GetDataType() + So(reType, ShouldEqual, 28) + reTypeString := result.GetDataTypeString() + So(reTypeString, ShouldEqual, "dateHour") + }) + Convey("Test_pair_datehour_last_one_null:", func() { + s, err := db.RunScript("a=(datehour(1969.12.31 23:59:59.999999999):);a") + So(err, ShouldBeNil) + result := s.(*model.Pair) + zx := []time.Time{time.Date(1969, 12, 31, 23, 0, 0, 0, time.UTC), time.Date(2006, 1, 2, 15, 0, 0, 0, time.UTC)} + So(result.Vector.Data.Get(1).IsNull(), ShouldBeTrue) + So(result.Vector.Data.Get(0).Value(), ShouldEqual, zx[0]) + reType := result.GetDataType() + So(reType, ShouldEqual, 28) + reTypeString := result.GetDataTypeString() + So(reTypeString, ShouldEqual, "dateHour") + }) + So(db.Close(), ShouldBeNil) + }) +} +func Test_Pair_DownLoad_uuid(t *testing.T) { + Convey("Test_pair_uuid:", t, func() { + db, err := api.NewSimpleDolphinDBClient(context.TODO(), setup.Address, setup.UserName, setup.Password) + So(err, ShouldBeNil) + Convey("Test_pair_uuid_not_null:", func() { + s, err := db.RunScript("a = (uuid('e5345c41-da6d-d400-1b5a-6ca6e8a52ec0'):uuid('f521c024-3a1d-b043-fb68-822b8ba047a8'));a") + So(err, 
ShouldBeNil) + result := s.(*model.Pair) + zx := []string{"e5345c41-da6d-d400-1b5a-6ca6e8a52ec0", "f521c024-3a1d-b043-fb68-822b8ba047a8"} + So(result.Vector.Data.Get(0).Value(), ShouldEqual, zx[0]) + So(result.Vector.Data.Get(1).Value(), ShouldEqual, zx[1]) + reType := result.GetDataType() + So(reType, ShouldEqual, 19) + reTypeString := result.GetDataTypeString() + So(reTypeString, ShouldEqual, "uuid") + }) + Convey("Test_pair_uuid_pre_one_null:", func() { + s, err := db.RunScript("a = (:uuid('f521c024-3a1d-b043-fb68-822b8ba047a8'));a") + So(err, ShouldBeNil) + result := s.(*model.Pair) + zx := []string{"e5345c41-da6d-d400-1b5a-6ca6e8a52ec0", "f521c024-3a1d-b043-fb68-822b8ba047a8"} + So(result.Vector.Data.Get(0).IsNull(), ShouldBeTrue) + So(result.Vector.Data.Get(1).Value(), ShouldEqual, zx[1]) + reType := result.GetDataType() + So(reType, ShouldEqual, 19) + reTypeString := result.GetDataTypeString() + So(reTypeString, ShouldEqual, "uuid") + }) + Convey("Test_pair_uuid_last_one_null:", func() { + s, err := db.RunScript("a = (uuid('e5345c41-da6d-d400-1b5a-6ca6e8a52ec0'):);a") + So(err, ShouldBeNil) + result := s.(*model.Pair) + zx := []string{"e5345c41-da6d-d400-1b5a-6ca6e8a52ec0", "f521c024-3a1d-b043-fb68-822b8ba047a8"} + So(result.Vector.Data.Get(0).Value(), ShouldEqual, zx[0]) + So(result.Vector.Data.Get(1).IsNull(), ShouldBeTrue) + reType := result.GetDataType() + So(reType, ShouldEqual, 19) + reTypeString := result.GetDataTypeString() + So(reTypeString, ShouldEqual, "uuid") + }) + So(db.Close(), ShouldBeNil) + }) +} +func Test_Pair_DownLoad_ipaddr(t *testing.T) { + Convey("Test_pair_ipaddr:", t, func() { + db, err := api.NewSimpleDolphinDBClient(context.TODO(), setup.Address, setup.UserName, setup.Password) + So(err, ShouldBeNil) + Convey("Test_pair_ipaddr_not_null:", func() { + s, err := db.RunScript("a = (ipaddr('461c:7fa1:7f3c:7249:5278:c610:f595:d174'):ipaddr('3de8:13c6:df5f:bcd5:7605:3827:e37a:3a72'));a") + So(err, ShouldBeNil) + result := s.(*model.Pair) + zx := []string{"461c:7fa1:7f3c:7249:5278:c610:f595:d174", "3de8:13c6:df5f:bcd5:7605:3827:e37a:3a72"} + So(result.Vector.Data.Get(0).Value(), ShouldEqual, zx[0]) + So(result.Vector.Data.Get(1).Value(), ShouldEqual, zx[1]) + reType := result.GetDataType() + So(reType, ShouldEqual, 30) + reTypeString := result.GetDataTypeString() + So(reTypeString, ShouldEqual, "IP") + }) + Convey("Test_pair_ipaddr_number:", func() { + s, err := db.RunScript("a = (ipaddr('192.13.1.33'):ipaddr('191.168.1.13'));a") + So(err, ShouldBeNil) + result := s.(*model.Pair) + zx := []string{"192.13.1.33", "191.168.1.13"} + So(result.Vector.Data.Get(0).Value(), ShouldEqual, zx[0]) + So(result.Vector.Data.Get(1).Value(), ShouldEqual, zx[1]) + reType := result.GetDataType() + So(reType, ShouldEqual, 30) + reTypeString := result.GetDataTypeString() + So(reTypeString, ShouldEqual, "IP") + }) + Convey("Test_pair_ipaddr_pre_one_null:", func() { + s, err := db.RunScript("a = (:ipaddr('3de8:13c6:df5f:bcd5:7605:3827:e37a:3a72'));a") + So(err, ShouldBeNil) + result := s.(*model.Pair) + zx := []string{"461c:7fa1:7f3c:7249:5278:c610:f595:d174", "3de8:13c6:df5f:bcd5:7605:3827:e37a:3a72"} + So(result.Vector.Data.Get(0).IsNull(), ShouldBeTrue) + So(result.Vector.Data.Get(1).Value(), ShouldEqual, zx[1]) + reType := result.GetDataType() + So(reType, ShouldEqual, 30) + reTypeString := result.GetDataTypeString() + So(reTypeString, ShouldEqual, "IP") + }) + Convey("Test_pair_ipaddr_last_one_null:", func() { + s, err := db.RunScript("a = 
(ipaddr('461c:7fa1:7f3c:7249:5278:c610:f595:d174'):);a") + So(err, ShouldBeNil) + result := s.(*model.Pair) + zx := []string{"461c:7fa1:7f3c:7249:5278:c610:f595:d174", "3de8:13c6:df5f:bcd5:7605:3827:e37a:3a72"} + So(result.Vector.Data.Get(0).Value(), ShouldEqual, zx[0]) + So(result.Vector.Data.Get(1).IsNull(), ShouldBeTrue) + reType := result.GetDataType() + So(reType, ShouldEqual, 30) + reTypeString := result.GetDataTypeString() + So(reTypeString, ShouldEqual, "IP") + }) + So(db.Close(), ShouldBeNil) + }) +} +func Test_Pair_DownLoad_int128(t *testing.T) { + Convey("Test_pair_int128:", t, func() { + db, err := api.NewSimpleDolphinDBClient(context.TODO(), setup.Address, setup.UserName, setup.Password) + So(err, ShouldBeNil) + Convey("Test_pair_int128_not_null:", func() { + s, err := db.RunScript("a = (int128('e1671797c52e15f763380b45e841ec32'):int128('e1671797c52e15f763380b45e841ec33'));a") + So(err, ShouldBeNil) + result := s.(*model.Pair) + zx := []string{"e1671797c52e15f763380b45e841ec32", "e1671797c52e15f763380b45e841ec33"} + So(result.Vector.Data.Get(0).Value(), ShouldEqual, zx[0]) + So(result.Vector.Data.Get(1).Value(), ShouldEqual, zx[1]) + reType := result.GetDataType() + So(reType, ShouldEqual, 31) + reTypeString := result.GetDataTypeString() + So(reTypeString, ShouldEqual, "int128") + }) + Convey("Test_pair_int128_pre_one_null:", func() { + s, err := db.RunScript("a = (:int128('e1671797c52e15f763380b45e841ec33'));a") + So(err, ShouldBeNil) + result := s.(*model.Pair) + zx := []string{"e1671797c52e15f763380b45e841ec32", "e1671797c52e15f763380b45e841ec33"} + So(result.Vector.Data.Get(0).IsNull(), ShouldBeTrue) + So(result.Vector.Data.Get(1).Value(), ShouldEqual, zx[1]) + reType := result.GetDataType() + So(reType, ShouldEqual, 31) + reTypeString := result.GetDataTypeString() + So(reTypeString, ShouldEqual, "int128") + }) + Convey("Test_pair_int128_last_one_null:", func() { + s, err := db.RunScript("a = (int128('e1671797c52e15f763380b45e841ec32'):);a") + So(err, ShouldBeNil) + result := s.(*model.Pair) + zx := []string{"e1671797c52e15f763380b45e841ec32", "e1671797c52e15f763380b45e841ec33"} + So(result.Vector.Data.Get(0).Value(), ShouldEqual, zx[0]) + So(result.Vector.Data.Get(1).IsNull(), ShouldBeTrue) + reType := result.GetDataType() + So(reType, ShouldEqual, 31) + reTypeString := result.GetDataTypeString() + So(reTypeString, ShouldEqual, "int128") + }) + So(db.Close(), ShouldBeNil) + }) +} +func Test_Pair_DownLoad_point(t *testing.T) { + Convey("Test_pair_point:", t, func() { + db, err := api.NewSimpleDolphinDBClient(context.TODO(), setup.Address, setup.UserName, setup.Password) + So(err, ShouldBeNil) + Convey("Test_pair_point_not_null:", func() { + s, err := db.RunScript("a = (point(2,3):point(5,2));a") + So(err, ShouldBeNil) + result := s.(*model.Pair) + zx := []string{"(2.00000, 3.00000)", "(5.00000, 2.00000)"} + So(result.Vector.Data.Get(0).Value(), ShouldEqual, zx[0]) + So(result.Vector.Data.Get(1).Value(), ShouldEqual, zx[1]) + reType := result.GetDataType() + So(reType, ShouldEqual, 35) + reTypeString := result.GetDataTypeString() + So(reTypeString, ShouldEqual, "point") + }) + Convey("Test_pair_point_pre_one_null:", func() { + s, err := db.RunScript("a = (:point(5,2));a") + So(err, ShouldBeNil) + result := s.(*model.Pair) + zx := []string{"(0.00000, 0.00000)", "(5.00000, 2.00000)"} + So(result.Vector.Data.Get(0).Value(), ShouldEqual, zx[0]) + So(result.Vector.Data.Get(1).Value(), ShouldEqual, zx[1]) + reType := result.GetDataType() + So(reType, ShouldEqual, 35) + 
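// POINT values download as formatted strings with five decimal places, and a null half of the pair renders as "(0.00000, 0.00000)" rather than reporting IsNull, so these cases compare string forms. +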
reTypeString := result.GetDataTypeString() + So(reTypeString, ShouldEqual, "point") + }) + Convey("Test_pair_point_last_one_null:", func() { + s, err := db.RunScript("a = (point(2,3):);a") + So(err, ShouldBeNil) + result := s.(*model.Pair) + zx := []string{"(2.00000, 3.00000)", "(0.00000, 0.00000)"} + So(result.Vector.Data.Get(0).Value(), ShouldEqual, zx[0]) + So(result.Vector.Data.Get(1).Value(), ShouldEqual, zx[1]) + reType := result.GetDataType() + So(reType, ShouldEqual, 35) + reTypeString := result.GetDataTypeString() + So(reTypeString, ShouldEqual, "point") + }) + So(db.Close(), ShouldBeNil) + }) +} +func Test_Pair_DownLoad_complex(t *testing.T) { + Convey("Test_pair_complex:", t, func() { + db, err := api.NewSimpleDolphinDBClient(context.TODO(), setup.Address, setup.UserName, setup.Password) + So(err, ShouldBeNil) + Convey("Test_pair_complex_not_null:", func() { + s, err := db.RunScript("a = (complex(2,3):complex(5,2));a") + So(err, ShouldBeNil) + result := s.(*model.Pair) + zx := []string{"2.00000+3.00000i", "5.00000+2.00000i"} + So(result.Vector.Data.Get(0).Value(), ShouldEqual, zx[0]) + So(result.Vector.Data.Get(1).Value(), ShouldEqual, zx[1]) + reType := result.GetDataType() + So(reType, ShouldEqual, 34) + reTypeString := result.GetDataTypeString() + So(reTypeString, ShouldEqual, "complex") + }) + Convey("Test_pair_complex_pre_one_null:", func() { + s, err := db.RunScript("a = (:complex(5,2));a") + So(err, ShouldBeNil) + result := s.(*model.Pair) + zx := []string{"0.00000+0.00000i", "5.00000+2.00000i"} + So(result.Vector.Data.Get(0).Value(), ShouldEqual, zx[0]) + So(result.Vector.Data.Get(1).Value(), ShouldEqual, zx[1]) + reType := result.GetDataType() + So(reType, ShouldEqual, 34) + reTypeString := result.GetDataTypeString() + So(reTypeString, ShouldEqual, "complex") + }) + Convey("Test_pair_complex_last_one_null:", func() { + s, err := db.RunScript("a = (complex(2,3):);a") + So(err, ShouldBeNil) + result := s.(*model.Pair) + zx := []string{"2.00000+3.00000i", "0.00000+0.00000i"} + So(result.Vector.Data.Get(0).Value(), ShouldEqual, zx[0]) + So(result.Vector.Data.Get(1).Value(), ShouldEqual, zx[1]) + reType := result.GetDataType() + So(reType, ShouldEqual, 34) + reTypeString := result.GetDataTypeString() + So(reTypeString, ShouldEqual, "complex") + }) + So(db.Close(), ShouldBeNil) + }) +} +func Test_Pair_DownLoad_duration(t *testing.T) { + Convey("Test_pair_duration:", t, func() { + db, err := api.NewSimpleDolphinDBClient(context.TODO(), setup.Address, setup.UserName, setup.Password) + So(err, ShouldBeNil) + Convey("Test_pair_duration_not_null:", func() { + s, err := db.RunScript("a = 1H:1s;a") + So(err, ShouldBeNil) + result := s.(*model.Pair) + zx := []string{"1H", "1s"} + So(result.Vector.Data.Get(0).Value(), ShouldEqual, zx[0]) + So(result.Vector.Data.Get(1).Value(), ShouldEqual, zx[1]) + reType := result.GetDataType() + So(reType, ShouldEqual, 36) + reTypeString := result.GetDataTypeString() + So(reTypeString, ShouldEqual, "duration") + }) + Convey("Test_pair_duration_pre_one_null:", func() { + s, err := db.RunScript("a = :1s;a") + So(err, ShouldBeNil) + result := s.(*model.Pair) + zx := []string{"0", "1s"} + So(result.Vector.Data.Get(0).Value(), ShouldEqual, zx[0]) + So(result.Vector.Data.Get(1).Value(), ShouldEqual, zx[1]) + reType := result.GetDataType() + So(reType, ShouldEqual, 36) + reTypeString := result.GetDataTypeString() + So(reTypeString, ShouldEqual, "duration") + }) + Convey("Test_pair_duration_last_one_null:", func() { + s, err :=
db.RunScript("a = 1H:;a") + So(err, ShouldBeNil) + result := s.(*model.Pair) + zx := []string{"1H", "0"} + So(result.Vector.Data.Get(0).Value(), ShouldEqual, zx[0]) + So(result.Vector.Data.Get(1).Value(), ShouldEqual, zx[1]) + reType := result.GetDataType() + So(reType, ShouldEqual, 36) + reTypeString := result.GetDataTypeString() + So(reTypeString, ShouldEqual, "duration") + }) + So(db.Close(), ShouldBeNil) + }) +} +func Test_Pair_UpLoad_int(t *testing.T) { + Convey("Test_pair_int_upload:", t, func() { + db, err := api.NewSimpleDolphinDBClient(context.TODO(), setup.Address, setup.UserName, setup.Password) + So(err, ShouldBeNil) + Convey("Test_pair_int:", func() { + data, _ := model.NewDataTypeListWithRaw(model.DtInt, []int32{-211, 9984}) + pair := model.NewPair(model.NewVector(data)) + _, err := db.Upload(map[string]model.DataForm{"s": pair}) + res, _ := db.RunScript("s") + ty, _ := db.RunScript("typestr(s)") + re := res.(*model.Pair).Vector.Data.Value() + zx := []int32{-211, 9984} + for j := 0; j < len(re); j++ { + So(re[j], ShouldEqual, zx[j]) + } + So(err, ShouldBeNil) + So(ty.String(), ShouldEqual, "string(INT PAIR)") + So(res.GetDataType(), ShouldEqual, model.DtInt) + }) + So(db.Close(), ShouldBeNil) + }) +} +func Test_Pair_UpLoad_short(t *testing.T) { + Convey("Test_pair_short_upload:", t, func() { + db, err := api.NewSimpleDolphinDBClient(context.TODO(), setup.Address, setup.UserName, setup.Password) + So(err, ShouldBeNil) + Convey("Test_pair_short:", func() { + data, _ := model.NewDataTypeListWithRaw(model.DtShort, []int16{-211, 9984}) + pair := model.NewPair(model.NewVector(data)) + _, err := db.Upload(map[string]model.DataForm{"s": pair}) + res, _ := db.RunScript("s") + ty, _ := db.RunScript("typestr(s)") + re := res.(*model.Pair).Vector.Data.Value() + zx := []int16{-211, 9984} + for j := 0; j < len(re); j++ { + So(re[j], ShouldEqual, zx[j]) + } + So(err, ShouldBeNil) + So(ty.String(), ShouldEqual, "string(SHORT PAIR)") + So(res.GetDataType(), ShouldEqual, model.DtShort) + }) + So(db.Close(), ShouldBeNil) + }) +} +func Test_Pair_UpLoad_char(t *testing.T) { + Convey("Test_pair_char_upload:", t, func() { + db, err := api.NewSimpleDolphinDBClient(context.TODO(), setup.Address, setup.UserName, setup.Password) + So(err, ShouldBeNil) + Convey("Test_pair_char:", func() { + data, _ := model.NewDataTypeListWithRaw(model.DtChar, []uint8{127, 84}) + pair := model.NewPair(model.NewVector(data)) + _, err := db.Upload(map[string]model.DataForm{"s": pair}) + res, _ := db.RunScript("s") + ty, _ := db.RunScript("typestr(s)") + re := res.(*model.Pair).Vector.Data.Value() + zx := []uint8{127, 84} + for j := 0; j < len(re); j++ { + So(re[j], ShouldEqual, zx[j]) + } + So(err, ShouldBeNil) + So(ty.String(), ShouldEqual, "string(CHAR PAIR)") + So(res.GetDataType(), ShouldEqual, model.DtChar) + }) + So(db.Close(), ShouldBeNil) + }) +} +func Test_Pair_UpLoad_long(t *testing.T) { + Convey("Test_pair_long_upload:", t, func() { + db, err := api.NewSimpleDolphinDBClient(context.TODO(), setup.Address, setup.UserName, setup.Password) + So(err, ShouldBeNil) + Convey("Test_pair_long:", func() { + data, _ := model.NewDataTypeListWithRaw(model.DtLong, []int64{1212457, -21655484}) + pair := model.NewPair(model.NewVector(data)) + _, err := db.Upload(map[string]model.DataForm{"s": pair}) + res, _ := db.RunScript("s") + ty, _ := db.RunScript("typestr(s)") + re := res.(*model.Pair).Vector.Data.Value() + zx := []int64{1212457, -21655484} + for j := 0; j < len(re); j++ { + So(re[j], ShouldEqual, zx[j]) + } + So(err, 
ShouldBeNil) + So(ty.String(), ShouldEqual, "string(LONG PAIR)") + So(res.GetDataType(), ShouldEqual, model.DtLong) + }) + So(db.Close(), ShouldBeNil) + }) +} +func Test_Pair_UpLoad_float(t *testing.T) { + Convey("Test_pair_float_upload:", t, func() { + db, err := api.NewSimpleDolphinDBClient(context.TODO(), setup.Address, setup.UserName, setup.Password) + So(err, ShouldBeNil) + Convey("Test_pair_float:", func() { + data, _ := model.NewDataTypeListWithRaw(model.DtFloat, []float32{1212.457, -216.55484}) + pair := model.NewPair(model.NewVector(data)) + _, err := db.Upload(map[string]model.DataForm{"s": pair}) + res, _ := db.RunScript("s") + ty, _ := db.RunScript("typestr(s)") + re := res.(*model.Pair).Vector.Data.Value() + zx := []float32{1212.457, -216.55484} + for j := 0; j < len(re); j++ { + So(re[j], ShouldEqual, zx[j]) + } + So(err, ShouldBeNil) + So(ty.String(), ShouldEqual, "string(FLOAT PAIR)") + So(res.GetDataType(), ShouldEqual, model.DtFloat) + }) + So(db.Close(), ShouldBeNil) + }) +} +func Test_Pair_UpLoad_double(t *testing.T) { + Convey("Test_pair_double_upload:", t, func() { + db, err := api.NewSimpleDolphinDBClient(context.TODO(), setup.Address, setup.UserName, setup.Password) + So(err, ShouldBeNil) + Convey("Test_pair_double:", func() { + data, _ := model.NewDataTypeListWithRaw(model.DtDouble, []float64{1212.457, -216.55484}) + pair := model.NewPair(model.NewVector(data)) + _, err := db.Upload(map[string]model.DataForm{"s": pair}) + res, _ := db.RunScript("s") + ty, _ := db.RunScript("typestr(s)") + re := res.(*model.Pair).Vector.Data.Value() + zx := []float64{1212.457, -216.55484} + for j := 0; j < len(re); j++ { + So(re[j], ShouldEqual, zx[j]) + } + So(err, ShouldBeNil) + So(ty.String(), ShouldEqual, "string(DOUBLE PAIR)") + So(res.GetDataType(), ShouldEqual, model.DtDouble) + }) + So(db.Close(), ShouldBeNil) + }) +} +func Test_Pair_UpLoad_date(t *testing.T) { + Convey("Test_pair_date_upload:", t, func() { + db, err := api.NewSimpleDolphinDBClient(context.TODO(), setup.Address, setup.UserName, setup.Password) + So(err, ShouldBeNil) + Convey("Test_pair_date:", func() { + data, _ := model.NewDataTypeListWithRaw(model.DtDate, []time.Time{time.Date(1969, 12, 31, 23, 59, 59, 999999999, time.UTC), time.Date(2006, 1, 2, 15, 4, 4, 999999999, time.UTC)}) + pair := model.NewPair(model.NewVector(data)) + _, err := db.Upload(map[string]model.DataForm{"s": pair}) + res, _ := db.RunScript("s") + ty, _ := db.RunScript("typestr(s)") + re := res.(*model.Pair).Vector.Data.Value() + zx := []time.Time{time.Date(1969, 12, 31, 0, 0, 0, 0, time.UTC), time.Date(2006, 1, 2, 0, 0, 0, 0, time.UTC)} + for j := 0; j < len(re); j++ { + So(re[j], ShouldEqual, zx[j]) + } + So(err, ShouldBeNil) + So(ty.String(), ShouldEqual, "string(DATE PAIR)") + So(res.GetDataType(), ShouldEqual, model.DtDate) + }) + So(db.Close(), ShouldBeNil) + }) +} +func Test_Pair_UpLoad_month(t *testing.T) { + Convey("Test_pair_month_upload:", t, func() { + db, err := api.NewSimpleDolphinDBClient(context.TODO(), setup.Address, setup.UserName, setup.Password) + So(err, ShouldBeNil) + Convey("Test_pair_month:", func() { + data, _ := model.NewDataTypeListWithRaw(model.DtMonth, []time.Time{time.Date(1969, 12, 31, 23, 59, 59, 999999999, time.UTC), time.Date(2006, 1, 2, 15, 4, 4, 999999999, time.UTC)}) + pair := model.NewPair(model.NewVector(data)) + _, err := db.Upload(map[string]model.DataForm{"s": pair}) + res, _ := db.RunScript("s") + ty, _ := db.RunScript("typestr(s)") + re := res.(*model.Pair).Vector.Data.Value() + zx := 
[]time.Time{time.Date(1969, 12, 1, 0, 0, 0, 0, time.UTC), time.Date(2006, 1, 1, 0, 0, 0, 0, time.UTC)} + for j := 0; j < len(re); j++ { + So(re[j], ShouldEqual, zx[j]) + } + So(err, ShouldBeNil) + So(ty.String(), ShouldEqual, "string(MONTH PAIR)") + So(res.GetDataType(), ShouldEqual, model.DtMonth) + }) + So(db.Close(), ShouldBeNil) + }) +} +func Test_Pair_UpLoad_time(t *testing.T) { + Convey("Test_pair_time_upload:", t, func() { + db, err := api.NewSimpleDolphinDBClient(context.TODO(), setup.Address, setup.UserName, setup.Password) + So(err, ShouldBeNil) + Convey("Test_pair_time:", func() { + data, _ := model.NewDataTypeListWithRaw(model.DtTime, []time.Time{time.Date(1969, 12, 31, 23, 59, 59, 999999999, time.UTC), time.Date(2006, 1, 2, 15, 4, 4, 999999999, time.UTC)}) + pair := model.NewPair(model.NewVector(data)) + _, err := db.Upload(map[string]model.DataForm{"s": pair}) + res, _ := db.RunScript("s") + ty, _ := db.RunScript("typestr(s)") + re := res.(*model.Pair).Vector.Data.Value() + zx := []time.Time{time.Date(1970, 1, 1, 23, 59, 59, 999000000, time.UTC), time.Date(1970, 1, 1, 15, 4, 4, 999000000, time.UTC)} + for j := 0; j < len(re); j++ { + So(re[j], ShouldEqual, zx[j]) + } + So(err, ShouldBeNil) + So(ty.String(), ShouldEqual, "string(TIME PAIR)") + So(res.GetDataType(), ShouldEqual, model.DtTime) + }) + So(db.Close(), ShouldBeNil) + }) +} +func Test_Pair_UpLoad_minute(t *testing.T) { + Convey("Test_pair_minute_upload:", t, func() { + db, err := api.NewSimpleDolphinDBClient(context.TODO(), setup.Address, setup.UserName, setup.Password) + So(err, ShouldBeNil) + Convey("Test_pair_minute:", func() { + data, _ := model.NewDataTypeListWithRaw(model.DtMinute, []time.Time{time.Date(1969, 12, 31, 23, 59, 59, 999999999, time.UTC), time.Date(2006, 1, 2, 15, 4, 4, 999999999, time.UTC)}) + pair := model.NewPair(model.NewVector(data)) + _, err := db.Upload(map[string]model.DataForm{"s": pair}) + res, _ := db.RunScript("s") + ty, _ := db.RunScript("typestr(s)") + re := res.(*model.Pair).Vector.Data.Value() + zx := []time.Time{time.Date(1970, 1, 1, 23, 59, 0, 0, time.UTC), time.Date(1970, 1, 1, 15, 4, 0, 0, time.UTC)} + for j := 0; j < len(re); j++ { + So(re[j], ShouldEqual, zx[j]) + } + So(err, ShouldBeNil) + So(ty.String(), ShouldEqual, "string(MINUTE PAIR)") + So(res.GetDataType(), ShouldEqual, model.DtMinute) + }) + So(db.Close(), ShouldBeNil) + }) +} +func Test_Pair_UpLoad_second(t *testing.T) { + Convey("Test_pair_second_upload:", t, func() { + db, err := api.NewSimpleDolphinDBClient(context.TODO(), setup.Address, setup.UserName, setup.Password) + So(err, ShouldBeNil) + Convey("Test_pair_second:", func() { + data, _ := model.NewDataTypeListWithRaw(model.DtSecond, []time.Time{time.Date(1969, 12, 31, 23, 59, 59, 999999999, time.UTC), time.Date(2006, 1, 2, 15, 4, 4, 999999999, time.UTC)}) + pair := model.NewPair(model.NewVector(data)) + _, err := db.Upload(map[string]model.DataForm{"s": pair}) + res, _ := db.RunScript("s") + ty, _ := db.RunScript("typestr(s)") + re := res.(*model.Pair).Vector.Data.Value() + zx := []time.Time{time.Date(1970, 1, 1, 23, 59, 59, 0, time.UTC), time.Date(1970, 1, 1, 15, 4, 4, 0, time.UTC)} + for j := 0; j < len(re); j++ { + So(re[j], ShouldEqual, zx[j]) + } + So(err, ShouldBeNil) + So(ty.String(), ShouldEqual, "string(SECOND PAIR)") + So(res.GetDataType(), ShouldEqual, model.DtSecond) + }) + So(db.Close(), ShouldBeNil) + }) +} +func Test_Pair_UpLoad_datetime(t *testing.T) { + Convey("Test_pair_datetime_upload:", t, func() { + db, err := 
api.NewSimpleDolphinDBClient(context.TODO(), setup.Address, setup.UserName, setup.Password) + So(err, ShouldBeNil) + Convey("Test_pair_datetime:", func() { + data, _ := model.NewDataTypeListWithRaw(model.DtDatetime, []time.Time{time.Date(1969, 12, 31, 23, 59, 59, 999999999, time.UTC), time.Date(2006, 1, 2, 15, 4, 4, 999999999, time.UTC)}) + pair := model.NewPair(model.NewVector(data)) + _, err := db.Upload(map[string]model.DataForm{"s": pair}) + res, _ := db.RunScript("s") + ty, _ := db.RunScript("typestr(s)") + re := res.(*model.Pair).Vector.Data.Value() + zx := []time.Time{time.Date(1969, 12, 31, 23, 59, 59, 0, time.UTC), time.Date(2006, 1, 2, 15, 4, 4, 0, time.UTC)} + for j := 0; j < len(re); j++ { + So(re[j], ShouldEqual, zx[j]) + } + So(err, ShouldBeNil) + So(ty.String(), ShouldEqual, "string(DATETIME PAIR)") + So(res.GetDataType(), ShouldEqual, model.DtDatetime) + }) + So(db.Close(), ShouldBeNil) + }) +} +func Test_Pair_UpLoad_timestamp(t *testing.T) { + Convey("Test_pair_timestamp_upload:", t, func() { + db, err := api.NewSimpleDolphinDBClient(context.TODO(), setup.Address, setup.UserName, setup.Password) + So(err, ShouldBeNil) + Convey("Test_pair_timestamp:", func() { + data, _ := model.NewDataTypeListWithRaw(model.DtTimestamp, []time.Time{time.Date(1969, 12, 31, 23, 59, 59, 999999999, time.UTC), time.Date(2006, 1, 2, 15, 4, 4, 999999999, time.UTC)}) + pair := model.NewPair(model.NewVector(data)) + _, err := db.Upload(map[string]model.DataForm{"s": pair}) + res, _ := db.RunScript("s") + ty, _ := db.RunScript("typestr(s)") + re := res.(*model.Pair).Vector.Data.Value() + zx := []time.Time{time.Date(1969, 12, 31, 23, 59, 59, 999000000, time.UTC), time.Date(2006, 1, 2, 15, 4, 4, 999000000, time.UTC)} + for j := 0; j < len(re); j++ { + So(re[j], ShouldEqual, zx[j]) + } + So(err, ShouldBeNil) + So(ty.String(), ShouldEqual, "string(TIMESTAMP PAIR)") + So(res.GetDataType(), ShouldEqual, model.DtTimestamp) + }) + So(db.Close(), ShouldBeNil) + }) +} +func Test_Pair_UpLoad_nanotime(t *testing.T) { + Convey("Test_pair_nanotime_upload:", t, func() { + db, err := api.NewSimpleDolphinDBClient(context.TODO(), setup.Address, setup.UserName, setup.Password) + So(err, ShouldBeNil) + Convey("Test_pair_nanotime:", func() { + data, _ := model.NewDataTypeListWithRaw(model.DtNanoTime, []time.Time{time.Date(1969, 12, 31, 23, 59, 59, 999999999, time.UTC), time.Date(2006, 1, 2, 15, 4, 4, 999999999, time.UTC)}) + pair := model.NewPair(model.NewVector(data)) + _, err := db.Upload(map[string]model.DataForm{"s": pair}) + res, _ := db.RunScript("s") + ty, _ := db.RunScript("typestr(s)") + re := res.(*model.Pair).Vector.Data.Value() + zx := []time.Time{time.Date(1970, 1, 1, 23, 59, 59, 999999999, time.UTC), time.Date(1970, 1, 1, 15, 4, 4, 999999999, time.UTC)} + for j := 0; j < len(re); j++ { + So(re[j], ShouldEqual, zx[j]) + } + So(err, ShouldBeNil) + So(ty.String(), ShouldEqual, "string(NANOTIME PAIR)") + So(res.GetDataType(), ShouldEqual, model.DtNanoTime) + }) + So(db.Close(), ShouldBeNil) + }) +} +func Test_Pair_UpLoad_nanotimestamp(t *testing.T) { + Convey("Test_pair_nanotimestamp_upload:", t, func() { + db, err := api.NewSimpleDolphinDBClient(context.TODO(), setup.Address, setup.UserName, setup.Password) + So(err, ShouldBeNil) + Convey("Test_pair_nanotimestamp:", func() { + data, _ := model.NewDataTypeListWithRaw(model.DtNanoTimestamp, []time.Time{time.Date(1969, 12, 31, 23, 59, 59, 999999999, time.UTC), time.Date(2006, 1, 2, 15, 4, 4, 999999999, time.UTC)}) + pair :=
model.NewPair(model.NewVector(data)) + _, err := db.Upload(map[string]model.DataForm{"s": pair}) + res, _ := db.RunScript("s") + ty, _ := db.RunScript("typestr(s)") + re := res.(*model.Pair).Vector.Data.Value() + zx := []time.Time{time.Date(1969, 12, 31, 23, 59, 59, 999999999, time.UTC), time.Date(2006, 1, 2, 15, 4, 4, 999999999, time.UTC)} + for j := 0; j < len(re); j++ { + So(re[j], ShouldEqual, zx[j]) + } + So(err, ShouldBeNil) + So(ty.String(), ShouldEqual, "string(NANOTIMESTAMP PAIR)") + So(res.GetDataType(), ShouldEqual, model.DtNanoTimestamp) + }) + So(db.Close(), ShouldBeNil) + }) +} +func Test_Pair_UpLoad_datehour(t *testing.T) { + Convey("Test_pair_datehour_upload:", t, func() { + db, err := api.NewSimpleDolphinDBClient(context.TODO(), setup.Address, setup.UserName, setup.Password) + So(err, ShouldBeNil) + Convey("Test_pair_datehour:", func() { + data, _ := model.NewDataTypeListWithRaw(model.DtDateHour, []time.Time{time.Date(1969, 12, 31, 23, 59, 59, 999999999, time.UTC), time.Date(2006, 1, 2, 15, 4, 4, 999999999, time.UTC)}) + pair := model.NewPair(model.NewVector(data)) + _, err := db.Upload(map[string]model.DataForm{"s": pair}) + res, _ := db.RunScript("s") + ty, _ := db.RunScript("typestr(s)") + re := res.(*model.Pair).Vector.Data.Value() + zx := []time.Time{time.Date(1969, 12, 31, 23, 0, 0, 0, time.UTC), time.Date(2006, 1, 2, 15, 0, 0, 0, time.UTC)} + for j := 0; j < len(re); j++ { + So(re[j], ShouldEqual, zx[j]) + } + So(err, ShouldBeNil) + So(ty.String(), ShouldEqual, "string(DATEHOUR PAIR)") + So(res.GetDataType(), ShouldEqual, model.DtDateHour) + }) + So(db.Close(), ShouldBeNil) + }) +} +func Test_Pair_UpLoad_point(t *testing.T) { + Convey("Test_pair_point_upload:", t, func() { + db, err := api.NewSimpleDolphinDBClient(context.TODO(), setup.Address, setup.UserName, setup.Password) + So(err, ShouldBeNil) + Convey("Test_pair_point:", func() { + data, _ := model.NewDataTypeListWithRaw(model.DtPoint, [][2]float64{{-1, -1024.5}, {1001022.4, -30028.75}}) + pair := model.NewPair(model.NewVector(data)) + _, err := db.Upload(map[string]model.DataForm{"s": pair}) + res, _ := db.RunScript("s") + ty, _ := db.RunScript("typestr(s)") + re := res.(*model.Pair).Vector.Data.Value() + zx := []string{"(-1.00000, -1024.50000)", "(1001022.40000, -30028.75000)"} + for j := 0; j < len(re); j++ { + So(re[j], ShouldEqual, zx[j]) + } + So(err, ShouldBeNil) + So(ty.String(), ShouldEqual, "string(POINT PAIR)") + So(res.GetDataType(), ShouldEqual, model.DtPoint) + }) + So(db.Close(), ShouldBeNil) + }) +} +func Test_Pair_UpLoad_complex(t *testing.T) { + Convey("Test_pair_complex_upload:", t, func() { + db, err := api.NewSimpleDolphinDBClient(context.TODO(), setup.Address, setup.UserName, setup.Password) + So(err, ShouldBeNil) + Convey("Test_pair_complex:", func() { + data, _ := model.NewDataTypeListWithRaw(model.DtComplex, [][2]float64{{-1, -1024.5}, {1001022.4, -30028.75}}) + pair := model.NewPair(model.NewVector(data)) + _, err := db.Upload(map[string]model.DataForm{"s": pair}) + res, _ := db.RunScript("s") + ty, _ := db.RunScript("typestr(s)") + re := res.(*model.Pair).Vector.Data.Value() + zx := []string{"-1.00000+-1024.50000i", "1001022.40000+-30028.75000i"} + for j := 0; j < len(re); j++ { + So(re[j], ShouldEqual, zx[j]) + } + So(err, ShouldBeNil) + So(ty.String(), ShouldEqual, "string(COMPLEX PAIR)") + So(res.GetDataType(), ShouldEqual, model.DtComplex) + }) + So(db.Close(), ShouldBeNil) + }) +} +func Test_Pair_UpLoad_string(t *testing.T) { + Convey("Test_pair_string_upload:", t, func() { + 
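// String pairs should round-trip unchanged, including the non-ASCII (Chinese) sample below; typestr(s) confirms the server-side form is STRING PAIR. +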
db, err := api.NewSimpleDolphinDBClient(context.TODO(), setup.Address, setup.UserName, setup.Password) + So(err, ShouldBeNil) + Convey("Test_pair_string:", func() { + data, _ := model.NewDataTypeListWithRaw(model.DtString, []string{"#$%", "数据类型"}) + pair := model.NewPair(model.NewVector(data)) + _, err := db.Upload(map[string]model.DataForm{"s": pair}) + res, _ := db.RunScript("s") + ty, _ := db.RunScript("typestr(s)") + re := res.(*model.Pair).Vector.Data.Value() + zx := []string{"#$%", "数据类型"} + for j := 0; j < len(re); j++ { + So(re[j], ShouldEqual, zx[j]) + } + So(err, ShouldBeNil) + So(ty.String(), ShouldEqual, "string(STRING PAIR)") + So(res.GetDataType(), ShouldEqual, model.DtString) + }) + So(db.Close(), ShouldBeNil) + }) +} +func Test_Pair_UpLoad_bool(t *testing.T) { + Convey("Test_pair_bool_upload:", t, func() { + db, err := api.NewSimpleDolphinDBClient(context.TODO(), setup.Address, setup.UserName, setup.Password) + So(err, ShouldBeNil) + Convey("Test_pair_bool:", func() { + data, err := model.NewDataTypeListWithRaw(model.DtBool, []byte{1, 0}) + pair := model.NewPair(model.NewVector(data)) + So(err, ShouldBeNil) + _, err = db.Upload(map[string]model.DataForm{"s": pair}) + res, _ := db.RunScript("s") + ty, _ := db.RunScript("typestr(s)") + re := res.(*model.Pair).Vector.Data.Value() + zx := []bool{true, false} + for j := 0; j < len(re); j++ { + So(re[j], ShouldEqual, zx[j]) + } + So(err, ShouldBeNil) + So(ty.String(), ShouldEqual, "string(BOOL PAIR)") + So(res.GetDataType(), ShouldEqual, model.DtBool) + }) + So(db.Close(), ShouldBeNil) + }) +} +func Test_Pair_UpLoad_uuid(t *testing.T) { + Convey("Test_pair_uuid_upload:", t, func() { + db, err := api.NewSimpleDolphinDBClient(context.TODO(), setup.Address, setup.UserName, setup.Password) + So(err, ShouldBeNil) + Convey("Test_pair_uuid:", func() { + data, err := model.NewDataTypeListWithRaw(model.DtUUID, []string{"5d212a78-cc48-e3b1-4235-b4d91473ee87", "5d212a78-cc48-e3b1-4235-b4d91473ee88"}) + pair := model.NewPair(model.NewVector(data)) + So(err, ShouldBeNil) + _, err = db.Upload(map[string]model.DataForm{"s": pair}) + res, _ := db.RunScript("s") + ty, _ := db.RunScript("typestr(s)") + re := res.(*model.Pair).Vector.Data.Value() + zx := []string{"5d212a78-cc48-e3b1-4235-b4d91473ee87", "5d212a78-cc48-e3b1-4235-b4d91473ee88"} + for j := 0; j < len(re); j++ { + So(re[j], ShouldEqual, zx[j]) + } + So(err, ShouldBeNil) + So(ty.String(), ShouldEqual, "string(UUID PAIR)") + So(res.GetDataType(), ShouldEqual, model.DtUUID) + }) + So(db.Close(), ShouldBeNil) + }) +} +func Test_Pair_UpLoad_int128(t *testing.T) { + Convey("Test_pair_int128_upload:", t, func() { + db, err := api.NewSimpleDolphinDBClient(context.TODO(), setup.Address, setup.UserName, setup.Password) + So(err, ShouldBeNil) + Convey("Test_pair_int128:", func() { + data, err := model.NewDataTypeListWithRaw(model.DtInt128, []string{"e1671797c52e15f763380b45e841ec32", "e1671797c52e15f763380b45e841ec33"}) + pair := model.NewPair(model.NewVector(data)) + So(err, ShouldBeNil) + _, err = db.Upload(map[string]model.DataForm{"s": pair}) + res, _ := db.RunScript("s") + ty, _ := db.RunScript("typestr(s)") + re := res.(*model.Pair).Vector.Data.Value() + zx := []string{"e1671797c52e15f763380b45e841ec32", "e1671797c52e15f763380b45e841ec33"} + for j := 0; j < len(re); j++ { + So(re[j], ShouldEqual, zx[j]) + } + So(err, ShouldBeNil) + So(ty.String(), ShouldEqual, "string(INT128 PAIR)") + So(res.GetDataType(), ShouldEqual, model.DtInt128) + }) + So(db.Close(), ShouldBeNil) + }) +} +func 
Test_Pair_UpLoad_ipaddr(t *testing.T) { + Convey("Test_pair_ipaddr_upload:", t, func() { + db, err := api.NewSimpleDolphinDBClient(context.TODO(), setup.Address, setup.UserName, setup.Password) + So(err, ShouldBeNil) + Convey("Test_pair_ipaddr:", func() { + data, err := model.NewDataTypeListWithRaw(model.DtIP, []string{"0.0.0.0", "127.0.0.1"}) + pair := model.NewPair(model.NewVector(data)) + So(err, ShouldBeNil) + _, err = db.Upload(map[string]model.DataForm{"s": pair}) + res, _ := db.RunScript("s") + ty, _ := db.RunScript("typestr(s)") + re := res.(*model.Pair).Vector.Data.Value() + zx := []string{"0.0.0.0", "127.0.0.1"} + for j := 0; j < len(re); j++ { + So(re[j], ShouldEqual, zx[j]) + } + So(err, ShouldBeNil) + So(ty.String(), ShouldEqual, "string(IPADDR PAIR)") + So(res.GetDataType(), ShouldEqual, model.DtIP) + }) + So(db.Close(), ShouldBeNil) + }) +} +func Test_Pair_UpLoad_duration(t *testing.T) { + Convey("Test_pair_duration_upload:", t, func() { + db, err := api.NewSimpleDolphinDBClient(context.TODO(), setup.Address, setup.UserName, setup.Password) + So(err, ShouldBeNil) + Convey("Test_pair_duration:", func() { + data, err := model.NewDataTypeListWithRaw(model.DtDuration, []string{"1H", "52s"}) + pair := model.NewPair(model.NewVector(data)) + So(err, ShouldBeNil) + _, err = db.Upload(map[string]model.DataForm{"s": pair}) + res, _ := db.RunScript("s") + ty, _ := db.RunScript("typestr(s)") + re := res.(*model.Pair).Vector.Data.Value() + zx := []string{"1H", "52s"} + for j := 0; j < len(re); j++ { + So(re[j], ShouldEqual, zx[j]) + } + So(err, ShouldBeNil) + So(ty.String(), ShouldEqual, "string(DURATION PAIR)") + So(res.GetDataType(), ShouldEqual, model.DtDuration) + }) + So(db.Close(), ShouldBeNil) + }) +} diff --git a/test/basicTypeTest/basicScalar_test.go b/test/basicTypeTest/basicScalar_test.go new file mode 100644 index 0000000..863fa93 --- /dev/null +++ b/test/basicTypeTest/basicScalar_test.go @@ -0,0 +1,2385 @@ +package test + +import ( + "bytes" + "context" + "testing" + "time" + + "github.com/dolphindb/api-go/api" + "github.com/dolphindb/api-go/dialer/protocol" + "github.com/dolphindb/api-go/model" + "github.com/dolphindb/api-go/test/setup" + . 
"github.com/smartystreets/goconvey/convey" +) + +func Test_Scalar_DownLoad_Datatype_bool(t *testing.T) { + Convey("Test_scalar_bool:", t, func() { + db, err := api.NewSimpleDolphinDBClient(context.TODO(), setup.Address, setup.UserName, setup.Password) + So(err, ShouldBeNil) + Convey("Test_scalar_bool_not_null:", func() { + s, err := db.RunScript("true") + So(err, ShouldBeNil) + result := s.(*model.Scalar) + re := result.DataType.Value() + reType := result.GetDataType() + So(re, ShouldBeTrue) + So(reType, ShouldEqual, 1) + reTypeString := result.GetDataTypeString() + So(reTypeString, ShouldEqual, "bool") + form := result.GetDataForm() + So(form, ShouldEqual, 0) + }) + Convey("Test_scalar_bool_null:", func() { + s, err := db.RunScript("bool()") + So(err, ShouldBeNil) + result := s.(*model.Scalar) + reType := result.GetDataType() + So(result.IsNull(), ShouldEqual, true) + So(reType, ShouldEqual, 1) + reTypeString := result.GetDataTypeString() + So(reTypeString, ShouldEqual, "bool") + }) + So(db.Close(), ShouldBeNil) + }) +} +func Test_Scalar_DownLoad_Datatype_blob(t *testing.T) { + Convey("Test_scalar_blob:", t, func() { + db, err := api.NewSimpleDolphinDBClient(context.TODO(), setup.Address, setup.UserName, setup.Password) + So(err, ShouldBeNil) + Convey("Test_scalar_blob_not_null:", func() { + s, err := db.RunScript("str='hello';blob(str)") + So(err, ShouldBeNil) + result := s.(*model.Scalar) + re := result.Value() + So(re, ShouldResemble, []uint8{104, 101, 108, 108, 111}) + reType := result.GetDataType() + So(reType, ShouldEqual, 32) + reTypeString := result.GetDataTypeString() + So(reTypeString, ShouldEqual, "blob") + by := bytes.NewBufferString("") + w := protocol.NewWriter(by) + err = result.Render(w, protocol.LittleEndian) + w.Flush() + So(err, ShouldBeNil) + So(by.String(), ShouldNotBeNil) + }) + Convey("Test_scalar_blob_null:", func() { + s, err := db.RunScript("str='';blob(str)") + So(err, ShouldBeNil) + result := s.(*model.Scalar) + re := result.Value() + So(re, ShouldResemble, []uint8(nil)) + reType := result.GetDataType() + So(reType, ShouldEqual, 32) + reTypeString := result.GetDataTypeString() + So(reTypeString, ShouldEqual, "blob") + }) + So(db.Close(), ShouldBeNil) + }) +} +func Test_Scalar_DownLoad_Datatype_void(t *testing.T) { + Convey("Test_scalar_void:", t, func() { + db, err := api.NewSimpleDolphinDBClient(context.TODO(), setup.Address, setup.UserName, setup.Password) + So(err, ShouldBeNil) + Convey("Test_scalar_void_not_null:", func() { + s, err := db.RunScript("NULL") + So(err, ShouldBeNil) + result := s.(*model.Scalar) + reType := result.GetDataType() + So(reType, ShouldEqual, 0) + reTypeString := result.GetDataTypeString() + So(reTypeString, ShouldEqual, "void") + by := bytes.NewBufferString("") + w := protocol.NewWriter(by) + err = result.Render(w, protocol.LittleEndian) + w.Flush() + So(err, ShouldBeNil) + So(by.String(), ShouldNotBeNil) + }) + Convey("Test_scalar_void_null:", func() { + data, _ := model.NewDataType(model.DtVoid, []byte{12}) + s := model.NewScalar(data) + So(s.GetDataTypeString(), ShouldEqual, "void") + So(s.Value(), ShouldEqual, "void(null)") + }) + So(db.Close(), ShouldBeNil) + }) +} +func Test_Scalar_DownLoad_Datatype_char(t *testing.T) { + Convey("Test_scalar_char:", t, func() { + db, err := api.NewSimpleDolphinDBClient(context.TODO(), setup.Address, setup.UserName, setup.Password) + So(err, ShouldBeNil) + Convey("Test_scalar_char_not_null:", func() { + s, err := db.RunScript("97c") + So(err, ShouldBeNil) + result := s.(*model.Scalar) + re := 
result.DataType.Value() + reType := result.GetDataType() + So(re, ShouldEqual, 97) + So(reType, ShouldEqual, 2) + reTypeString := result.GetDataTypeString() + So(reTypeString, ShouldEqual, "char") + }) + Convey("Test_scalar_char_null:", func() { + s, err := db.RunScript("char()") + So(err, ShouldBeNil) + result := s.(*model.Scalar) + reType := result.GetDataType() + So(result.IsNull(), ShouldEqual, true) + So(reType, ShouldEqual, 2) + reTypeString := result.GetDataTypeString() + So(reTypeString, ShouldEqual, "char") + get := result.String() + So(get, ShouldEqual, "char()") + row := result.Rows() + So(row, ShouldEqual, 1) + result1 := s.(*model.Scalar).SetNull + So(result1, ShouldNotBeNil) + result.SetNull() + }) + Convey("Test_scalar_char_eql_max_range(2^7-1):", func() { + s, err := db.RunScript("char(127)") + So(err, ShouldBeNil) + result := s.(*model.Scalar) + re := result.DataType.Value() + reType := result.GetDataType() + So(re, ShouldEqual, 127) + So(reType, ShouldEqual, 2) + reTypeString := result.GetDataTypeString() + So(reTypeString, ShouldEqual, "char") + s1, err := db.RunScript("char(-127)") + So(err, ShouldBeNil) + result1 := s1.(*model.Scalar) + re1 := result1.DataType.Value() + reType1 := result1.GetDataType() + So(re1, ShouldEqual, -127) + So(reType1, ShouldEqual, 2) + reTypeString1 := result1.GetDataTypeString() + So(reTypeString1, ShouldEqual, "char") + }) + Convey("Test_scalar_char_over_range:", func() { + s, err := db.RunScript("char(128)") + So(err, ShouldBeNil) + result := s.(*model.Scalar) + reType := result.GetDataType() + So(result.IsNull(), ShouldBeTrue) + So(reType, ShouldEqual, 2) + reTypeString := result.GetDataTypeString() + So(reTypeString, ShouldEqual, "char") + s1, err := db.RunScript("char(-128)") + So(err, ShouldBeNil) + result1 := s1.(*model.Scalar) + reType1 := result1.GetDataType() + So(result1.IsNull(), ShouldEqual, true) + So(reType1, ShouldEqual, 2) + reTypeString1 := result1.GetDataTypeString() + So(reTypeString1, ShouldEqual, "char") + }) + So(db.Close(), ShouldBeNil) + }) +} +func Test_Scalar_DownLoad_Datatype_short(t *testing.T) { + Convey("Test_scalar_short:", t, func() { + db, err := api.NewSimpleDolphinDBClient(context.TODO(), setup.Address, setup.UserName, setup.Password) + So(err, ShouldBeNil) + Convey("Test_scalar_short_not_null:", func() { + s, err := db.RunScript("100h") + So(err, ShouldBeNil) + result := s.(*model.Scalar) + re := result.DataType.Value() + reType := result.GetDataType() + So(re, ShouldEqual, 100) + So(reType, ShouldEqual, 3) + reTypeString := result.GetDataTypeString() + So(reTypeString, ShouldEqual, "short") + }) + Convey("Test_scalar_short_null:", func() { + s, err := db.RunScript("short()") + So(err, ShouldBeNil) + result := s.(*model.Scalar) + reType := result.GetDataType() + So(result.IsNull(), ShouldEqual, true) + So(reType, ShouldEqual, 3) + reTypeString := result.GetDataTypeString() + So(reTypeString, ShouldEqual, "short") + }) + Convey("Test_scalar_short_negative:", func() { + s, err := db.RunScript("-112h") + So(err, ShouldBeNil) + result := s.(*model.Scalar) + re := result.DataType.Value() + reType := result.GetDataType() + So(re, ShouldEqual, -112) + So(reType, ShouldEqual, 3) + reTypeString := result.GetDataTypeString() + So(reTypeString, ShouldEqual, "short") + }) + Convey("Test_scalar_short_eql_2^10:", func() { + s, err := db.RunScript("1024h") + So(err, ShouldBeNil) + result := s.(*model.Scalar) + re := result.DataType.Value() + reType := result.GetDataType() + So(re, ShouldEqual, 1024) + So(reType, 
ShouldEqual, 3) + reTypeString := result.GetDataTypeString() + So(reTypeString, ShouldEqual, "short") + }) + Convey("Test_scalar_short_small_2^10:", func() { + s, err := db.RunScript("1022h") + So(err, ShouldBeNil) + result := s.(*model.Scalar) + re := result.DataType.Value() + reType := result.GetDataType() + So(re, ShouldEqual, 1022) + So(reType, ShouldEqual, 3) + reTypeString := result.GetDataTypeString() + So(reTypeString, ShouldEqual, "short") + }) + Convey("Test_scalar_short_big_2^10:", func() { + s, err := db.RunScript("1026h") + So(err, ShouldBeNil) + result := s.(*model.Scalar) + re := result.DataType.Value() + reType := result.GetDataType() + So(re, ShouldEqual, 1026) + So(reType, ShouldEqual, 3) + reTypeString := result.GetDataTypeString() + So(reTypeString, ShouldEqual, "short") + }) + Convey("Test_scalar_short_eql_max_range(2^15-1):", func() { + s, err := db.RunScript("a = short(pow(2,15)-1);a") + So(err, ShouldBeNil) + result := s.(*model.Scalar) + re := result.DataType.Value() + reType := result.GetDataType() + So(re, ShouldEqual, 32767) + So(reType, ShouldEqual, 3) + reTypeString := result.GetDataTypeString() + So(reTypeString, ShouldEqual, "short") + s1, err := db.RunScript("a = short(-32767);a") + So(err, ShouldBeNil) + result1 := s1.(*model.Scalar) + re1 := result1.DataType.Value() + reType1 := result1.GetDataType() + So(re1, ShouldEqual, -32767) + So(reType1, ShouldEqual, 3) + reTypeString1 := result1.GetDataTypeString() + So(reTypeString1, ShouldEqual, "short") + }) + Convey("Test_scalar_short_over_range:", func() { + s, err := db.RunScript("a = short(32768);a") + So(err, ShouldBeNil) + result := s.(*model.Scalar) + reType := result.GetDataType() + So(result.IsNull(), ShouldEqual, true) + So(reType, ShouldEqual, 3) + reTypeString := result.GetDataTypeString() + So(reTypeString, ShouldEqual, "short") + s1, err := db.RunScript("a = short(-32768);a") + So(err, ShouldBeNil) + result1 := s1.(*model.Scalar) + reType1 := result1.GetDataType() + So(result1.IsNull(), ShouldEqual, true) + So(reType1, ShouldEqual, 3) + reTypeString1 := result1.GetDataTypeString() + So(reTypeString1, ShouldEqual, "short") + by := bytes.NewBufferString("") + w := protocol.NewWriter(by) + by.Reset() + err = result.Render(w, protocol.LittleEndian) + So(err, ShouldBeNil) + w.Flush() + }) + So(db.Close(), ShouldBeNil) + }) +} +func Test_Scalar_DownLoad_Datatype_int(t *testing.T) { + Convey("Test_scalar_int:", t, func() { + db, err := api.NewSimpleDolphinDBClient(context.TODO(), setup.Address, setup.UserName, setup.Password) + So(err, ShouldBeNil) + Convey("Test_scalar_int_not_null:", func() { + s, err := db.RunScript("100") + So(err, ShouldBeNil) + result := s.(*model.Scalar) + re := result.DataType.Value() + reType := result.GetDataType() + So(re, ShouldEqual, 100) + So(reType, ShouldEqual, 4) + reTypeString := result.GetDataTypeString() + So(reTypeString, ShouldEqual, "int") + by := bytes.NewBufferString("") + w := protocol.NewWriter(by) + by.Reset() + err = result.Render(w, protocol.LittleEndian) + So(err, ShouldBeNil) + w.Flush() + }) + Convey("Test_scalar_int_null:", func() { + s, err := db.RunScript("int()") + So(err, ShouldBeNil) + result := s.(*model.Scalar) + reType := result.GetDataType() + So(result.IsNull(), ShouldEqual, true) + So(reType, ShouldEqual, 4) + reTypeString := result.GetDataTypeString() + So(reTypeString, ShouldEqual, "int") + }) + Convey("Test_scalar_int_negative:", func() { + s, err := db.RunScript("-100") + So(err, ShouldBeNil) + result := s.(*model.Scalar) + re :=
result.DataType.Value() + reType := result.GetDataType() + So(re, ShouldEqual, -100) + So(reType, ShouldEqual, 4) + reTypeString := result.GetDataTypeString() + So(reTypeString, ShouldEqual, "int") + }) + Convey("Test_scalar_int_eql_2^10:", func() { + s, err := db.RunScript("a = int(1024);a") + So(err, ShouldBeNil) + result := s.(*model.Scalar) + re := result.DataType.Value() + reType := result.GetDataType() + So(re, ShouldEqual, 1024) + So(reType, ShouldEqual, 4) + reTypeString := result.GetDataTypeString() + So(reTypeString, ShouldEqual, "int") + }) + Convey("Test_scalar_int_small_2^10:", func() { + s, err := db.RunScript("a = int(1022);a") + So(err, ShouldBeNil) + result := s.(*model.Scalar) + re := result.DataType.Value() + reType := result.GetDataType() + So(re, ShouldEqual, 1022) + So(reType, ShouldEqual, 4) + reTypeString := result.GetDataTypeString() + So(reTypeString, ShouldEqual, "int") + }) + Convey("Test_scalar_int_big_2^10:", func() { + s, err := db.RunScript("a = int(1026);a") + So(err, ShouldBeNil) + result := s.(*model.Scalar) + re := result.DataType.Value() + reType := result.GetDataType() + So(re, ShouldEqual, 1026) + So(reType, ShouldEqual, 4) + reTypeString := result.GetDataTypeString() + So(reTypeString, ShouldEqual, "int") + }) + Convey("Test_scalar_int_eql_2^20:", func() { + s, err := db.RunScript("a = int(1048576);a") + So(err, ShouldBeNil) + result := s.(*model.Scalar) + re := result.DataType.Value() + reType := result.GetDataType() + So(re, ShouldEqual, 1048576) + So(reType, ShouldEqual, 4) + reTypeString := result.GetDataTypeString() + So(reTypeString, ShouldEqual, "int") + }) + Convey("Test_scalar_int_small_2^20:", func() { + s, err := db.RunScript("a = int(1048574);a") + So(err, ShouldBeNil) + result := s.(*model.Scalar) + re := result.DataType.Value() + reType := result.GetDataType() + So(re, ShouldEqual, 1048574) + So(reType, ShouldEqual, 4) + reTypeString := result.GetDataTypeString() + So(reTypeString, ShouldEqual, "int") + }) + Convey("Test_scalar_int_big_2^20:", func() { + s, err := db.RunScript("a = int(1048578);a") + So(err, ShouldBeNil) + result := s.(*model.Scalar) + re := result.DataType.Value() + reType := result.GetDataType() + So(re, ShouldEqual, 1048578) + So(reType, ShouldEqual, 4) + reTypeString := result.GetDataTypeString() + So(reTypeString, ShouldEqual, "int") + }) + Convey("Test_scalar_int_eql_max_range(2^31-1):", func() { + s, err := db.RunScript("a = int(2147483647);a") + So(err, ShouldBeNil) + result := s.(*model.Scalar) + re := result.DataType.Value() + reType := result.GetDataType() + So(re, ShouldEqual, 2147483647) + So(reType, ShouldEqual, 4) + reTypeString := result.GetDataTypeString() + So(reTypeString, ShouldEqual, "int") + s1, err := db.RunScript("a = int(-2147483647);a") + So(err, ShouldBeNil) + result1 := s1.(*model.Scalar) + re1 := result1.DataType.Value() + reType1 := result1.GetDataType() + So(re1, ShouldEqual, -2147483647) + So(reType1, ShouldEqual, 4) + reTypeString1 := result1.GetDataTypeString() + So(reTypeString1, ShouldEqual, "int") + }) + Convey("Test_scalar_int_over_range:", func() { + s, err := db.RunScript("int(2147483648)") + So(err, ShouldBeNil) + result := s.(*model.Scalar) + reType := result.GetDataType() + So(result.IsNull(), ShouldEqual, true) + So(reType, ShouldEqual, 4) + reTypeString := result.GetDataTypeString() + So(reTypeString, ShouldEqual, "int") + s1, err := db.RunScript("int(-2147483648)") + So(err, ShouldBeNil) + result1 := s1.(*model.Scalar) + reType1 := result1.GetDataType() + 
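// Both out-of-range literals come back as INT NULL; DolphinDB appears to reserve the minimum 32-bit value as the INT null sentinel, hence the IsNull checks below. +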
So(result1.IsNull(), ShouldEqual, true) + So(reType1, ShouldEqual, 4) + reTypeString1 := result1.GetDataTypeString() + So(reTypeString1, ShouldEqual, "int") + }) + So(db.Close(), ShouldBeNil) + }) +} +func Test_Scalar_DownLoad_Datatype_long(t *testing.T) { + Convey("Test_scalar_long:", t, func() { + db, err := api.NewSimpleDolphinDBClient(context.TODO(), setup.Address, setup.UserName, setup.Password) + So(err, ShouldBeNil) + Convey("Test_scalar_long_not_null:", func() { + s, err := db.RunScript("22l") + So(err, ShouldBeNil) + result := s.(*model.Scalar) + re := result.DataType.Value() + reType := result.GetDataType() + So(re, ShouldEqual, 22) + So(reType, ShouldEqual, 5) + reTypeString := result.GetDataTypeString() + So(reTypeString, ShouldEqual, "long") + by := bytes.NewBufferString("") + w := protocol.NewWriter(by) + by.Reset() + err = result.Render(w, protocol.LittleEndian) + So(err, ShouldBeNil) + w.Flush() + }) + Convey("Test_scalar_long_null:", func() { + s, err := db.RunScript("long()") + So(err, ShouldBeNil) + result := s.(*model.Scalar) + reType := result.GetDataType() + So(result.IsNull(), ShouldEqual, true) + So(reType, ShouldEqual, 5) + reTypeString := result.GetDataTypeString() + So(reTypeString, ShouldEqual, "long") + }) + Convey("Test_scalar_long_negative:", func() { + s, err := db.RunScript("-122l") + So(err, ShouldBeNil) + result := s.(*model.Scalar) + re := result.DataType.Value() + reType := result.GetDataType() + So(re, ShouldEqual, -122) + So(reType, ShouldEqual, 5) + reTypeString := result.GetDataTypeString() + So(reTypeString, ShouldEqual, "long") + }) + Convey("Test_scalar_long_eql_2^10:", func() { + s, err := db.RunScript("1024l") + So(err, ShouldBeNil) + result := s.(*model.Scalar) + re := result.DataType.Value() + reType := result.GetDataType() + So(re, ShouldEqual, 1024) + So(reType, ShouldEqual, 5) + reTypeString := result.GetDataTypeString() + So(reTypeString, ShouldEqual, "long") + }) + Convey("Test_scalar_long_small_2^10:", func() { + s, err := db.RunScript("1022l") + So(err, ShouldBeNil) + result := s.(*model.Scalar) + re := result.DataType.Value() + reType := result.GetDataType() + So(re, ShouldEqual, 1022) + So(reType, ShouldEqual, 5) + reTypeString := result.GetDataTypeString() + So(reTypeString, ShouldEqual, "long") + }) + Convey("Test_scalar_long_big_2^10:", func() { + s, err := db.RunScript("1026l") + So(err, ShouldBeNil) + result := s.(*model.Scalar) + re := result.DataType.Value() + reType := result.GetDataType() + So(re, ShouldEqual, 1026) + So(reType, ShouldEqual, 5) + reTypeString := result.GetDataTypeString() + So(reTypeString, ShouldEqual, "long") + }) + Convey("Test_scalar_long_eql_2^20:", func() { + s, err := db.RunScript("1048576l") + So(err, ShouldBeNil) + result := s.(*model.Scalar) + re := result.DataType.Value() + reType := result.GetDataType() + So(re, ShouldEqual, 1048576) + So(reType, ShouldEqual, 5) + reTypeString := result.GetDataTypeString() + So(reTypeString, ShouldEqual, "long") + }) + Convey("Test_scalar_long_small_2^20:", func() { + s, err := db.RunScript("1048574l") + So(err, ShouldBeNil) + result := s.(*model.Scalar) + re := result.DataType.Value() + reType := result.GetDataType() + So(re, ShouldEqual, 1048574) + So(reType, ShouldEqual, 5) + reTypeString := result.GetDataTypeString() + So(reTypeString, ShouldEqual, "long") + }) + Convey("Test_scalar_long_big_2^20:", func() { + s, err := db.RunScript("1048578l") + So(err, ShouldBeNil) + result := s.(*model.Scalar) + re := result.DataType.Value() + reType := result.GetDataType()
+ So(re, ShouldEqual, 1048578) + So(reType, ShouldEqual, 5) + reTypeString := result.GetDataTypeString() + So(reTypeString, ShouldEqual, "long") + }) + So(db.Close(), ShouldBeNil) + }) +} +func Test_Scalar_DownLoad_Datatype_date(t *testing.T) { + Convey("Test_scalar_date:", t, func() { + db, err := api.NewSimpleDolphinDBClient(context.TODO(), setup.Address, setup.UserName, setup.Password) + So(err, ShouldBeNil) + Convey("Test_scalar_date_not_null:", func() { + s, err := db.RunScript("2022.12.31") + So(err, ShouldBeNil) + result := s.(*model.Scalar) + re := result.DataType.Value() + reType := result.GetDataType() + time1 := "2022-12-31 00:00:00" + t1, _ := time.Parse("2006-01-02 15:04:05", time1) + So(re, ShouldEqual, t1) + So(reType, ShouldEqual, 6) + reTypeString := result.GetDataTypeString() + So(reTypeString, ShouldEqual, "date") + }) + Convey("Test_scalar_date_null:", func() { + s, err := db.RunScript("date()") + So(err, ShouldBeNil) + result := s.(*model.Scalar) + reType := result.GetDataType() + So(result.IsNull(), ShouldEqual, true) + So(reType, ShouldEqual, 6) + reTypeString := result.GetDataTypeString() + So(reTypeString, ShouldEqual, "date") + }) + Convey("Test_scalar_date_early_1970:", func() { + s, err := db.RunScript("1922.07.28") + So(err, ShouldBeNil) + result := s.(*model.Scalar) + re := result.DataType.Value() + reType := result.GetDataType() + a := result.String() + So(a, ShouldEqual, "date(1922.07.28)") + time1 := "1922-07-28 00:00:00" + t1, _ := time.Parse("2006-01-02 15:04:05", time1) + So(re, ShouldEqual, t1) + So(reType, ShouldEqual, 6) + reTypeString := result.GetDataTypeString() + So(reTypeString, ShouldEqual, "date") + }) + So(db.Close(), ShouldBeNil) + }) +} +func Test_Scalar_DownLoad_Datatype_month(t *testing.T) { + Convey("Test_scalar_month:", t, func() { + db, err := api.NewSimpleDolphinDBClient(context.TODO(), setup.Address, setup.UserName, setup.Password) + So(err, ShouldBeNil) + Convey("Test_scalar_month_not_null:", func() { + s, err := db.RunScript("2022.07M") + So(err, ShouldBeNil) + result := s.(*model.Scalar) + re := result.DataType.Value() + time1 := "2022-07" + t1, _ := time.Parse("2006-01", time1) + reType := result.GetDataType() + So(re, ShouldEqual, t1) + So(reType, ShouldEqual, 7) + reTypeString := result.GetDataTypeString() + So(reTypeString, ShouldEqual, "month") + So(result.String(), ShouldEqual, "month(2022.07M)") + }) + Convey("Test_scalar_month_null:", func() { + s, err := db.RunScript("month()") + So(err, ShouldBeNil) + result := s.(*model.Scalar) + reType := result.GetDataType() + So(result.IsNull(), ShouldEqual, true) + So(reType, ShouldEqual, 7) + reTypeString := result.GetDataTypeString() + So(reTypeString, ShouldEqual, "month") + }) + Convey("Test_scalar_month_early_1970:", func() { + s, err := db.RunScript("1922.07M") + So(err, ShouldBeNil) + result := s.(*model.Scalar) + re := result.DataType.Value() + time1 := "1922-07" + t1, _ := time.Parse("2006-01", time1) + reType := result.GetDataType() + So(re, ShouldEqual, t1) + So(reType, ShouldEqual, 7) + reTypeString := result.GetDataTypeString() + So(reTypeString, ShouldEqual, "month") + }) + So(db.Close(), ShouldBeNil) + }) +} +func Test_Scalar_DownLoad_Datatype_time(t *testing.T) { + Convey("Test_scalar_time:", t, func() { + db, err := api.NewSimpleDolphinDBClient(context.TODO(), setup.Address, setup.UserName, setup.Password) + So(err, ShouldBeNil) + Convey("Test_scalar_time_not_null:", func() { + s, err := db.RunScript("14:00:28.008") + So(err, ShouldBeNil) + result :=
s.(*model.Scalar) + re := result.DataType.Value() + time1 := "1970-01-01T14:00:28.008" + t1, _ := time.Parse("2006-01-02T15:04:05.000", time1) + reType := result.GetDataType() + So(re, ShouldEqual, t1) + So(reType, ShouldEqual, 8) + reTypeString := result.GetDataTypeString() + So(reTypeString, ShouldEqual, "time") + So(result.String(), ShouldEqual, "time(14:00:28.008)") + }) + Convey("Test_scalar_time_range:", func() { + s, err := db.RunScript("23:59:59.999") + So(err, ShouldBeNil) + result := s.(*model.Scalar) + re := result.DataType.Value() + time1 := "1970-01-01T23:59:59.999" + t1, _ := time.Parse("2006-01-02T15:04:05.000", time1) + reType := result.GetDataType() + So(re, ShouldEqual, t1) + So(reType, ShouldEqual, 8) + reTypeString := result.GetDataTypeString() + So(reTypeString, ShouldEqual, "time") + So(result.String(), ShouldEqual, "time(23:59:59.999)") + }) + Convey("Test_scalar_time_null:", func() { + s, err := db.RunScript("time()") + So(err, ShouldBeNil) + result := s.(*model.Scalar) + reType := result.GetDataType() + So(result.IsNull(), ShouldEqual, true) + So(reType, ShouldEqual, 8) + reTypeString := result.GetDataTypeString() + So(reTypeString, ShouldEqual, "time") + }) + So(db.Close(), ShouldBeNil) + }) +} +func Test_Scalar_DownLoad_Datatype_minute(t *testing.T) { + Convey("Test_scalar_minute:", t, func() { + db, err := api.NewSimpleDolphinDBClient(context.TODO(), setup.Address, setup.UserName, setup.Password) + So(err, ShouldBeNil) + Convey("Test_scalar_minute_not_null:", func() { + s, err := db.RunScript("14:00m") + So(err, ShouldBeNil) + result := s.(*model.Scalar) + re := result.DataType.Value() + time1 := "1970-01-01T14:00" + t1, _ := time.Parse("2006-01-02T15:04", time1) + reType := result.GetDataType() + So(re, ShouldEqual, t1) + So(reType, ShouldEqual, 9) + reTypeString := result.GetDataTypeString() + So(reTypeString, ShouldEqual, "minute") + So(result.String(), ShouldEqual, "minute(14:00m)") + }) + Convey("Test_scalar_minute_null:", func() { + s, err := db.RunScript("minute()") + So(err, ShouldBeNil) + result := s.(*model.Scalar) + reType := result.GetDataType() + So(result.IsNull(), ShouldEqual, true) + So(reType, ShouldEqual, 9) + reTypeString := result.GetDataTypeString() + So(reTypeString, ShouldEqual, "minute") + }) + So(db.Close(), ShouldBeNil) + }) +} +func Test_Scalar_DownLoad_Datatype_second(t *testing.T) { + Convey("Test_scalar_second:", t, func() { + db, err := api.NewSimpleDolphinDBClient(context.TODO(), setup.Address, setup.UserName, setup.Password) + So(err, ShouldBeNil) + Convey("Test_scalar_second_not_null:", func() { + s, err := db.RunScript("14:00:20") + So(err, ShouldBeNil) + result := s.(*model.Scalar) + re := result.DataType.Value() + time1 := "1970-01-01T14:00:20" + t1, _ := time.Parse("2006-01-02T15:04:05", time1) + reType := result.GetDataType() + So(re, ShouldEqual, t1) + So(reType, ShouldEqual, 10) + reTypeString := result.GetDataTypeString() + So(reTypeString, ShouldEqual, "second") + So(result.String(), ShouldEqual, "second(14:00:20)") + }) + Convey("Test_scalar_second_range:", func() { + s, err := db.RunScript("23:59:59") + So(err, ShouldBeNil) + result := s.(*model.Scalar) + re := result.DataType.Value() + time1 := "1970-01-01T23:59:59" + t1, _ := time.Parse("2006-01-02T15:04:05", time1) + reType := result.GetDataType() + So(re, ShouldEqual, t1) + So(reType, ShouldEqual, 10) + reTypeString := result.GetDataTypeString() + So(reTypeString, ShouldEqual, "second") + So(result.String(), ShouldEqual, "second(23:59:59)") + }) + 
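// Like TIME and MINUTE above, SECOND carries no date component, so expected values are pinned to the epoch date 1970-01-01. +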
Convey("Test_scalar_second_null:", func() { + s, err := db.RunScript("second()") + So(err, ShouldBeNil) + result := s.(*model.Scalar) + reType := result.GetDataType() + So(result.IsNull(), ShouldEqual, true) + So(reType, ShouldEqual, 10) + reTypeString := result.GetDataTypeString() + So(reTypeString, ShouldEqual, "second") + }) + So(db.Close(), ShouldBeNil) + }) +} +func Test_Scalar_DownLoad_Datatype_dattime(t *testing.T) { + Convey("Test_scalar_datetime:", t, func() { + db, err := api.NewSimpleDolphinDBClient(context.TODO(), setup.Address, setup.UserName, setup.Password) + So(err, ShouldBeNil) + Convey("Test_scalar_datetime_not_null:", func() { + s, err := db.RunScript("2022.07.28 14:00:20") + So(err, ShouldBeNil) + result := s.(*model.Scalar) + re := result.DataType.Value() + reType := result.GetDataType() + time1 := "2022-07-28 14:00:20" + t1, _ := time.Parse("2006-01-02 15:04:05", time1) + So(re, ShouldEqual, t1) + So(reType, ShouldEqual, 11) + reTypeString := result.GetDataTypeString() + So(reTypeString, ShouldEqual, "datetime") + So(result.String(), ShouldEqual, "datetime(2022.07.28T14:00:20)") + }) + Convey("Test_scalar_datetime_range:", func() { + s, err := db.RunScript("2022.12.31 23:59:59") + So(err, ShouldBeNil) + result := s.(*model.Scalar) + re := result.DataType.Value() + reType := result.GetDataType() + time1 := "2022-12-31 23:59:59" + t1, _ := time.Parse("2006-01-02 15:04:05", time1) + So(re, ShouldEqual, t1) + So(reType, ShouldEqual, 11) + reTypeString := result.GetDataTypeString() + So(reTypeString, ShouldEqual, "datetime") + So(result.String(), ShouldEqual, "datetime(2022.12.31T23:59:59)") + }) + Convey("Test_scalar_datetime_range_about_2006:", func() { + s, err := db.RunScript("2006.01.02 15:04:03") + So(err, ShouldBeNil) + result := s.(*model.Scalar) + re := result.DataType.Value() + reType := result.GetDataType() + time1 := "2006-01-02 15:04:03" + t1, _ := time.Parse("2006-01-02 15:04:05", time1) + So(re, ShouldEqual, t1) + So(reType, ShouldEqual, 11) + reTypeString := result.GetDataTypeString() + So(reTypeString, ShouldEqual, "datetime") + }) + Convey("Test_scalar_datetime_null:", func() { + s, err := db.RunScript("datetime()") + So(err, ShouldBeNil) + result := s.(*model.Scalar) + reType := result.GetDataType() + So(result.IsNull(), ShouldEqual, true) + So(reType, ShouldEqual, 11) + reTypeString := result.GetDataTypeString() + So(reTypeString, ShouldEqual, "datetime") + }) + Convey("Test_scalar_datetime_early_1970:", func() { + s, err := db.RunScript("2022.07.28 14:00:20") + So(err, ShouldBeNil) + result := s.(*model.Scalar) + re := result.DataType.Value() + reType := result.GetDataType() + time1 := "2022-07-28 14:00:20" + t1, _ := time.Parse("2006-01-02 15:04:05", time1) + So(re, ShouldEqual, t1) + So(reType, ShouldEqual, 11) + reTypeString := result.GetDataTypeString() + So(reTypeString, ShouldEqual, "datetime") + }) + So(db.Close(), ShouldBeNil) + }) +} +func Test_Scalar_DownLoad_Datatype_timestamp(t *testing.T) { + Convey("Test_scalar_timestamp:", t, func() { + db, err := api.NewSimpleDolphinDBClient(context.TODO(), setup.Address, setup.UserName, setup.Password) + So(err, ShouldBeNil) + Convey("Test_scalar_timestamp_not_null:", func() { + s, err := db.RunScript("2022.07.28 14:00:20.008") + So(err, ShouldBeNil) + result := s.(*model.Scalar) + re := result.DataType.Value() + reType := result.GetDataType() + time1 := "2022-07-28 14:00:20.008" + t1, _ := time.Parse("2006-01-02 15:04:05.000", time1) + So(re, ShouldEqual, t1) + So(reType, ShouldEqual, 12) + 
reTypeString := result.GetDataTypeString() + So(reTypeString, ShouldEqual, "timestamp") + So(result.String(), ShouldEqual, "timestamp(2022.07.28T14:00:20.008)") + }) + Convey("Test_scalar_timestamp_range:", func() { + s, err := db.RunScript("2022.12.31 23:59:59.999") + So(err, ShouldBeNil) + result := s.(*model.Scalar) + re := result.DataType.Value() + reType := result.GetDataType() + time1 := "2022-12-31 23:59:59.999" + t1, _ := time.Parse("2006-01-02 15:04:05.000", time1) + So(re, ShouldEqual, t1) + So(reType, ShouldEqual, 12) + reTypeString := result.GetDataTypeString() + So(reTypeString, ShouldEqual, "timestamp") + }) + Convey("Test_scalar_timestamp_range_about_2006:", func() { + s, err := db.RunScript("2006.01.02 15:04:04.999") + So(err, ShouldBeNil) + result := s.(*model.Scalar) + re := result.DataType.Value() + reType := result.GetDataType() + time1 := "2006-01-02 15:04:04.999" + t1, _ := time.Parse("2006-01-02 15:04:05.000", time1) + So(re, ShouldEqual, t1) + So(reType, ShouldEqual, 12) + reTypeString := result.GetDataTypeString() + So(reTypeString, ShouldEqual, "timestamp") + }) + Convey("Test_scalar_timestamp_null:", func() { + s, err := db.RunScript("timestamp()") + So(err, ShouldBeNil) + result := s.(*model.Scalar) + reType := result.GetDataType() + So(result.IsNull(), ShouldEqual, true) + So(reType, ShouldEqual, 12) + reTypeString := result.GetDataTypeString() + So(reTypeString, ShouldEqual, "timestamp") + }) + Convey("Test_scalar_timestamp_early_1970:", func() { + s, err := db.RunScript("1922.07.28 14:00:20.008") + So(err, ShouldBeNil) + result := s.(*model.Scalar) + re := result.DataType.Value() + reType := result.GetDataType() + time1 := "1922-07-28 14:00:20.008" + t1, _ := time.Parse("2006-01-02 15:04:05.000", time1) + So(re, ShouldEqual, t1) + So(reType, ShouldEqual, 12) + reTypeString := result.GetDataTypeString() + So(reTypeString, ShouldEqual, "timestamp") + }) + So(db.Close(), ShouldBeNil) + }) +} +func Test_Scalar_DownLoad_Datatype_nanotime(t *testing.T) { + Convey("Test_scalar_nanotime:", t, func() { + db, err := api.NewSimpleDolphinDBClient(context.TODO(), setup.Address, setup.UserName, setup.Password) + So(err, ShouldBeNil) + Convey("Test_scalar_nanotime_not_null:", func() { + s, err := db.RunScript("14:00:20.008000008") + So(err, ShouldBeNil) + result := s.(*model.Scalar) + re := result.DataType.Value() + reType := result.GetDataType() + time1 := "1970-01-01 14:00:20.008000008" + t1, _ := time.Parse("2006-01-02 15:04:05.000000000", time1) + So(re, ShouldEqual, t1) + So(reType, ShouldEqual, 13) + reTypeString := result.GetDataTypeString() + So(reTypeString, ShouldEqual, "nanotime") + So(result.String(), ShouldEqual, "nanotime(14:00:20.008000008)") + }) + Convey("Test_scalar_nanotime_range:", func() { + s, err := db.RunScript("23:59:59.999999999") + So(err, ShouldBeNil) + result := s.(*model.Scalar) + re := result.DataType.Value() + reType := result.GetDataType() + time1 := "1970-01-01 23:59:59.999999999" + t1, _ := time.Parse("2006-01-02 15:04:05.000000000", time1) + So(re, ShouldEqual, t1) + So(reType, ShouldEqual, 13) + reTypeString := result.GetDataTypeString() + So(reTypeString, ShouldEqual, "nanotime") + So(result.String(), ShouldEqual, "nanotime(23:59:59.999999999)") + }) + Convey("Test_scalar_nanotime_null:", func() { + s, err := db.RunScript("nanotime()") + So(err, ShouldBeNil) + result := s.(*model.Scalar) + reType := result.GetDataType() + So(result.IsNull(), ShouldEqual, true) + So(reType, ShouldEqual, 13) + reTypeString := result.GetDataTypeString() + 
So(reTypeString, ShouldEqual, "nanotime") + }) + So(db.Close(), ShouldBeNil) + }) +} +func Test_Scalar_DownLoad_Datatype_nanotimestamp(t *testing.T) { + Convey("Test_scalar_nanotimestamp:", t, func() { + db, err := api.NewSimpleDolphinDBClient(context.TODO(), setup.Address, setup.UserName, setup.Password) + So(err, ShouldBeNil) + Convey("Test_scalar_nanotimestamp_not_null:", func() { + s, err := db.RunScript("2022.07.28 14:00:20.008000008") + So(err, ShouldBeNil) + result := s.(*model.Scalar) + re := result.DataType.Value() + reType := result.GetDataType() + time1 := "2022-07-28 14:00:20.008000008" + t1, _ := time.Parse("2006-01-02 15:04:05.000000000", time1) + So(re, ShouldEqual, t1) + So(reType, ShouldEqual, 14) + reTypeString := result.GetDataTypeString() + So(reTypeString, ShouldEqual, "nanotimestamp") + So(result.String(), ShouldEqual, "nanotimestamp(2022.07.28T14:00:20.008000008)") + }) + Convey("Test_scalar_nanotimestamp_range:", func() { + s, err := db.RunScript("2022.12.31 23:59:59.999999999") + So(err, ShouldBeNil) + result := s.(*model.Scalar) + re := result.DataType.Value() + reType := result.GetDataType() + time1 := "2022-12-31 23:59:59.999999999" + t1, _ := time.Parse("2006-01-02 15:04:05.000000000", time1) + So(re, ShouldEqual, t1) + So(reType, ShouldEqual, 14) + reTypeString := result.GetDataTypeString() + So(reTypeString, ShouldEqual, "nanotimestamp") + So(result.String(), ShouldEqual, "nanotimestamp(2022.12.31T23:59:59.999999999)") + }) + Convey("Test_scalar_nanotimestamp_range_about_2006:", func() { + s, err := db.RunScript("2006.01.02 15:04:04.999999999") + So(err, ShouldBeNil) + result := s.(*model.Scalar) + re := result.DataType.Value() + reType := result.GetDataType() + time1 := "2006-01-02 15:04:04.999999999" + t1, _ := time.Parse("2006-01-02 15:04:05.000000000", time1) + So(re, ShouldEqual, t1) + So(reType, ShouldEqual, 14) + reTypeString := result.GetDataTypeString() + So(reTypeString, ShouldEqual, "nanotimestamp") + So(result.String(), ShouldEqual, "nanotimestamp(2006.01.02T15:04:04.999999999)") + }) + Convey("Test_scalar_nanotimestamp_null:", func() { + s, err := db.RunScript("nanotimestamp()") + So(err, ShouldBeNil) + result := s.(*model.Scalar) + reType := result.GetDataType() + So(result.IsNull(), ShouldEqual, true) + So(reType, ShouldEqual, 14) + reTypeString := result.GetDataTypeString() + So(reTypeString, ShouldEqual, "nanotimestamp") + }) + Convey("Test_scalar_nanotimestamp_early_1970:", func() { + s, err := db.RunScript("1922.07.28 14:00:20.008000008") + So(err, ShouldBeNil) + result := s.(*model.Scalar) + re := result.DataType.Value() + reType := result.GetDataType() + time1 := "1922-07-28 14:00:20.008000008" + t1, _ := time.Parse("2006-01-02 15:04:05.000000000", time1) + So(re, ShouldEqual, t1) + So(reType, ShouldEqual, 14) + reTypeString := result.GetDataTypeString() + So(reTypeString, ShouldEqual, "nanotimestamp") + }) + So(db.Close(), ShouldBeNil) + }) +} +func Test_Scalar_DownLoad_Datatype_float(t *testing.T) { + Convey("Test_scalar_float:", t, func() { + db, err := api.NewSimpleDolphinDBClient(context.TODO(), setup.Address, setup.UserName, setup.Password) + So(err, ShouldBeNil) + Convey("Test_scalar_float_not_null:", func() { + s, err := db.RunScript("2.1f") + So(err, ShouldBeNil) + result := s.(*model.Scalar) + re := result.DataType.Value() + reType := result.GetDataType() + So(re, ShouldEqual, 2.1) + So(reType, ShouldEqual, 15) + reTypeString := result.GetDataTypeString() + So(reTypeString, ShouldEqual, "float") + }) + 
Convey("Test_scalar_float_null:", func() { + s, err := db.RunScript("float()") + So(err, ShouldBeNil) + result := s.(*model.Scalar) + reType := result.GetDataType() + So(result.IsNull(), ShouldEqual, true) + So(reType, ShouldEqual, 15) + reTypeString := result.GetDataTypeString() + So(reTypeString, ShouldEqual, "float") + }) + Convey("Test_scalar_float_negative:", func() { + s, err := db.RunScript("-2.1f") + So(err, ShouldBeNil) + result := s.(*model.Scalar) + re := result.DataType.Value() + reType := result.GetDataType() + So(re, ShouldEqual, -2.1) + So(reType, ShouldEqual, 15) + reTypeString := result.GetDataTypeString() + So(reTypeString, ShouldEqual, "float") + }) + Convey("Test_scalar_float_eql_2^10:", func() { + s, err := db.RunScript("1024f") + So(err, ShouldBeNil) + result := s.(*model.Scalar) + re := result.DataType.Value() + reType := result.GetDataType() + So(re, ShouldEqual, 1024) + So(reType, ShouldEqual, 15) + reTypeString := result.GetDataTypeString() + So(reTypeString, ShouldEqual, "float") + by := bytes.NewBufferString("") + w := protocol.NewWriter(by) + by.Reset() + err = result.Render(w, protocol.LittleEndian) + So(err, ShouldBeNil) + w.Flush() + }) + Convey("Test_scalar_float_small_2^10:", func() { + s, err := db.RunScript("1022f") + So(err, ShouldBeNil) + result := s.(*model.Scalar) + re := result.DataType.Value() + reType := result.GetDataType() + So(re, ShouldEqual, 1022) + So(reType, ShouldEqual, 15) + reTypeString := result.GetDataTypeString() + So(reTypeString, ShouldEqual, "float") + }) + Convey("Test_scalar_float_big_2^10:", func() { + s, err := db.RunScript("1026f") + So(err, ShouldBeNil) + result := s.(*model.Scalar) + re := result.DataType.Value() + reType := result.GetDataType() + So(re, ShouldEqual, 1026) + So(reType, ShouldEqual, 15) + reTypeString := result.GetDataTypeString() + So(reTypeString, ShouldEqual, "float") + }) + Convey("Test_scalar_float_eql_2^20:", func() { + s, err := db.RunScript("1048576f") + So(err, ShouldBeNil) + result := s.(*model.Scalar) + re := result.DataType.Value() + reType := result.GetDataType() + So(re, ShouldEqual, 1048576) + So(reType, ShouldEqual, 15) + reTypeString := result.GetDataTypeString() + So(reTypeString, ShouldEqual, "float") + }) + Convey("Test_scalar_float_small_2^20:", func() { + s, err := db.RunScript("1048574f") + So(err, ShouldBeNil) + result := s.(*model.Scalar) + re := result.DataType.Value() + reType := result.GetDataType() + So(re, ShouldEqual, 1048574) + So(reType, ShouldEqual, 15) + reTypeString := result.GetDataTypeString() + So(reTypeString, ShouldEqual, "float") + }) + Convey("Test_scalar_float_big_2^20:", func() { + s, err := db.RunScript("1048578f") + So(err, ShouldBeNil) + result := s.(*model.Scalar) + re := result.DataType.Value() + reType := result.GetDataType() + So(re, ShouldEqual, 1048578) + So(reType, ShouldEqual, 15) + reTypeString := result.GetDataTypeString() + So(reTypeString, ShouldEqual, "float") + }) + So(db.Close(), ShouldBeNil) + }) +} +func Test_Scalar_DownLoad_Datatype_double(t *testing.T) { + Convey("Test_scalar_double:", t, func() { + db, err := api.NewSimpleDolphinDBClient(context.TODO(), setup.Address, setup.UserName, setup.Password) + So(err, ShouldBeNil) + Convey("Test_scalar_double_not_null:", func() { + s, err := db.RunScript("2.1") + So(err, ShouldBeNil) + result := s.(*model.Scalar) + re := result.DataType.Value() + reType := result.GetDataType() + So(re, ShouldEqual, 2.1) + So(reType, ShouldEqual, 16) + reTypeString := result.GetDataTypeString() + So(reTypeString, 
ShouldEqual, "double") + by := bytes.NewBufferString("") + w := protocol.NewWriter(by) + err = result.Render(w, protocol.LittleEndian) + w.Flush() + So(err, ShouldBeNil) + So(by.String(), ShouldNotBeNil) + }) + Convey("Test_scalar_double_null:", func() { + s, err := db.RunScript("double()") + So(err, ShouldBeNil) + result := s.(*model.Scalar) + reType := result.GetDataType() + So(result.IsNull(), ShouldEqual, true) + So(reType, ShouldEqual, 16) + reTypeString := result.GetDataTypeString() + So(reTypeString, ShouldEqual, "double") + }) + Convey("Test_scalar_double_negative:", func() { + s, err := db.RunScript("-2.1") + So(err, ShouldBeNil) + result := s.(*model.Scalar) + re := result.DataType.Value() + reType := result.GetDataType() + So(re, ShouldEqual, -2.1) + So(reType, ShouldEqual, 16) + reTypeString := result.GetDataTypeString() + So(reTypeString, ShouldEqual, "double") + }) + Convey("Test_scalar_double_eql_2^10:", func() { + s, err := db.RunScript("1024.0") + So(err, ShouldBeNil) + result := s.(*model.Scalar) + re := result.DataType.Value() + reType := result.GetDataType() + So(re, ShouldEqual, 1024.0) + So(reType, ShouldEqual, 16) + reTypeString := result.GetDataTypeString() + So(reTypeString, ShouldEqual, "double") + }) + Convey("Test_scalar_double_small_2^10:", func() { + s, err := db.RunScript("1022.0") + So(err, ShouldBeNil) + result := s.(*model.Scalar) + re := result.DataType.Value() + reType := result.GetDataType() + So(re, ShouldEqual, 1022.0) + So(reType, ShouldEqual, 16) + reTypeString := result.GetDataTypeString() + So(reTypeString, ShouldEqual, "double") + }) + Convey("Test_scalar_double_big_2^10:", func() { + s, err := db.RunScript("1026.0") + So(err, ShouldBeNil) + result := s.(*model.Scalar) + re := result.DataType.Value() + reType := result.GetDataType() + So(re, ShouldEqual, 1026.0) + So(reType, ShouldEqual, 16) + reTypeString := result.GetDataTypeString() + So(reTypeString, ShouldEqual, "double") + }) + Convey("Test_scalar_double_eql_2^20:", func() { + s, err := db.RunScript("1048576.0") + So(err, ShouldBeNil) + result := s.(*model.Scalar) + re := result.DataType.Value() + reType := result.GetDataType() + So(re, ShouldEqual, 1048576.0) + So(reType, ShouldEqual, 16) + reTypeString := result.GetDataTypeString() + So(reTypeString, ShouldEqual, "double") + }) + Convey("Test_scalar_double_small_2^20:", func() { + s, err := db.RunScript("1048574.0") + So(err, ShouldBeNil) + result := s.(*model.Scalar) + re := result.DataType.Value() + reType := result.GetDataType() + So(re, ShouldEqual, 1048574.0) + So(reType, ShouldEqual, 16) + reTypeString := result.GetDataTypeString() + So(reTypeString, ShouldEqual, "double") + }) + Convey("Test_scalar_double_big_2^20:", func() { + s, err := db.RunScript("1048578.0") + So(err, ShouldBeNil) + result := s.(*model.Scalar) + re := result.DataType.Value() + reType := result.GetDataType() + So(re, ShouldEqual, 1048578.0) + So(reType, ShouldEqual, 16) + reTypeString := result.GetDataTypeString() + So(reTypeString, ShouldEqual, "double") + }) + So(db.Close(), ShouldBeNil) + }) +} +func Test_Scalar_DownLoad_Datatype_string(t *testing.T) { + Convey("Test_scalar_string:", t, func() { + db, err := api.NewSimpleDolphinDBClient(context.TODO(), setup.Address, setup.UserName, setup.Password) + So(err, ShouldBeNil) + Convey("Test_scalar_string_not_null:", func() { + s, err := db.RunScript(`"helloworld"`) + So(err, ShouldBeNil) + result := s.(*model.Scalar) + re := result.DataType.Value() + reType := result.GetDataType() + So(re, ShouldEqual, 
"helloworld") + So(reType, ShouldEqual, 18) + reTypeString := result.GetDataTypeString() + So(reTypeString, ShouldEqual, "string") + }) + Convey("Test_scalar_string_null:", func() { + s, err := db.RunScript("string()") + So(err, ShouldBeNil) + result := s.(*model.Scalar) + reType := result.GetDataType() + So(result.IsNull(), ShouldEqual, true) + So(reType, ShouldEqual, 18) + reTypeString := result.GetDataTypeString() + So(reTypeString, ShouldEqual, "string") + }) + So(db.Close(), ShouldBeNil) + }) +} +func Test_Scalar_DownLoad_Datatype_uuid(t *testing.T) { + Convey("Test_scalar_uuid:", t, func() { + db, err := api.NewSimpleDolphinDBClient(context.TODO(), setup.Address, setup.UserName, setup.Password) + So(err, ShouldBeNil) + Convey("Test_scalar_uuid_not_null:", func() { + s, err := db.RunScript("uuid('5d212a78-cc48-e3b1-4235-b4d91473ee87')") + So(err, ShouldBeNil) + result := s.(*model.Scalar) + re := result.DataType.Value() + reType := result.GetDataType() + So(re, ShouldEqual, "5d212a78-cc48-e3b1-4235-b4d91473ee87") + So(reType, ShouldEqual, 19) + reTypeString := result.GetDataTypeString() + So(reTypeString, ShouldEqual, "uuid") + }) + Convey("Test_scalar_uuid_null:", func() { + s, err := db.RunScript("uuid()") + So(err, ShouldBeNil) + result := s.(*model.Scalar) + reType := result.GetDataType() + So(result.Value(), ShouldEqual, "00000000-0000-0000-0000-000000000000") + So(reType, ShouldEqual, 19) + reTypeString := result.GetDataTypeString() + So(reTypeString, ShouldEqual, "uuid") + by := bytes.NewBufferString("") + w := protocol.NewWriter(by) + by.Reset() + err = result.Render(w, protocol.LittleEndian) + So(err, ShouldBeNil) + w.Flush() + }) + So(db.Close(), ShouldBeNil) + }) +} +func Test_Scalar_DownLoad_Datatype_datehour(t *testing.T) { + Convey("Test_scalar_datehour:", t, func() { + db, err := api.NewSimpleDolphinDBClient(context.TODO(), setup.Address, setup.UserName, setup.Password) + So(err, ShouldBeNil) + Convey("Test_scalar_datehour_not_null:", func() { + s, err := db.RunScript("datehour('2022.07.28T14')") + So(err, ShouldBeNil) + result := s.(*model.Scalar) + re := result.DataType.Value() + reType := result.GetDataType() + time1 := "2022-07-28T14" + t1, _ := time.Parse("2006-01-02T15", time1) + So(re, ShouldEqual, t1) + So(reType, ShouldEqual, 28) + reTypeString := result.GetDataTypeString() + So(reTypeString, ShouldEqual, "dateHour") + So(result.String(), ShouldEqual, "dateHour(2022.07.28T14)") + }) + Convey("Test_scalar_datehour_null:", func() { + s, err := db.RunScript("datehour()") + So(err, ShouldBeNil) + result := s.(*model.Scalar) + reType := result.GetDataType() + So(result.IsNull(), ShouldEqual, true) + So(reType, ShouldEqual, 28) + reTypeString := result.GetDataTypeString() + So(reTypeString, ShouldEqual, "dateHour") + }) + Convey("Test_scalar_datehour_early_1970:", func() { + s, err := db.RunScript("datehour('1922.07.28T14')") + So(err, ShouldBeNil) + result := s.(*model.Scalar) + re := result.DataType.Value() + reType := result.GetDataType() + time1 := "1922-07-28T14" + t1, _ := time.Parse("2006-01-02T15", time1) + So(re, ShouldEqual, t1) + So(reType, ShouldEqual, 28) + reTypeString := result.GetDataTypeString() + So(reTypeString, ShouldEqual, "dateHour") + by := bytes.NewBufferString("") + w := protocol.NewWriter(by) + by.Reset() + err = result.Render(w, protocol.LittleEndian) + So(err, ShouldBeNil) + w.Flush() + }) + So(db.Close(), ShouldBeNil) + }) +} +func Test_Scalar_DownLoad_Datatype_ipaddr(t *testing.T) { + Convey("Test_scalar_ipaddr:", t, func() { + db, err 
:= api.NewSimpleDolphinDBClient(context.TODO(), setup.Address, setup.UserName, setup.Password) + So(err, ShouldBeNil) + Convey("Test_scalar_ipaddr_not_null:", func() { + s, err := db.RunScript("ipaddr('35dd:4ae6:b1b1:3da9:d777:d2ab:74cc:e05')") + So(err, ShouldBeNil) + result := s.(*model.Scalar) + re := result.DataType.Value() + reType := result.GetDataType() + So(re, ShouldEqual, "35dd:4ae6:b1b1:3da9:d777:d2ab:74cc:e05") + So(reType, ShouldEqual, 30) + reTypeString := result.GetDataTypeString() + So(reTypeString, ShouldEqual, "IP") + }) + Convey("Test_scalar_ipaddr_null:", func() { + s, err := db.RunScript("ipaddr()") + So(err, ShouldBeNil) + result := s.(*model.Scalar) + reType := result.GetDataType() + So(result.IsNull(), ShouldEqual, true) + So(reType, ShouldEqual, 30) + reTypeString := result.GetDataTypeString() + So(reTypeString, ShouldEqual, "IP") + by := bytes.NewBufferString("") + w := protocol.NewWriter(by) + by.Reset() + err = result.Render(w, protocol.LittleEndian) + So(err, ShouldBeNil) + w.Flush() + }) + So(db.Close(), ShouldBeNil) + }) +} +func Test_Scalar_DownLoad_Datatype_int128(t *testing.T) { + Convey("Test_scalar_int128:", t, func() { + db, err := api.NewSimpleDolphinDBClient(context.TODO(), setup.Address, setup.UserName, setup.Password) + So(err, ShouldBeNil) + Convey("Test_scalar_int128_not_null:", func() { + s, err := db.RunScript("int128('e1671797c52e15f763380b45e841ec32')") + So(err, ShouldBeNil) + result := s.(*model.Scalar) + re := result.DataType.Value() + reType := result.GetDataType() + So(re, ShouldEqual, "e1671797c52e15f763380b45e841ec32") + So(reType, ShouldEqual, 31) + reTypeString := result.GetDataTypeString() + So(reTypeString, ShouldEqual, "int128") + by := bytes.NewBufferString("") + w := protocol.NewWriter(by) + by.Reset() + err = result.Render(w, protocol.LittleEndian) + So(err, ShouldBeNil) + w.Flush() + }) + Convey("Test_scalar_int128_null:", func() { + s, err := db.RunScript("int128()") + So(err, ShouldBeNil) + result := s.(*model.Scalar) + reType := result.GetDataType() + So(result.IsNull(), ShouldEqual, true) + So(reType, ShouldEqual, 31) + reTypeString := result.GetDataTypeString() + So(reTypeString, ShouldEqual, "int128") + by := bytes.NewBufferString("") + w := protocol.NewWriter(by) + by.Reset() + err = result.Render(w, protocol.LittleEndian) + So(err, ShouldBeNil) + w.Flush() + }) + So(db.Close(), ShouldBeNil) + }) +} +func Test_Scalar_DownLoad_Datatype_complex(t *testing.T) { + Convey("Test_scalar_complex:", t, func() { + db, err := api.NewSimpleDolphinDBClient(context.TODO(), setup.Address, setup.UserName, setup.Password) + So(err, ShouldBeNil) + Convey("Test_scalar_complex_not_null:", func() { + s, err := db.RunScript(`complex(2,5)`) + So(err, ShouldBeNil) + result := s.(*model.Scalar) + re := result.DataType.Value() + reType := result.GetDataType() + So(re, ShouldEqual, "2.00000+5.00000i") + So(reType, ShouldEqual, 34) + reTypeString := result.GetDataTypeString() + So(reTypeString, ShouldEqual, "complex") + }) + Convey("Test_scalar_complex_zero:", func() { + s, err := db.RunScript("complex(0,0)") + So(err, ShouldBeNil) + result := s.(*model.Scalar) + re := result.DataType.Value() + reType := result.GetDataType() + So(re, ShouldEqual, "0.00000+0.00000i") + So(reType, ShouldEqual, 34) + reTypeString := result.GetDataTypeString() + So(reTypeString, ShouldEqual, "complex") + }) + Convey("Test_scalar_complex_negative:", func() { + s, err := db.RunScript(`complex(-2,-5)`) + So(err, ShouldBeNil) + result := s.(*model.Scalar) + re := 
result.DataType.Value() + reType := result.GetDataType() + So(re, ShouldEqual, "-2.00000+-5.00000i") + So(reType, ShouldEqual, 34) + reTypeString := result.GetDataTypeString() + So(reTypeString, ShouldEqual, "complex") + }) + Convey("Test_scalar_complex_eql_2^10:", func() { + s, err := db.RunScript(`complex(1024,1024)`) + So(err, ShouldBeNil) + result := s.(*model.Scalar) + re := result.DataType.Value() + reType := result.GetDataType() + So(re, ShouldEqual, "1024.00000+1024.00000i") + So(reType, ShouldEqual, 34) + reTypeString := result.GetDataTypeString() + So(reTypeString, ShouldEqual, "complex") + }) + Convey("Test_scalar_complex_small_2^10:", func() { + s, err := db.RunScript(`complex(1022,1022)`) + So(err, ShouldBeNil) + result := s.(*model.Scalar) + re := result.DataType.Value() + reType := result.GetDataType() + So(re, ShouldEqual, "1022.00000+1022.00000i") + So(reType, ShouldEqual, 34) + reTypeString := result.GetDataTypeString() + So(reTypeString, ShouldEqual, "complex") + }) + Convey("Test_scalar_complex_big_2^10:", func() { + s, err := db.RunScript(`complex(1026,1026)`) + So(err, ShouldBeNil) + result := s.(*model.Scalar) + re := result.DataType.Value() + reType := result.GetDataType() + So(re, ShouldEqual, "1026.00000+1026.00000i") + So(reType, ShouldEqual, 34) + reTypeString := result.GetDataTypeString() + So(reTypeString, ShouldEqual, "complex") + }) + Convey("Test_scalar_complex_eql_2^20:", func() { + s, err := db.RunScript(`complex(1048576,1048576)`) + So(err, ShouldBeNil) + result := s.(*model.Scalar) + re := result.DataType.Value() + reType := result.GetDataType() + So(re, ShouldEqual, "1048576.00000+1048576.00000i") + So(reType, ShouldEqual, 34) + reTypeString := result.GetDataTypeString() + So(reTypeString, ShouldEqual, "complex") + }) + Convey("Test_scalar_complex_small_2^20:", func() { + s, err := db.RunScript(`complex(1048574,1048574)`) + So(err, ShouldBeNil) + result := s.(*model.Scalar) + re := result.DataType.Value() + reType := result.GetDataType() + So(re, ShouldEqual, "1048574.00000+1048574.00000i") + So(reType, ShouldEqual, 34) + reTypeString := result.GetDataTypeString() + So(reTypeString, ShouldEqual, "complex") + }) + Convey("Test_scalar_complex_big_2^20:", func() { + s, err := db.RunScript(`complex(1048578,1048578)`) + So(err, ShouldBeNil) + result := s.(*model.Scalar) + re := result.DataType.Value() + reType := result.GetDataType() + So(re, ShouldEqual, "1048578.00000+1048578.00000i") + So(reType, ShouldEqual, 34) + reTypeString := result.GetDataTypeString() + So(reTypeString, ShouldEqual, "complex") + by := bytes.NewBufferString("") + w := protocol.NewWriter(by) + by.Reset() + err = result.Render(w, protocol.LittleEndian) + So(err, ShouldBeNil) + w.Flush() + }) + So(db.Close(), ShouldBeNil) + }) +} +func Test_Scalar_DownLoad_Datatype_point(t *testing.T) { + Convey("Test_scalar_point:", t, func() { + db, err := api.NewSimpleDolphinDBClient(context.TODO(), setup.Address, setup.UserName, setup.Password) + So(err, ShouldBeNil) + Convey("Test_scalar_point_not_null:", func() { + s, err := db.RunScript(`point(2,5)`) + So(err, ShouldBeNil) + result := s.(*model.Scalar) + re := result.DataType.Value() + reType := result.GetDataType() + So(re, ShouldEqual, "(2.00000, 5.00000)") + So(reType, ShouldEqual, 35) + reTypeString := result.GetDataTypeString() + So(reTypeString, ShouldEqual, "point") + }) + Convey("Test_scalar_point_zero:", func() { + s, err := db.RunScript("point(0,0)") + So(err, ShouldBeNil) + result := s.(*model.Scalar) + re := result.DataType.Value() + reType := 
result.GetDataType() + So(re, ShouldEqual, "(0.00000, 0.00000)") + So(reType, ShouldEqual, 35) + reTypeString := result.GetDataTypeString() + So(reTypeString, ShouldEqual, "point") + }) + Convey("Test_scalar_point_negative:", func() { + s, err := db.RunScript(`point(-2,-5)`) + So(err, ShouldBeNil) + result := s.(*model.Scalar) + re := result.DataType.Value() + reType := result.GetDataType() + So(re, ShouldEqual, "(-2.00000, -5.00000)") + So(reType, ShouldEqual, 35) + reTypeString := result.GetDataTypeString() + So(reTypeString, ShouldEqual, "point") + }) + Convey("Test_scalar_point_eql_2^10:", func() { + s, err := db.RunScript(`point(1024,1024)`) + So(err, ShouldBeNil) + result := s.(*model.Scalar) + re := result.DataType.Value() + reType := result.GetDataType() + So(re, ShouldEqual, "(1024.00000, 1024.00000)") + So(reType, ShouldEqual, 35) + reTypeString := result.GetDataTypeString() + So(reTypeString, ShouldEqual, "point") + }) + Convey("Test_scalar_point_small_2^10:", func() { + s, err := db.RunScript(`point(1022.5,1022.2)`) + So(err, ShouldBeNil) + result := s.(*model.Scalar) + re := result.DataType.Value() + reType := result.GetDataType() + So(re, ShouldEqual, "(1022.50000, 1022.20000)") + So(reType, ShouldEqual, 35) + reTypeString := result.GetDataTypeString() + So(reTypeString, ShouldEqual, "point") + }) + Convey("Test_scalar_point_big_2^10:", func() { + s, err := db.RunScript(`point(1026.5,1026.2)`) + So(err, ShouldBeNil) + result := s.(*model.Scalar) + re := result.DataType.Value() + reType := result.GetDataType() + So(re, ShouldEqual, "(1026.50000, 1026.20000)") + So(reType, ShouldEqual, 35) + reTypeString := result.GetDataTypeString() + So(reTypeString, ShouldEqual, "point") + }) + Convey("Test_scalar_point_eql_2^20:", func() { + s, err := db.RunScript(`point(1048576.0,1048576.0)`) + So(err, ShouldBeNil) + result := s.(*model.Scalar) + re := result.DataType.Value() + reType := result.GetDataType() + So(re, ShouldEqual, "(1048576.00000, 1048576.00000)") + So(reType, ShouldEqual, 35) + reTypeString := result.GetDataTypeString() + So(reTypeString, ShouldEqual, "point") + }) + Convey("Test_scalar_point_small_2^20:", func() { + s, err := db.RunScript(`point(1048574.0,1048574.0)`) + So(err, ShouldBeNil) + result := s.(*model.Scalar) + re := result.DataType.Value() + reType := result.GetDataType() + So(re, ShouldEqual, "(1048574.00000, 1048574.00000)") + So(reType, ShouldEqual, 35) + reTypeString := result.GetDataTypeString() + So(reTypeString, ShouldEqual, "point") + }) + Convey("Test_scalar_point_big_2^20:", func() { + s, err := db.RunScript(`point(1048578.0,1048578.0)`) + So(err, ShouldBeNil) + result := s.(*model.Scalar) + re := result.DataType.Value() + reType := result.GetDataType() + So(re, ShouldEqual, "(1048578.00000, 1048578.00000)") + So(reType, ShouldEqual, 35) + reTypeString := result.GetDataTypeString() + So(reTypeString, ShouldEqual, "point") + }) + So(db.Close(), ShouldBeNil) + }) +} +func Test_Scalar_DownLoad_Datatype_duration(t *testing.T) { + Convey("Test_scalar_duration:", t, func() { + db, err := api.NewSimpleDolphinDBClient(context.TODO(), setup.Address, setup.UserName, setup.Password) + So(err, ShouldBeNil) + Convey("Test_scalar_duration_H:", func() { + s, err := db.RunScript(`duration("1H")`) + So(err, ShouldBeNil) + result := s.(*model.Scalar) + re := result.DataType.Value() + reType := result.GetDataType() + So(re, ShouldEqual, "1H") + So(reType, ShouldEqual, 36) + reTypeString := result.GetDataTypeString() + So(reTypeString, ShouldEqual, "duration") + }) + 
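// The remaining cases exercise the other DURATION unit suffixes used in this suite (s, m, M, y, d, w, B, ms, ns, us); each is expected to round-trip with type id 36 and type string "duration". + 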
Convey("Test_scalar_duration_s:", func() { + s, err := db.RunScript(`duration("1s")`) + So(err, ShouldBeNil) + result := s.(*model.Scalar) + re := result.DataType.Value() + reType := result.GetDataType() + So(re, ShouldEqual, "1s") + So(reType, ShouldEqual, 36) + reTypeString := result.GetDataTypeString() + So(reTypeString, ShouldEqual, "duration") + }) + Convey("Test_scalar_duration_m:", func() { + s, err := db.RunScript(`duration("2m")`) + So(err, ShouldBeNil) + result := s.(*model.Scalar) + re := result.DataType.Value() + reType := result.GetDataType() + So(re, ShouldEqual, "2m") + So(reType, ShouldEqual, 36) + reTypeString := result.GetDataTypeString() + So(reTypeString, ShouldEqual, "duration") + }) + Convey("Test_scalar_duration_M:", func() { + s, err := db.RunScript(`duration("3M")`) + So(err, ShouldBeNil) + result := s.(*model.Scalar) + re := result.DataType.Value() + reType := result.GetDataType() + So(re, ShouldEqual, "3M") + So(reType, ShouldEqual, 36) + reTypeString := result.GetDataTypeString() + So(reTypeString, ShouldEqual, "duration") + }) + Convey("Test_scalar_duration_y:", func() { + s, err := db.RunScript(`duration("5y")`) + So(err, ShouldBeNil) + result := s.(*model.Scalar) + re := result.DataType.Value() + reType := result.GetDataType() + So(re, ShouldEqual, "5y") + So(reType, ShouldEqual, 36) + reTypeString := result.GetDataTypeString() + So(reTypeString, ShouldEqual, "duration") + result.SetNull() + }) + Convey("Test_scalar_duration_d:", func() { + s, err := db.RunScript(`duration("20d")`) + So(err, ShouldBeNil) + result := s.(*model.Scalar) + re := result.DataType.Value() + reType := result.GetDataType() + So(re, ShouldEqual, "20d") + So(reType, ShouldEqual, 36) + reTypeString := result.GetDataTypeString() + So(reTypeString, ShouldEqual, "duration") + }) + Convey("Test_scalar_duration_w:", func() { + s, err := db.RunScript(`duration("52w")`) + So(err, ShouldBeNil) + result := s.(*model.Scalar) + re := result.DataType.Value() + reType := result.GetDataType() + So(re, ShouldEqual, "52w") + So(reType, ShouldEqual, 36) + reTypeString := result.GetDataTypeString() + So(reTypeString, ShouldEqual, "duration") + }) + Convey("Test_scalar_duration_B:", func() { + s, err := db.RunScript(`duration("16B")`) + So(err, ShouldBeNil) + result := s.(*model.Scalar) + re := result.DataType.Value() + reType := result.GetDataType() + So(re, ShouldEqual, "16B") + So(reType, ShouldEqual, 36) + reTypeString := result.GetDataTypeString() + So(reTypeString, ShouldEqual, "duration") + }) + Convey("Test_scalar_duration_ms:", func() { + s, err := db.RunScript(`duration("10ms")`) + So(err, ShouldBeNil) + result := s.(*model.Scalar) + re := result.DataType.Value() + reType := result.GetDataType() + So(re, ShouldEqual, "10ms") + So(reType, ShouldEqual, 36) + reTypeString := result.GetDataTypeString() + So(reTypeString, ShouldEqual, "duration") + by := bytes.NewBufferString("") + w := protocol.NewWriter(by) + by.Reset() + err = result.Render(w, protocol.LittleEndian) + So(err, ShouldBeNil) + w.Flush() + }) + Convey("Test_scalar_duration_ns:", func() { + s, err := db.RunScript(`duration("1ns")`) + So(err, ShouldBeNil) + result := s.(*model.Scalar) + re := result.DataType.Value() + reType := result.GetDataType() + So(re, ShouldEqual, "1ns") + So(reType, ShouldEqual, 36) + reTypeString := result.GetDataTypeString() + So(reTypeString, ShouldEqual, "duration") + }) + Convey("Test_scalar_duration_us:", func() { + s, err := db.RunScript(`duration("1us")`) + So(err, ShouldBeNil) + result := 
s.(*model.Scalar) + re := result.DataType.Value() + reType := result.GetDataType() + So(re, ShouldEqual, "1us") + So(reType, ShouldEqual, 36) + reTypeString := result.GetDataTypeString() + So(reTypeString, ShouldEqual, "duration") + }) + So(db.Close(), ShouldBeNil) + }) +} +func Test_Scalar_UpLoad_Datatype_int(t *testing.T) { + Convey("Test_scalar_int_upload:", t, func() { + db, err := api.NewSimpleDolphinDBClient(context.TODO(), setup.Address, setup.UserName, setup.Password) + So(err, ShouldBeNil) + Convey("Test_scalar_int:", func() { + dt, _ := model.NewDataType(model.DtInt, int32(6)) + s := model.NewScalar(dt) + df, _ := db.Upload(map[string]model.DataForm{"s": s}) + res, err := db.RunScript("s") + ty, _ := db.RunScript("typestr(s)") + re := res.(*model.Scalar) + So(re.Value(), ShouldEqual, 6) + So(res, ShouldResemble, s) + So(err, ShouldBeNil) + So(ty.String(), ShouldEqual, "string(INT)") + So(df.GetDataForm(), ShouldEqual, model.DfScalar) + So(res.GetDataType(), ShouldEqual, model.DtInt) + }) + So(db.Close(), ShouldBeNil) + }) +} +func Test_Scalar_UpLoad_Datatype_bool(t *testing.T) { + Convey("Test_scalar_bool_upload:", t, func() { + db, err := api.NewSimpleDolphinDBClient(context.TODO(), setup.Address, setup.UserName, setup.Password) + So(err, ShouldBeNil) + Convey("Test_scalar_bool:", func() { + dt, err := model.NewDataType(model.DtBool, byte(1)) + So(err, ShouldBeNil) + s := model.NewScalar(dt) + df, err := db.Upload(map[string]model.DataForm{"s": s}) + So(err, ShouldBeNil) + res, err := db.RunScript("s") + So(err, ShouldBeNil) + ty, err := db.RunScript("typestr(s)") + So(err, ShouldBeNil) + re := res.(*model.Scalar) + So(re.Value(), ShouldEqual, true) + So(res, ShouldResemble, s) + So(err, ShouldBeNil) + So(ty.String(), ShouldEqual, "string(BOOL)") + So(df.GetDataForm(), ShouldEqual, model.DfScalar) + So(res.GetDataType(), ShouldEqual, model.DtBool) + }) + So(db.Close(), ShouldBeNil) + }) +} +func Test_Scalar_UpLoad_Datatype_blob(t *testing.T) { + Convey("Test_scalar_blob_upload:", t, func() { + db, err := api.NewSimpleDolphinDBClient(context.TODO(), setup.Address, setup.UserName, setup.Password) + So(err, ShouldBeNil) + Convey("Test_scalar_blob:", func() { + dt, _ := model.NewDataType(model.DtBlob, []byte{1}) + s := model.NewScalar(dt) + df, _ := db.Upload(map[string]model.DataForm{"s": s}) + res, err := db.RunScript("s") + ty, _ := db.RunScript("typestr(s)") + re := res.(*model.Scalar) + So(re.Value(), ShouldResemble, []uint8{1}) + So(res, ShouldResemble, s) + So(err, ShouldBeNil) + So(ty.String(), ShouldEqual, "string(BLOB)") + So(df.GetDataForm(), ShouldEqual, model.DfScalar) + So(res.GetDataType(), ShouldEqual, model.DtBlob) + }) + So(db.Close(), ShouldBeNil) + }) +} +func Test_Scalar_UpLoad_Datatype_char(t *testing.T) { + Convey("Test_scalar_char_upload:", t, func() { + db, err := api.NewSimpleDolphinDBClient(context.TODO(), setup.Address, setup.UserName, setup.Password) + So(err, ShouldBeNil) + Convey("Test_scalar_char:", func() { + dt, _ := model.NewDataType(model.DtChar, uint8(21)) + s := model.NewScalar(dt) + df, _ := db.Upload(map[string]model.DataForm{"s": s}) + res, err := db.RunScript("s") + ty, _ := db.RunScript("typestr(s)") + re := res.(*model.Scalar) + So(re.Value(), ShouldEqual, 21) + So(res, ShouldResemble, s) + So(err, ShouldBeNil) + So(ty.String(), ShouldEqual, "string(CHAR)") + So(df.GetDataForm(), ShouldEqual, model.DfScalar) + So(res.GetDataType(), ShouldEqual, model.DtChar) + }) + So(db.Close(), ShouldBeNil) + }) +} +func 
Test_Scalar_UpLoad_Datatype_short(t *testing.T) { + Convey("Test_scalar_short_upload:", t, func() { + db, err := api.NewSimpleDolphinDBClient(context.TODO(), setup.Address, setup.UserName, setup.Password) + So(err, ShouldBeNil) + Convey("Test_scalar_short:", func() { + dt, _ := model.NewDataType(model.DtShort, int16(28452)) + s := model.NewScalar(dt) + df, _ := db.Upload(map[string]model.DataForm{"s": s}) + res, err := db.RunScript("s") + ty, _ := db.RunScript("typestr(s)") + re := res.(*model.Scalar) + So(re.Value(), ShouldEqual, 28452) + So(res, ShouldResemble, s) + So(err, ShouldBeNil) + So(ty.String(), ShouldEqual, "string(SHORT)") + So(df.GetDataForm(), ShouldEqual, model.DfScalar) + So(res.GetDataType(), ShouldEqual, model.DtShort) + }) + So(db.Close(), ShouldBeNil) + }) +} +func Test_Scalar_UpLoad_Datatype_long(t *testing.T) { + Convey("Test_scalar_long_upload:", t, func() { + db, err := api.NewSimpleDolphinDBClient(context.TODO(), setup.Address, setup.UserName, setup.Password) + So(err, ShouldBeNil) + Convey("Test_scalar_long:", func() { + dt, _ := model.NewDataType(model.DtLong, int64(154655631)) + s := model.NewScalar(dt) + df, _ := db.Upload(map[string]model.DataForm{"s": s}) + res, err := db.RunScript("s") + ty, _ := db.RunScript("typestr(s)") + re := res.(*model.Scalar) + So(re.Value(), ShouldEqual, 154655631) + So(res, ShouldResemble, s) + So(err, ShouldBeNil) + So(ty.String(), ShouldEqual, "string(LONG)") + So(df.GetDataForm(), ShouldEqual, model.DfScalar) + So(res.GetDataType(), ShouldEqual, model.DtLong) + }) + So(db.Close(), ShouldBeNil) + }) +} +func Test_Scalar_UpLoad_Datatype_date(t *testing.T) { + Convey("Test_scalar_date_upload:", t, func() { + db, err := api.NewSimpleDolphinDBClient(context.TODO(), setup.Address, setup.UserName, setup.Password) + So(err, ShouldBeNil) + Convey("Test_scalar_date:", func() { + dt, err := model.NewDataType(model.DtDate, time.Date(2022, 3, 8, 0, 0, 0, 0, time.UTC)) + So(err, ShouldBeNil) + s := model.NewScalar(dt) + df, _ := db.Upload(map[string]model.DataForm{"s": s}) + res, err := db.RunScript("s") + ty, _ := db.RunScript("typestr(s)") + re := res.(*model.Scalar) + time1 := "2022-03-08 00:00:00" + t1, _ := time.Parse("2006-01-02 15:04:05", time1) + So(re.Value(), ShouldEqual, t1) + So(res, ShouldResemble, s) + So(err, ShouldBeNil) + So(ty.String(), ShouldEqual, "string(DATE)") + So(df.GetDataForm(), ShouldEqual, model.DfScalar) + So(res.GetDataType(), ShouldEqual, model.DtDate) + }) + So(db.Close(), ShouldBeNil) + }) +} +func Test_Scalar_UpLoad_Datatype_month(t *testing.T) { + Convey("Test_scalar_month_upload:", t, func() { + db, err := api.NewSimpleDolphinDBClient(context.TODO(), setup.Address, setup.UserName, setup.Password) + So(err, ShouldBeNil) + Convey("Test_scalar_month:", func() { + dt, err := model.NewDataType(model.DtMonth, time.Date(2022, 3, 8, 0, 0, 0, 0, time.UTC)) + So(err, ShouldBeNil) + s := model.NewScalar(dt) + df, _ := db.Upload(map[string]model.DataForm{"s": s}) + res, err := db.RunScript("s") + ty, _ := db.RunScript("typestr(s)") + re := res.(*model.Scalar) + time1 := "2022-03" + t1, _ := time.Parse("2006-01", time1) + So(re.Value(), ShouldEqual, t1) + So(res, ShouldResemble, s) + So(err, ShouldBeNil) + So(ty.String(), ShouldEqual, "string(MONTH)") + So(df.GetDataForm(), ShouldEqual, model.DfScalar) + So(res.GetDataType(), ShouldEqual, model.DtMonth) + }) + So(db.Close(), ShouldBeNil) + }) +} +func Test_Scalar_UpLoad_Datatype_datetime(t *testing.T) { + Convey("Test_scalar_datetime_upload:", t, func() { + db, err := 
api.NewSimpleDolphinDBClient(context.TODO(), setup.Address, setup.UserName, setup.Password) + So(err, ShouldBeNil) + Convey("Test_scalar_datetime:", func() { + dt, err := model.NewDataType(model.DtDatetime, time.Date(2022, 3, 8, 23, 59, 59, 0, time.UTC)) + So(err, ShouldBeNil) + s := model.NewScalar(dt) + df, _ := db.Upload(map[string]model.DataForm{"s": s}) + res, err := db.RunScript("s") + ty, _ := db.RunScript("typestr(s)") + re := res.(*model.Scalar) + time1 := "2022-03-08 23:59:59" + t1, _ := time.Parse("2006-01-02 15:04:05", time1) + So(re.Value(), ShouldEqual, t1) + So(res, ShouldResemble, s) + So(err, ShouldBeNil) + So(ty.String(), ShouldEqual, "string(DATETIME)") + So(df.GetDataForm(), ShouldEqual, model.DfScalar) + So(res.GetDataType(), ShouldEqual, model.DtDatetime) + }) + So(db.Close(), ShouldBeNil) + }) +} +func Test_Scalar_UpLoad_Datatype_time(t *testing.T) { + Convey("Test_scalar_time_upload:", t, func() { + db, err := api.NewSimpleDolphinDBClient(context.TODO(), setup.Address, setup.UserName, setup.Password) + So(err, ShouldBeNil) + Convey("Test_scalar_time:", func() { + dt, err := model.NewDataType(model.DtTime, time.Date(0, 0, 0, 23, 59, 59, 999000000, time.UTC)) + So(err, ShouldBeNil) + s := model.NewScalar(dt) + df, _ := db.Upload(map[string]model.DataForm{"s": s}) + res, err := db.RunScript("s") + ty, _ := db.RunScript("typestr(s)") + re := res.(*model.Scalar) + time1 := "1970-01-01T23:59:59.999" + t1, _ := time.Parse("2006-01-02T15:04:05.000", time1) + So(re.Value(), ShouldEqual, t1) + So(res, ShouldResemble, s) + So(err, ShouldBeNil) + So(ty.String(), ShouldEqual, "string(TIME)") + So(df.GetDataForm(), ShouldEqual, model.DfScalar) + So(res.GetDataType(), ShouldEqual, model.DtTime) + }) + So(db.Close(), ShouldBeNil) + }) +} +func Test_Scalar_UpLoad_Datatype_timestamp(t *testing.T) { + Convey("Test_scalar_timestamp_upload:", t, func() { + db, err := api.NewSimpleDolphinDBClient(context.TODO(), setup.Address, setup.UserName, setup.Password) + So(err, ShouldBeNil) + Convey("Test_scalar_timestamp:", func() { + dt, err := model.NewDataType(model.DtTimestamp, time.Date(1969, 12, 31, 23, 59, 59, 999000000, time.UTC)) + So(err, ShouldBeNil) + s := model.NewScalar(dt) + df, _ := db.Upload(map[string]model.DataForm{"s": s}) + res, err := db.RunScript("s") + ty, _ := db.RunScript("typestr(s)") + re := res.(*model.Scalar) + time1 := "1969-12-31T23:59:59.999" + t1, _ := time.Parse("2006-01-02T15:04:05.000", time1) + So(re.Value(), ShouldEqual, t1) + So(res, ShouldResemble, s) + So(err, ShouldBeNil) + So(ty.String(), ShouldEqual, "string(TIMESTAMP)") + So(df.GetDataForm(), ShouldEqual, model.DfScalar) + So(res.GetDataType(), ShouldEqual, model.DtTimestamp) + }) + So(db.Close(), ShouldBeNil) + }) +} +func Test_Scalar_UpLoad_Datatype_minute(t *testing.T) { + Convey("Test_scalar_minute_upload:", t, func() { + db, err := api.NewSimpleDolphinDBClient(context.TODO(), setup.Address, setup.UserName, setup.Password) + So(err, ShouldBeNil) + Convey("Test_scalar_minute:", func() { + dt, err := model.NewDataType(model.DtMinute, time.Date(0, 0, 0, 23, 59, 0, 0, time.UTC)) + So(err, ShouldBeNil) + s := model.NewScalar(dt) + df, _ := db.Upload(map[string]model.DataForm{"s": s}) + res, err := db.RunScript("s") + ty, _ := db.RunScript("typestr(s)") + re := res.(*model.Scalar) + time1 := "1970-01-01T23:59" + t1, _ := time.Parse("2006-01-02T15:04", time1) + So(re.Value(), ShouldEqual, t1) + So(res, ShouldResemble, s) + So(err, ShouldBeNil) + So(ty.String(), ShouldEqual, "string(MINUTE)") + 
So(df.GetDataForm(), ShouldEqual, model.DfScalar) + So(res.GetDataType(), ShouldEqual, model.DtMinute) + }) + So(db.Close(), ShouldBeNil) + }) +} +func Test_Scalar_UpLoad_Datatype_second(t *testing.T) { + Convey("Test_scalar_second_upload:", t, func() { + db, err := api.NewSimpleDolphinDBClient(context.TODO(), setup.Address, setup.UserName, setup.Password) + So(err, ShouldBeNil) + Convey("Test_scalar_second:", func() { + dt, err := model.NewDataType(model.DtSecond, time.Date(0, 0, 0, 23, 59, 59, 0, time.UTC)) + So(err, ShouldBeNil) + s := model.NewScalar(dt) + df, _ := db.Upload(map[string]model.DataForm{"s": s}) + res, err := db.RunScript("s") + ty, _ := db.RunScript("typestr(s)") + re := res.(*model.Scalar) + time1 := "1970-01-01T23:59:59" + t1, _ := time.Parse("2006-01-02T15:04:05", time1) + So(re.Value(), ShouldEqual, t1) + So(res, ShouldResemble, s) + So(err, ShouldBeNil) + So(ty.String(), ShouldEqual, "string(SECOND)") + So(df.GetDataForm(), ShouldEqual, model.DfScalar) + So(res.GetDataType(), ShouldEqual, model.DtSecond) + }) + So(db.Close(), ShouldBeNil) + }) +} +func Test_Scalar_UpLoad_Datatype_nanotime(t *testing.T) { + Convey("Test_scalar_nanotime_upload:", t, func() { + db, err := api.NewSimpleDolphinDBClient(context.TODO(), setup.Address, setup.UserName, setup.Password) + So(err, ShouldBeNil) + Convey("Test_scalar_nanotime:", func() { + dt, err := model.NewDataType(model.DtNanoTime, time.Date(0, 0, 0, 23, 59, 59, 999999999, time.UTC)) + So(err, ShouldBeNil) + s := model.NewScalar(dt) + df, _ := db.Upload(map[string]model.DataForm{"s": s}) + res, err := db.RunScript("s") + ty, _ := db.RunScript("typestr(s)") + re := res.(*model.Scalar) + time1 := "1970-01-01T23:59:59.999999999" + t1, _ := time.Parse("2006-01-02T15:04:05.000000000", time1) + So(re.Value(), ShouldEqual, t1) + So(res, ShouldResemble, s) + So(err, ShouldBeNil) + So(ty.String(), ShouldEqual, "string(NANOTIME)") + So(df.GetDataForm(), ShouldEqual, model.DfScalar) + So(res.GetDataType(), ShouldEqual, model.DtNanoTime) + }) + So(db.Close(), ShouldBeNil) + }) +} +func Test_Scalar_UpLoad_Datatype_nanotimestamp(t *testing.T) { + Convey("Test_scalar_nanotimestamp_upload:", t, func() { + db, err := api.NewSimpleDolphinDBClient(context.TODO(), setup.Address, setup.UserName, setup.Password) + So(err, ShouldBeNil) + Convey("Test_scalar_nanotimestamp:", func() { + dt, err := model.NewDataType(model.DtNanoTimestamp, time.Date(2022, 12, 31, 23, 59, 59, 999999999, time.UTC)) + So(err, ShouldBeNil) + s := model.NewScalar(dt) + df, _ := db.Upload(map[string]model.DataForm{"s": s}) + res, err := db.RunScript("s") + ty, _ := db.RunScript("typestr(s)") + re := res.(*model.Scalar) + time1 := "2022-12-31T23:59:59.999999999" + t1, _ := time.Parse("2006-01-02T15:04:05.000000000", time1) + So(re.Value(), ShouldEqual, t1) + So(res, ShouldResemble, s) + So(err, ShouldBeNil) + So(ty.String(), ShouldEqual, "string(NANOTIMESTAMP)") + So(df.GetDataForm(), ShouldEqual, model.DfScalar) + So(res.GetDataType(), ShouldEqual, model.DtNanoTimestamp) + }) + So(db.Close(), ShouldBeNil) + }) +} +func Test_Scalar_UpLoad_Datatype_datehour(t *testing.T) { + Convey("Test_scalar_datehour_upload:", t, func() { + db, err := api.NewSimpleDolphinDBClient(context.TODO(), setup.Address, setup.UserName, setup.Password) + So(err, ShouldBeNil) + Convey("Test_scalar_datehour:", func() { + dt, err := model.NewDataType(model.DtDateHour, time.Date(2022, 12, 31, 23, 59, 59, 999999999, time.UTC)) + So(err, ShouldBeNil) + s := model.NewScalar(dt) + df, _ := 
db.Upload(map[string]model.DataForm{"s": s}) + res, err := db.RunScript("s") + ty, _ := db.RunScript("typestr(s)") + re := res.(*model.Scalar) + time1 := "2022-12-31T23" + t1, _ := time.Parse("2006-01-02T15", time1) + So(re.Value(), ShouldEqual, t1) + So(res, ShouldResemble, s) + So(err, ShouldBeNil) + So(ty.String(), ShouldEqual, "string(DATEHOUR)") + So(df.GetDataForm(), ShouldEqual, model.DfScalar) + So(res.GetDataType(), ShouldEqual, model.DtDateHour) + }) + So(db.Close(), ShouldBeNil) + }) +} +func Test_Scalar_UpLoad_Datatype_float(t *testing.T) { + Convey("Test_scalar_float_upload:", t, func() { + db, err := api.NewSimpleDolphinDBClient(context.TODO(), setup.Address, setup.UserName, setup.Password) + So(err, ShouldBeNil) + Convey("Test_scalar_float:", func() { + dt, _ := model.NewDataType(model.DtFloat, float32(28.5)) + s := model.NewScalar(dt) + df, _ := db.Upload(map[string]model.DataForm{"s": s}) + res, err := db.RunScript("s") + ty, _ := db.RunScript("typestr(s)") + re := res.(*model.Scalar) + So(re.Value(), ShouldEqual, 28.5) + So(res, ShouldResemble, s) + So(err, ShouldBeNil) + So(ty.String(), ShouldEqual, "string(FLOAT)") + So(df.GetDataForm(), ShouldEqual, model.DfScalar) + So(res.GetDataType(), ShouldEqual, model.DtFloat) + }) + So(db.Close(), ShouldBeNil) + }) +} +func Test_Scalar_UpLoad_Datatype_double(t *testing.T) { + Convey("Test_scalar_double_upload:", t, func() { + db, err := api.NewSimpleDolphinDBClient(context.TODO(), setup.Address, setup.UserName, setup.Password) + So(err, ShouldBeNil) + Convey("Test_scalar_double:", func() { + dt, _ := model.NewDataType(model.DtDouble, float64(28.504875)) + s := model.NewScalar(dt) + df, _ := db.Upload(map[string]model.DataForm{"s": s}) + res, err := db.RunScript("s") + ty, _ := db.RunScript("typestr(s)") + re := res.(*model.Scalar) + So(re.Value(), ShouldEqual, 28.504875) + So(res, ShouldResemble, s) + So(err, ShouldBeNil) + So(ty.String(), ShouldEqual, "string(DOUBLE)") + So(df.GetDataForm(), ShouldEqual, model.DfScalar) + So(res.GetDataType(), ShouldEqual, model.DtDouble) + }) + So(db.Close(), ShouldBeNil) + }) +} +func Test_Scalar_UpLoad_Datatype_string(t *testing.T) { + Convey("Test_scalar_string_upload:", t, func() { + db, err := api.NewSimpleDolphinDBClient(context.TODO(), setup.Address, setup.UserName, setup.Password) + So(err, ShouldBeNil) + Convey("Test_scalar_string:", func() { + dt, _ := model.NewDataType(model.DtString, "特殊hello") + s := model.NewScalar(dt) + df, _ := db.Upload(map[string]model.DataForm{"s": s}) + res, err := db.RunScript("s") + ty, _ := db.RunScript("typestr(s)") + re := res.(*model.Scalar) + So(re.Value(), ShouldEqual, "特殊hello") + So(res, ShouldResemble, s) + So(err, ShouldBeNil) + So(ty.String(), ShouldEqual, "string(STRING)") + So(df.GetDataForm(), ShouldEqual, model.DfScalar) + So(res.GetDataType(), ShouldEqual, model.DtString) + }) + So(db.Close(), ShouldBeNil) + }) +} +func Test_Scalar_UpLoad_Datatype_uuid(t *testing.T) { + Convey("Test_scalar_uuid_upload:", t, func() { + db, err := api.NewSimpleDolphinDBClient(context.TODO(), setup.Address, setup.UserName, setup.Password) + So(err, ShouldBeNil) + Convey("Test_scalar_uuid:", func() { + dt, _ := model.NewDataType(model.DtUUID, "5d212a78-cc48-e3b1-4235-b4d91473ee87") + s := model.NewScalar(dt) + df, _ := db.Upload(map[string]model.DataForm{"s": s}) + res, err := db.RunScript("s") + ty, _ := db.RunScript("typestr(s)") + re := res.(*model.Scalar) + So(re.Value(), ShouldEqual, "5d212a78-cc48-e3b1-4235-b4d91473ee87") + So(res, ShouldResemble, s) + 
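// ShouldResemble performs a deep, reflect.DeepEqual-style comparison, so the downloaded scalar must match the uploaded one field for field rather than only by printed value. + 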
So(err, ShouldBeNil) + So(ty.String(), ShouldEqual, "string(UUID)") + So(df.GetDataForm(), ShouldEqual, model.DfScalar) + So(res.GetDataType(), ShouldEqual, model.DtUUID) + }) + So(db.Close(), ShouldBeNil) + }) +} +func Test_Scalar_UpLoad_Datatype_code(t *testing.T) { + Convey("Test_scalar_code_upload:", t, func() { + db, err := api.NewSimpleDolphinDBClient(context.TODO(), setup.Address, setup.UserName, setup.Password) + So(err, ShouldBeNil) + Convey("Test_scalar_code:", func() { + dt, _ := model.NewDataType(model.DtCode, "<1+2>") + s := model.NewScalar(dt) + df, _ := db.Upload(map[string]model.DataForm{"s": s}) + res, err := db.RunScript("s") + ty, _ := db.RunScript("typestr(s)") + re := res.(*model.Scalar) + So(re.Value(), ShouldEqual, "<1+2>") + So(err, ShouldBeNil) + So(ty.String(), ShouldEqual, "string(STRING)") + So(df.GetDataForm(), ShouldEqual, model.DfScalar) + }) + So(db.Close(), ShouldBeNil) + }) +} +func Test_Scalar_UpLoad_Datatype_int128(t *testing.T) { + Convey("Test_scalar_int128_upload:", t, func() { + db, err := api.NewSimpleDolphinDBClient(context.TODO(), setup.Address, setup.UserName, setup.Password) + So(err, ShouldBeNil) + Convey("Test_scalar_int128:", func() { + dt, _ := model.NewDataType(model.DtInt128, "e1671797c52e15f763380b45e841ec32") + s := model.NewScalar(dt) + df, _ := db.Upload(map[string]model.DataForm{"s": s}) + res, err := db.RunScript("s") + ty, _ := db.RunScript("typestr(s)") + re := res.(*model.Scalar) + So(re.Value(), ShouldEqual, "e1671797c52e15f763380b45e841ec32") + So(res, ShouldResemble, s) + So(err, ShouldBeNil) + So(ty.String(), ShouldEqual, "string(INT128)") + So(df.GetDataForm(), ShouldEqual, model.DfScalar) + So(res.GetDataType(), ShouldEqual, model.DtInt128) + }) + So(db.Close(), ShouldBeNil) + }) +} +func Test_Scalar_UpLoad_Datatype_ipaddr(t *testing.T) { + Convey("Test_scalar_ipaddr_upload:", t, func() { + db, err := api.NewSimpleDolphinDBClient(context.TODO(), setup.Address, setup.UserName, setup.Password) + So(err, ShouldBeNil) + Convey("Test_scalar_ipaddr:", func() { + dt, _ := model.NewDataType(model.DtIP, "168.130.1.13") + s := model.NewScalar(dt) + df, _ := db.Upload(map[string]model.DataForm{"s": s}) + res, err := db.RunScript("s") + ty, _ := db.RunScript("typestr(s)") + re := res.(*model.Scalar) + So(re.Value(), ShouldEqual, "168.130.1.13") + So(res, ShouldResemble, s) + So(err, ShouldBeNil) + So(ty.String(), ShouldEqual, "string(IPADDR)") + So(df.GetDataForm(), ShouldEqual, model.DfScalar) + So(res.GetDataType(), ShouldEqual, model.DtIP) + }) + So(db.Close(), ShouldBeNil) + }) +} +func Test_Scalar_UpLoad_Datatype_blob2(t *testing.T) { + Convey("Test_scalar_blob2_upload:", t, func() { + db, err := api.NewSimpleDolphinDBClient(context.TODO(), setup.Address, setup.UserName, setup.Password) + So(err, ShouldBeNil) + Convey("Test_scalar_blob2:", func() { + dt, _ := model.NewDataType(model.DtBlob, []byte{12}) + s := model.NewScalar(dt) + df, _ := db.Upload(map[string]model.DataForm{"s": s}) + res, err := db.RunScript("s") + ty, _ := db.RunScript("typestr(s)") + re := res.(*model.Scalar) + So(re.Value(), ShouldResemble, []uint8{12}) + So(res, ShouldResemble, s) + So(err, ShouldBeNil) + So(ty.String(), ShouldEqual, "string(BLOB)") + So(df.GetDataForm(), ShouldEqual, model.DfScalar) + So(res.GetDataType(), ShouldEqual, model.DtBlob) + }) + So(db.Close(), ShouldBeNil) + }) +} +func Test_Scalar_UpLoad_Datatype_complex(t *testing.T) { + Convey("Test_scalar_complex_upload:", t, func() { + db, err := 
api.NewSimpleDolphinDBClient(context.TODO(), setup.Address, setup.UserName, setup.Password) + So(err, ShouldBeNil) + Convey("Test_scalar_complex:", func() { + dt, _ := model.NewDataType(model.DtComplex, [2]float64{12, 25}) + s := model.NewScalar(dt) + df, _ := db.Upload(map[string]model.DataForm{"s": s}) + res, err := db.RunScript("s") + ty, _ := db.RunScript("typestr(s)") + re := res.(*model.Scalar) + So(re.Value(), ShouldEqual, "12.00000+25.00000i") + So(res, ShouldResemble, s) + So(err, ShouldBeNil) + So(ty.String(), ShouldEqual, "string(COMPLEX)") + So(df.GetDataForm(), ShouldEqual, model.DfScalar) + So(res.GetDataType(), ShouldEqual, model.DtComplex) + }) + So(db.Close(), ShouldBeNil) + }) +} +func Test_Scalar_UpLoad_Datatype_point(t *testing.T) { + Convey("Test_scalar_point_upload:", t, func() { + db, err := api.NewSimpleDolphinDBClient(context.TODO(), setup.Address, setup.UserName, setup.Password) + So(err, ShouldBeNil) + Convey("Test_scalar_point:", func() { + dt, _ := model.NewDataType(model.DtPoint, [2]float64{32, -25}) + s := model.NewScalar(dt) + df, _ := db.Upload(map[string]model.DataForm{"s": s}) + res, err := db.RunScript("s") + ty, _ := db.RunScript("typestr(s)") + re := res.(*model.Scalar) + So(re.Value(), ShouldEqual, "(32.00000, -25.00000)") + So(res, ShouldResemble, s) + So(err, ShouldBeNil) + So(ty.String(), ShouldEqual, "string(POINT)") + So(df.GetDataForm(), ShouldEqual, model.DfScalar) + So(res.GetDataType(), ShouldEqual, model.DtPoint) + }) + So(db.Close(), ShouldBeNil) + }) +} +func Test_Scalar_UpLoad_Datatype_duration(t *testing.T) { + Convey("Test_scalar_duration_upload:", t, func() { + db, err := api.NewSimpleDolphinDBClient(context.TODO(), setup.Address, setup.UserName, setup.Password) + So(err, ShouldBeNil) + Convey("Test_scalar_duration:", func() { + dt, _ := model.NewDataType(model.DtDuration, "1H") + s := model.NewScalar(dt) + df, _ := db.Upload(map[string]model.DataForm{"s": s}) + res, err := db.RunScript("s") + ty, _ := db.RunScript("typestr(s)") + re := res.(*model.Scalar) + So(re.Value(), ShouldEqual, "1H") + So(res, ShouldResemble, s) + So(err, ShouldBeNil) + So(ty.String(), ShouldEqual, "string(DURATION)") + So(df.GetDataForm(), ShouldEqual, model.DfScalar) + So(res.GetDataType(), ShouldEqual, model.DtDuration) + }) + So(db.Close(), ShouldBeNil) + }) +} diff --git a/test/basicTypeTest/basicSet_test.go b/test/basicTypeTest/basicSet_test.go new file mode 100644 index 0000000..97bcee8 --- /dev/null +++ b/test/basicTypeTest/basicSet_test.go @@ -0,0 +1,1841 @@ +package test + +import ( + "context" + "testing" + "time" + + "github.com/dolphindb/api-go/api" + "github.com/dolphindb/api-go/model" + "github.com/dolphindb/api-go/test/setup" + . 
"github.com/smartystreets/goconvey/convey" +) + +func Test_Set_DownLoad_DataType_int(t *testing.T) { + Convey("Test_set_int:", t, func() { + db, err := api.NewSimpleDolphinDBClient(context.TODO(), setup.Address, setup.UserName, setup.Password) + So(err, ShouldBeNil) + Convey("Test_set_int_not_null:", func() { + s, err := db.RunScript("a=set(4 5 5 2 3 11 6);a") + So(err, ShouldBeNil) + result := s.(*model.Set) + zx := []int32{6, 11, 3, 2, 5, 4} + re := result.Vector.Data.Value() + for j := 0; j < len(re); j++ { + So(re[j], ShouldBeIn, zx) + } + reType := result.GetDataType() + So(reType, ShouldEqual, 4) + reTypeString := result.GetDataTypeString() + So(reTypeString, ShouldEqual, "int") + form := result.GetDataForm() + So(form, ShouldEqual, 4) + row := result.Rows() + So(row, ShouldEqual, 6) + }) + Convey("Test_set_int_has_null:", func() { + s, err := db.RunScript("a = set(1024 12 -30 15 NULL 2);a") + So(err, ShouldBeNil) + result := s.(*model.Set) + re := result.Vector.Data.Value() + zx := []int32{1024, 12, -30, 15, model.NullInt, 2} + for j := 0; j < len(re); j++ { + So(re[j], ShouldBeIn, zx) + } + So(result.Vector.RowCount, ShouldEqual, 6) + reType := result.GetDataType() + So(reType, ShouldEqual, 4) + reTypeString := result.GetDataTypeString() + So(reTypeString, ShouldEqual, "int") + form := result.GetDataForm() + So(form, ShouldEqual, 4) + row := result.Rows() + So(row, ShouldEqual, 6) + }) + Convey("Test_set_int_all_null:", func() { + s, err := db.RunScript("a=take(00i,6);b=set(a);b") + So(err, ShouldBeNil) + result := s.(*model.Set) + re := result.Vector.Data.Value() + for j := 0; j < len(re); j++ { + So(re[j], ShouldEqual, model.NullInt) + } + So(result.Vector.RowCount, ShouldEqual, 1) + reType := result.GetDataType() + So(reType, ShouldEqual, 4) + reTypeString := result.GetDataTypeString() + So(reTypeString, ShouldEqual, "int") + form := result.GetDataForm() + So(form, ShouldEqual, 4) + row := result.Rows() + So(row, ShouldEqual, 1) + }) + Convey("Test_set_int_has_same_element:", func() { + s, err := db.RunScript("a = set(1024 12 -30 15 1024 2);a") + So(err, ShouldBeNil) + result := s.(*model.Set) + re := result.Vector.Data.Value() + zx := []int32{2, 15, -30, 12, 1024} + for j := 0; j < len(re); j++ { + So(re[j], ShouldBeIn, zx) + } + So(result.Vector.RowCount, ShouldEqual, 5) + reType := result.GetDataType() + So(reType, ShouldEqual, 4) + reTypeString := result.GetDataTypeString() + So(reTypeString, ShouldEqual, "int") + form := result.GetDataForm() + So(form, ShouldEqual, 4) + row := result.Rows() + So(row, ShouldEqual, 5) + }) + So(db.Close(), ShouldBeNil) + }) +} +func Test_Set_DownLoad_DataType_string(t *testing.T) { + Convey("Test_set_string:", t, func() { + db, err := api.NewSimpleDolphinDBClient(context.TODO(), setup.Address, setup.UserName, setup.Password) + So(err, ShouldBeNil) + Convey("Test_set_string_not_null:", func() { + s, err := db.RunScript("a=set('trs1' 'fal' 'rue' 'else');a") + So(err, ShouldBeNil) + result := s.(*model.Set) + re := result.Vector.Data.Value() + zx := []string{"else", "rue", "fal", "trs1"} + for j := 0; j < len(re); j++ { + So(re[j], ShouldBeIn, zx) + } + So(result.Vector.RowCount, ShouldEqual, 4) + reType := result.GetDataType() + So(reType, ShouldEqual, 18) + reTypeString := result.GetDataTypeString() + So(reTypeString, ShouldEqual, "string") + form := result.GetDataForm() + So(form, ShouldEqual, 4) + row := result.Rows() + So(row, ShouldEqual, 4) + }) + Convey("Test_set_string_has_null:", func() { + s, err := db.RunScript("a=set('trs1' '' 'rue' 
'else');a") + So(err, ShouldBeNil) + result := s.(*model.Set) + re := result.Vector.Data.Value() + zx := []string{"else", "rue", "", "trs1"} + for j := 0; j < len(re); j++ { + So(re[j], ShouldBeIn, zx) + } + So(result.Vector.RowCount, ShouldEqual, 4) + reType := result.GetDataType() + So(reType, ShouldEqual, 18) + reTypeString := result.GetDataTypeString() + So(reTypeString, ShouldEqual, "string") + form := result.GetDataForm() + So(form, ShouldEqual, 4) + row := result.Rows() + So(row, ShouldEqual, 4) + }) + Convey("Test_set_string_all_null/has_same_ele:", func() { + s, err := db.RunScript("a=set('' '' '' '');a") + So(err, ShouldBeNil) + result := s.(*model.Set) + re := result.Vector.Data.Value() + zx := []string{""} + for j := 0; j < len(re); j++ { + So(re[j], ShouldBeIn, zx) + } + So(result.Vector.RowCount, ShouldEqual, 1) + reType := result.GetDataType() + So(reType, ShouldEqual, 18) + reTypeString := result.GetDataTypeString() + So(reTypeString, ShouldEqual, "string") + form := result.GetDataForm() + So(form, ShouldEqual, 4) + row := result.Rows() + So(row, ShouldEqual, 1) + }) + So(db.Close(), ShouldBeNil) + }) +} +func Test_Set_DownLoad_DataType_char(t *testing.T) { + Convey("Test_set_char:", t, func() { + db, err := api.NewSimpleDolphinDBClient(context.TODO(), setup.Address, setup.UserName, setup.Password) + So(err, ShouldBeNil) + Convey("Test_set_char_not_null:", func() { + s, err := db.RunScript("a=set(1c 25c 97c 124c);a") + So(err, ShouldBeNil) + result := s.(*model.Set) + re := result.Vector.Data.Value() + So(re[0], ShouldEqual, 'a') + So(re[0], ShouldEqual, 97) + So(result.Vector.RowCount, ShouldEqual, 4) + reType := result.GetDataType() + So(reType, ShouldEqual, 2) + reTypeString := result.GetDataTypeString() + So(reTypeString, ShouldEqual, "char") + form := result.GetDataForm() + So(form, ShouldEqual, 4) + row := result.Rows() + So(row, ShouldEqual, 4) + }) + Convey("Test_set_char_has_null:", func() { + s, err := db.RunScript("a=set(1c 25c NULL 124c);a") + So(err, ShouldBeNil) + result := s.(*model.Set) + re := result.Vector.Data.Value() + So(result.Vector.Data.IsNull(0), ShouldBeTrue) + So(re[1], ShouldEqual, 124) + So(result.Vector.RowCount, ShouldEqual, 4) + reType := result.GetDataType() + So(reType, ShouldEqual, 2) + reTypeString := result.GetDataTypeString() + So(reTypeString, ShouldEqual, "char") + form := result.GetDataForm() + So(form, ShouldEqual, 4) + row := result.Rows() + So(row, ShouldEqual, 4) + }) + Convey("Test_set_char_all_null/has_same_ele:", func() { + s, err := db.RunScript("a=set(char['','','','']);a") + So(err, ShouldBeNil) + result := s.(*model.Set) + for j := 0; j < result.Rows(); j++ { + So(result.Vector.Data.IsNull(j), ShouldBeTrue) + } + So(result.Vector.RowCount, ShouldEqual, 1) + reType := result.GetDataType() + So(reType, ShouldEqual, 2) + reTypeString := result.GetDataTypeString() + So(reTypeString, ShouldEqual, "char") + form := result.GetDataForm() + So(form, ShouldEqual, 4) + row := result.Rows() + So(row, ShouldEqual, 1) + }) + So(db.Close(), ShouldBeNil) + }) +} +func Test_Set_DownLoad_DataType_short(t *testing.T) { + Convey("Test_set_short:", t, func() { + db, err := api.NewSimpleDolphinDBClient(context.TODO(), setup.Address, setup.UserName, setup.Password) + So(err, ShouldBeNil) + Convey("Test_set_short_not_null:", func() { + s, err := db.RunScript("a=set(-2h 25h -917h 1024h);a") + So(err, ShouldBeNil) + result := s.(*model.Set) + re := result.Vector.Data.Value() + zx := []int16{1024, -917, 25, -2} + for j := 0; j < len(re); j++ { + 
So(re[j], ShouldBeIn, zx) + } + So(result.Vector.RowCount, ShouldEqual, 4) + reType := result.GetDataType() + So(reType, ShouldEqual, 3) + reTypeString := result.GetDataTypeString() + So(reTypeString, ShouldEqual, "short") + form := result.GetDataForm() + So(form, ShouldEqual, 4) + row := result.Rows() + So(row, ShouldEqual, 4) + }) + Convey("Test_set_short_has_null:", func() { + s, err := db.RunScript("a=set(-2h NULL -917h 1024h);a") + So(err, ShouldBeNil) + result := s.(*model.Set) + re := result.Vector.Data.Value() + zx := []int16{1024, -917, model.NullShort, -2} + for j := 0; j < len(re); j++ { + So(re[j], ShouldBeIn, zx) + } + So(result.Vector.RowCount, ShouldEqual, 4) + reType := result.GetDataType() + So(reType, ShouldEqual, 3) + reTypeString := result.GetDataTypeString() + So(reTypeString, ShouldEqual, "short") + form := result.GetDataForm() + So(form, ShouldEqual, 4) + row := result.Rows() + So(row, ShouldEqual, 4) + }) + Convey("Test_set_short_all_null/has_same_ele:", func() { + s, err := db.RunScript("a=set(take(00h,6));a") + So(err, ShouldBeNil) + result := s.(*model.Set) + re := result.Vector.Data.Value() + for j := 0; j < len(re); j++ { + So(re[j], ShouldEqual, model.NullShort) + } + So(result.Vector.RowCount, ShouldEqual, 1) + reType := result.GetDataType() + So(reType, ShouldEqual, 3) + reTypeString := result.GetDataTypeString() + So(reTypeString, ShouldEqual, "short") + form := result.GetDataForm() + So(form, ShouldEqual, 4) + row := result.Rows() + So(row, ShouldEqual, 1) + }) + So(db.Close(), ShouldBeNil) + }) +} +func Test_Set_DownLoad_DataType_long(t *testing.T) { + Convey("Test_set_long:", t, func() { + db, err := api.NewSimpleDolphinDBClient(context.TODO(), setup.Address, setup.UserName, setup.Password) + So(err, ShouldBeNil) + Convey("Test_set_long_not_null:", func() { + s, err := db.RunScript("a=set(-2l 25l -917l 1024l);a") + So(err, ShouldBeNil) + result := s.(*model.Set) + re := result.Vector.Data.Value() + zx := []int64{1024, -917, 25, -2} + for j := 0; j < len(re); j++ { + So(re[j], ShouldBeIn, zx) + } + So(result.Vector.RowCount, ShouldEqual, 4) + reType := result.GetDataType() + So(reType, ShouldEqual, 5) + reTypeString := result.GetDataTypeString() + So(reTypeString, ShouldEqual, "long") + form := result.GetDataForm() + So(form, ShouldEqual, 4) + row := result.Rows() + So(row, ShouldEqual, 4) + }) + Convey("Test_set_long_has_null:", func() { + s, err := db.RunScript("a=set(-2l NULL -917l 1024l);a") + So(err, ShouldBeNil) + result := s.(*model.Set) + re := result.Vector.Data.Value() + zx := []int64{1024, -917, model.NullLong, -2} + for j := 0; j < len(re); j++ { + So(re[j], ShouldBeIn, zx) + } + So(result.Vector.RowCount, ShouldEqual, 4) + reType := result.GetDataType() + So(reType, ShouldEqual, 5) + reTypeString := result.GetDataTypeString() + So(reTypeString, ShouldEqual, "long") + form := result.GetDataForm() + So(form, ShouldEqual, 4) + row := result.Rows() + So(row, ShouldEqual, 4) + }) + Convey("Test_set_long_all_null/has_same_ele:", func() { + s, err := db.RunScript("a=set(take(00l,6));a") + So(err, ShouldBeNil) + result := s.(*model.Set) + re := result.Vector.Data.Value() + for j := 0; j < len(re); j++ { + So(re[j], ShouldEqual, model.NullLong) + } + So(result.Vector.RowCount, ShouldEqual, 1) + reType := result.GetDataType() + So(reType, ShouldEqual, 5) + reTypeString := result.GetDataTypeString() + So(reTypeString, ShouldEqual, "long") + form := result.GetDataForm() + So(form, ShouldEqual, 4) + row := result.Rows() + So(row, ShouldEqual, 1) + }) + 
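+ // Close is wrapped in So as well, so failing to release the connection fails the test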
So(db.Close(), ShouldBeNil) + }) +} +func Test_Set_DownLoad_DataType_double(t *testing.T) { + Convey("Test_set_double:", t, func() { + db, err := api.NewSimpleDolphinDBClient(context.TODO(), setup.Address, setup.UserName, setup.Password) + So(err, ShouldBeNil) + Convey("Test_set_double_not_null:", func() { + s, err := db.RunScript("a=set(-2.0 25.0 -917.0 1024.0);a") + So(err, ShouldBeNil) + result := s.(*model.Set) + re := result.Vector.Data.Value() + zx := []float64{1024.0, -917.0, 25.0, -2.0} + for j := 0; j < len(re); j++ { + So(re[j], ShouldBeIn, zx) + } + So(result.Vector.RowCount, ShouldEqual, 4) + reType := result.GetDataType() + So(reType, ShouldEqual, 16) + reTypeString := result.GetDataTypeString() + So(reTypeString, ShouldEqual, "double") + form := result.GetDataForm() + So(form, ShouldEqual, 4) + row := result.Rows() + So(row, ShouldEqual, 4) + }) + Convey("Test_set_double_has_null:", func() { + s, err := db.RunScript("a=set(-2.0 NULL -917.0 1024.0);a") + So(err, ShouldBeNil) + result := s.(*model.Set) + re := result.Vector.Data.Value() + zx := []float64{1024, -917, model.NullDouble, -2} + for j := 0; j < len(re); j++ { + So(re[j], ShouldBeIn, zx) + } + So(result.Vector.RowCount, ShouldEqual, 4) + reType := result.GetDataType() + So(reType, ShouldEqual, 16) + reTypeString := result.GetDataTypeString() + So(reTypeString, ShouldEqual, "double") + form := result.GetDataForm() + So(form, ShouldEqual, 4) + row := result.Rows() + So(row, ShouldEqual, 4) + }) + Convey("Test_set_double_all_null/has_same_ele:", func() { + s, err := db.RunScript("a=set(double['','','','']);a") + So(err, ShouldBeNil) + result := s.(*model.Set) + re := result.Vector.Data.Value() + for j := 0; j < len(re); j++ { + So(re[j], ShouldEqual, model.NullDouble) + } + So(result.Vector.RowCount, ShouldEqual, 1) + reType := result.GetDataType() + So(reType, ShouldEqual, 16) + reTypeString := result.GetDataTypeString() + So(reTypeString, ShouldEqual, "double") + form := result.GetDataForm() + So(form, ShouldEqual, 4) + row := result.Rows() + So(row, ShouldEqual, 1) + }) + So(db.Close(), ShouldBeNil) + }) +} +func Test_Set_DownLoad_DataType_float(t *testing.T) { + Convey("Test_set_float:", t, func() { + db, err := api.NewSimpleDolphinDBClient(context.TODO(), setup.Address, setup.UserName, setup.Password) + So(err, ShouldBeNil) + Convey("Test_set_float_not_null:", func() { + s, err := db.RunScript("a=set(-2.0f 25.0f -917.0f 1024.0f);a") + So(err, ShouldBeNil) + result := s.(*model.Set) + re := result.Vector.Data.Value() + zx := []float32{1024.0, -917.0, 25.0, -2.0} + for j := 0; j < len(re); j++ { + So(re[j], ShouldBeIn, zx) + } + So(result.Vector.RowCount, ShouldEqual, 4) + reType := result.GetDataType() + So(reType, ShouldEqual, 15) + reTypeString := result.GetDataTypeString() + So(reTypeString, ShouldEqual, "float") + form := result.GetDataForm() + So(form, ShouldEqual, 4) + row := result.Rows() + So(row, ShouldEqual, 4) + }) + Convey("Test_set_float_has_null:", func() { + s, err := db.RunScript("a=set(-2.0f NULL -917.0f 1024.0f);a") + So(err, ShouldBeNil) + result := s.(*model.Set) + re := result.Vector.Data.Value() + zx := []float32{1024.0, -917.0, model.NullFloat, -2.0} + for j := 0; j < len(re); j++ { + So(re[j], ShouldBeIn, zx) + } + So(result.Vector.RowCount, ShouldEqual, 4) + reType := result.GetDataType() + So(reType, ShouldEqual, 15) + reTypeString := result.GetDataTypeString() + So(reTypeString, ShouldEqual, "float") + form := result.GetDataForm() + So(form, ShouldEqual, 4) + row := result.Rows() + So(row, 
ShouldEqual, 4) + }) + Convey("Test_set_float_all_null/has_same_ele:", func() { + s, err := db.RunScript("a=set(take(00f,6));a") + So(err, ShouldBeNil) + result := s.(*model.Set) + re := result.Vector.Data.Value() + for j := 0; j < len(re); j++ { + So(re[j], ShouldEqual, model.NullFloat) + } + So(result.Vector.RowCount, ShouldEqual, 1) + reType := result.GetDataType() + So(reType, ShouldEqual, 15) + reTypeString := result.GetDataTypeString() + So(reTypeString, ShouldEqual, "float") + form := result.GetDataForm() + So(form, ShouldEqual, 4) + row := result.Rows() + So(row, ShouldEqual, 1) + }) + So(db.Close(), ShouldBeNil) + }) +} +func Test_Set_DownLoad_DataType_date(t *testing.T) { + Convey("Test_set_date:", t, func() { + db, err := api.NewSimpleDolphinDBClient(context.TODO(), setup.Address, setup.UserName, setup.Password) + So(err, ShouldBeNil) + Convey("Test_set_date_not_null:", func() { + s, err := db.RunScript("a = set(1969.12.31 2006.01.02 1970.01.01 2006.01.03);a") + So(err, ShouldBeNil) + result := s.(*model.Set) + re := result.Vector.Data.Value() + zx := []time.Time{time.Date(1969, 12, 31, 0, 0, 0, 0, time.UTC), time.Date(2006, 1, 2, 0, 0, 0, 0, time.UTC), time.Date(1970, 1, 1, 0, 0, 0, 0, time.UTC), time.Date(2006, 1, 3, 0, 0, 0, 0, time.UTC)} + for j := 0; j < len(re); j++ { + So(re[j], ShouldBeIn, zx) + } + So(result.Vector.RowCount, ShouldEqual, 4) + reType := result.GetDataType() + So(reType, ShouldEqual, 6) + reTypeString := result.GetDataTypeString() + So(reTypeString, ShouldEqual, "date") + form := result.GetDataForm() + So(form, ShouldEqual, 4) + row := result.Rows() + So(row, ShouldEqual, 4) + }) + Convey("Test_set_date_has_null:", func() { + s, err := db.RunScript("a = set(1969.12.31 NULL 1970.01.01 2006.01.03);a") + So(err, ShouldBeNil) + result := s.(*model.Set) + re := result.Vector.Data.Value() + zx := []time.Time{time.Date(1969, 12, 31, 0, 0, 0, 0, time.UTC), time.Date(1, 1, 1, 0, 0, 0, 0, time.UTC), time.Date(1970, 1, 1, 0, 0, 0, 0, time.UTC), time.Date(2006, 1, 3, 0, 0, 0, 0, time.UTC)} + for j := 0; j < len(re); j++ { + So(re[j], ShouldBeIn, zx) + } + So(result.Vector.RowCount, ShouldEqual, 4) + reType := result.GetDataType() + So(reType, ShouldEqual, 6) + reTypeString := result.GetDataTypeString() + So(reTypeString, ShouldEqual, "date") + form := result.GetDataForm() + So(form, ShouldEqual, 4) + row := result.Rows() + So(row, ShouldEqual, 4) + }) + Convey("Test_set_date_all_null/has_same_ele:", func() { + s, err := db.RunScript("a = set(take(00d,4));a") + So(err, ShouldBeNil) + result := s.(*model.Set) + re := result.Vector.Data.Value() + for j := 0; j < len(re); j++ { + So(re[j], ShouldEqual, time.Date(1, 1, 1, 0, 0, 0, 0, time.UTC)) + } + So(result.Vector.RowCount, ShouldEqual, 1) + reType := result.GetDataType() + So(reType, ShouldEqual, 6) + reTypeString := result.GetDataTypeString() + So(reTypeString, ShouldEqual, "date") + form := result.GetDataForm() + So(form, ShouldEqual, 4) + row := result.Rows() + So(row, ShouldEqual, 1) + }) + So(db.Close(), ShouldBeNil) + }) +} +func Test_Set_DownLoad_DataType_month(t *testing.T) { + Convey("Test_set_month:", t, func() { + db, err := api.NewSimpleDolphinDBClient(context.TODO(), setup.Address, setup.UserName, setup.Password) + So(err, ShouldBeNil) + Convey("Test_set_month_not_null:", func() { + s, err := db.RunScript("a = set(1969.12M 2006.01M 1970.01M 2006.02M);a") + So(err, ShouldBeNil) + result := s.(*model.Set) + re := result.Vector.Data.Value() + zx := []time.Time{time.Date(1969, 12, 1, 0, 0, 0, 0, time.UTC),
time.Date(2006, 1, 1, 0, 0, 0, 0, time.UTC), time.Date(1970, 1, 1, 0, 0, 0, 0, time.UTC), time.Date(2006, 2, 1, 0, 0, 0, 0, time.UTC)} + for j := 0; j < len(re); j++ { + So(re[j], ShouldBeIn, zx) + } + So(result.Vector.RowCount, ShouldEqual, 4) + reType := result.GetDataType() + So(reType, ShouldEqual, 7) + reTypeString := result.GetDataTypeString() + So(reTypeString, ShouldEqual, "month") + form := result.GetDataForm() + So(form, ShouldEqual, 4) + row := result.Rows() + So(row, ShouldEqual, 4) + }) + Convey("Test_set_month_has_null:", func() { + s, err := db.RunScript("a = set(1969.12M NULL 1970.01M 2006.02M);a") + So(err, ShouldBeNil) + result := s.(*model.Set) + re := result.Vector.Data.Value() + zx := []time.Time{time.Date(1969, 12, 1, 0, 0, 0, 0, time.UTC), time.Date(1, 1, 1, 0, 0, 0, 0, time.UTC), time.Date(1970, 1, 1, 0, 0, 0, 0, time.UTC), time.Date(2006, 2, 1, 0, 0, 0, 0, time.UTC)} + for j := 0; j < len(re); j++ { + So(re[j], ShouldBeIn, zx) + } + So(result.Vector.RowCount, ShouldEqual, 4) + reType := result.GetDataType() + So(reType, ShouldEqual, 7) + reTypeString := result.GetDataTypeString() + So(reTypeString, ShouldEqual, "month") + form := result.GetDataForm() + So(form, ShouldEqual, 4) + row := result.Rows() + So(row, ShouldEqual, 4) + }) + Convey("Test_set_month_all_null/has_same_ele:", func() { + s, err := db.RunScript("a = set(month['','','','','']);a") + So(err, ShouldBeNil) + result := s.(*model.Set) + re := result.Vector.Data.Value() + zx := time.Date(1, 1, 1, 0, 0, 0, 0, time.UTC) + for j := 0; j < len(re); j++ { + So(re[j], ShouldEqual, zx) + } + So(result.Vector.RowCount, ShouldEqual, 1) + reType := result.GetDataType() + So(reType, ShouldEqual, 7) + reTypeString := result.GetDataTypeString() + So(reTypeString, ShouldEqual, "month") + form := result.GetDataForm() + So(form, ShouldEqual, 4) + row := result.Rows() + So(row, ShouldEqual, 1) + }) + So(db.Close(), ShouldBeNil) + }) +} +func Test_Set_DownLoad_DataType_time(t *testing.T) { + Convey("Test_set_time:", t, func() { + db, err := api.NewSimpleDolphinDBClient(context.TODO(), setup.Address, setup.UserName, setup.Password) + So(err, ShouldBeNil) + Convey("Test_set_time_not_null:", func() { + s, err := db.RunScript("a = set(23:59:59.999 00:00:01.000 09:11:25.000 11:22:33.000);a") + So(err, ShouldBeNil) + result := s.(*model.Set) + re := result.Vector.Data.Value() + zx := []time.Time{time.Date(1970, 1, 1, 23, 59, 59, 999000000, time.UTC), time.Date(1970, 1, 1, 0, 0, 1, 0, time.UTC), time.Date(1970, 1, 1, 9, 11, 25, 0, time.UTC), time.Date(1970, 1, 1, 11, 22, 33, 0, time.UTC)} + for j := 0; j < len(re); j++ { + So(re[j], ShouldBeIn, zx) + } + So(result.Vector.RowCount, ShouldEqual, 4) + reType := result.GetDataType() + So(reType, ShouldEqual, 8) + reTypeString := result.GetDataTypeString() + So(reTypeString, ShouldEqual, "time") + form := result.GetDataForm() + So(form, ShouldEqual, 4) + row := result.Rows() + So(row, ShouldEqual, 4) + }) + Convey("Test_set_time_has_null:", func() { + s, err := db.RunScript("a = set(23:59:59.999 00:00:01.000 NULL 11:22:33.000);a") + So(err, ShouldBeNil) + result := s.(*model.Set) + re := result.Vector.Data.Value() + zx := []time.Time{time.Date(1970, 1, 1, 23, 59, 59, 999000000, time.UTC), time.Date(1970, 1, 1, 0, 0, 1, 0, time.UTC), time.Date(1, 1, 1, 0, 0, 0, 0, time.UTC), time.Date(1970, 1, 1, 11, 22, 33, 0, time.UTC)} + for j := 0; j < len(re); j++ { + So(re[j], ShouldBeIn, zx) + } + So(result.Vector.RowCount, ShouldEqual, 4) + reType := result.GetDataType() + So(reType, 
ShouldEqual, 8) + reTypeString := result.GetDataTypeString() + So(reTypeString, ShouldEqual, "time") + form := result.GetDataForm() + So(form, ShouldEqual, 4) + row := result.Rows() + So(row, ShouldEqual, 4) + }) + Convey("Test_set_time_all_null/has_same_ele:", func() { + s, err := db.RunScript("a = set(time['','','','','','']);a") + So(err, ShouldBeNil) + result := s.(*model.Set) + re := result.Vector.Data.Value() + zx := time.Date(1, 1, 1, 0, 0, 0, 0, time.UTC) + for j := 0; j < len(re); j++ { + So(re[j], ShouldEqual, zx) + } + So(result.Vector.RowCount, ShouldEqual, 1) + reType := result.GetDataType() + So(reType, ShouldEqual, 8) + reTypeString := result.GetDataTypeString() + So(reTypeString, ShouldEqual, "time") + form := result.GetDataForm() + So(form, ShouldEqual, 4) + row := result.Rows() + So(row, ShouldEqual, 1) + }) + So(db.Close(), ShouldBeNil) + }) +} +func Test_Set_DownLoad_DataType_minute(t *testing.T) { + Convey("Test_set_minute:", t, func() { + db, err := api.NewSimpleDolphinDBClient(context.TODO(), setup.Address, setup.UserName, setup.Password) + So(err, ShouldBeNil) + Convey("Test_set_minute_not_null:", func() { + s, err := db.RunScript("a = set(23:59m 00:00m 09:11m 11:22m);a") + So(err, ShouldBeNil) + result := s.(*model.Set) + re := result.Vector.Data.Value() + zx := []time.Time{time.Date(1970, 1, 1, 23, 59, 0, 0, time.UTC), time.Date(1970, 1, 1, 0, 0, 0, 0, time.UTC), time.Date(1970, 1, 1, 9, 11, 0, 0, time.UTC), time.Date(1970, 1, 1, 11, 22, 0, 0, time.UTC)} + for j := 0; j < len(re); j++ { + So(re[j], ShouldBeIn, zx) + } + So(result.Vector.RowCount, ShouldEqual, 4) + reType := result.GetDataType() + So(reType, ShouldEqual, 9) + reTypeString := result.GetDataTypeString() + So(reTypeString, ShouldEqual, "minute") + form := result.GetDataForm() + So(form, ShouldEqual, 4) + row := result.Rows() + So(row, ShouldEqual, 4) + }) + Convey("Test_set_minute_has_null:", func() { + s, err := db.RunScript("a = set(23:59m 00:00m NULL 11:22m);a") + So(err, ShouldBeNil) + result := s.(*model.Set) + re := result.Vector.Data.Value() + zx := []time.Time{time.Date(1970, 1, 1, 23, 59, 0, 0, time.UTC), time.Date(1970, 1, 1, 0, 0, 0, 0, time.UTC), time.Date(1, 1, 1, 0, 0, 0, 0, time.UTC), time.Date(1970, 1, 1, 11, 22, 0, 0, time.UTC)} + for j := 0; j < len(re); j++ { + So(re[j], ShouldBeIn, zx) + } + So(result.Vector.RowCount, ShouldEqual, 4) + reType := result.GetDataType() + So(reType, ShouldEqual, 9) + reTypeString := result.GetDataTypeString() + So(reTypeString, ShouldEqual, "minute") + form := result.GetDataForm() + So(form, ShouldEqual, 4) + row := result.Rows() + So(row, ShouldEqual, 4) + }) + Convey("Test_set_minute_all_null/has_same_ele:", func() { + s, err := db.RunScript("a = set(minute['','','','','','']);a") + So(err, ShouldBeNil) + result := s.(*model.Set) + re := result.Vector.Data.Value() + zx := time.Date(1, 1, 1, 0, 0, 0, 0, time.UTC) + for j := 0; j < len(re); j++ { + So(re[j], ShouldEqual, zx) + } + So(result.Vector.RowCount, ShouldEqual, 1) + reType := result.GetDataType() + So(reType, ShouldEqual, 9) + reTypeString := result.GetDataTypeString() + So(reTypeString, ShouldEqual, "minute") + form := result.GetDataForm() + So(form, ShouldEqual, 4) + row := result.Rows() + So(row, ShouldEqual, 1) + }) + So(db.Close(), ShouldBeNil) + }) +} +func Test_Set_DownLoad_DataType_second(t *testing.T) { + Convey("Test_set_second:", t, func() { + db, err := api.NewSimpleDolphinDBClient(context.TODO(), setup.Address, setup.UserName, setup.Password) + So(err, ShouldBeNil) + 
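+ // the subtests below download SECOND sets and, per the assertions, verify the values, the type id (10), the readable type name, the form (4 = set) and the row count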
Convey("Test_set_second_not_null:", func() { + s, err := db.RunScript("a = set(23:59:59 00:00:00 09:11:59 11:22:33);a") + So(err, ShouldBeNil) + result := s.(*model.Set) + re := result.Vector.Data.Value() + zx := []time.Time{time.Date(1970, 1, 1, 23, 59, 59, 0, time.UTC), time.Date(1970, 1, 1, 0, 0, 0, 0, time.UTC), time.Date(1970, 1, 1, 9, 11, 59, 0, time.UTC), time.Date(1970, 1, 1, 11, 22, 33, 0, time.UTC)} + for j := 0; j < len(re); j++ { + So(re[j], ShouldBeIn, zx) + } + So(result.Vector.RowCount, ShouldEqual, 4) + reType := result.GetDataType() + So(reType, ShouldEqual, 10) + reTypeString := result.GetDataTypeString() + So(reTypeString, ShouldEqual, "second") + form := result.GetDataForm() + So(form, ShouldEqual, 4) + row := result.Rows() + So(row, ShouldEqual, 4) + }) + Convey("Test_set_second_has_null:", func() { + s, err := db.RunScript("a = set(23:59:59 00:00:00 NULL 11:22:33);a") + So(err, ShouldBeNil) + result := s.(*model.Set) + re := result.Vector.Data.Value() + zx := []time.Time{time.Date(1970, 1, 1, 23, 59, 59, 0, time.UTC), time.Date(1970, 1, 1, 0, 0, 0, 0, time.UTC), time.Date(1, 1, 1, 0, 0, 0, 0, time.UTC), time.Date(1970, 1, 1, 11, 22, 33, 0, time.UTC)} + for j := 0; j < len(re); j++ { + So(re[j], ShouldBeIn, zx) + } + So(result.Vector.RowCount, ShouldEqual, 4) + reType := result.GetDataType() + So(reType, ShouldEqual, 10) + reTypeString := result.GetDataTypeString() + So(reTypeString, ShouldEqual, "second") + form := result.GetDataForm() + So(form, ShouldEqual, 4) + row := result.Rows() + So(row, ShouldEqual, 4) + }) + Convey("Test_set_second_all_null/has_same_ele:", func() { + s, err := db.RunScript("a = set(take(00s,6));a") + So(err, ShouldBeNil) + result := s.(*model.Set) + re := result.Vector.Data.Value() + zx := time.Date(1, 1, 1, 0, 0, 0, 0, time.UTC) + for j := 0; j < len(re); j++ { + So(re[j], ShouldEqual, zx) + } + So(result.Vector.RowCount, ShouldEqual, 1) + reType := result.GetDataType() + So(reType, ShouldEqual, 10) + reTypeString := result.GetDataTypeString() + So(reTypeString, ShouldEqual, "second") + form := result.GetDataForm() + So(form, ShouldEqual, 4) + row := result.Rows() + So(row, ShouldEqual, 1) + }) + So(db.Close(), ShouldBeNil) + }) +} +func Test_Set_DownLoad_DataType_datetime(t *testing.T) { + Convey("Test_set_datetime:", t, func() { + db, err := api.NewSimpleDolphinDBClient(context.TODO(), setup.Address, setup.UserName, setup.Password) + So(err, ShouldBeNil) + Convey("Test_set_datetime_not_null:", func() { + s, err := db.RunScript("a = set(1969.12.31 23:59:59 2006.01.02 15:04:04 1970.01.01 00:00:00 2006.01.03 15:04:05);a") + So(err, ShouldBeNil) + result := s.(*model.Set) + re := result.Vector.Data.Value() + zx := []time.Time{time.Date(1969, 12, 31, 23, 59, 59, 0, time.UTC), time.Date(2006, 1, 2, 15, 4, 4, 0, time.UTC), time.Date(1970, 1, 1, 0, 0, 0, 0, time.UTC), time.Date(2006, 1, 3, 15, 4, 5, 0, time.UTC)} + for j := 0; j < len(re); j++ { + So(re[j], ShouldBeIn, zx) + } + So(result.Vector.RowCount, ShouldEqual, 4) + reType := result.GetDataType() + So(reType, ShouldEqual, 11) + reTypeString := result.GetDataTypeString() + So(reTypeString, ShouldEqual, "datetime") + form := result.GetDataForm() + So(form, ShouldEqual, 4) + row := result.Rows() + So(row, ShouldEqual, 4) + }) + Convey("Test_set_datetime_has_null:", func() { + s, err := db.RunScript("a = set(1969.12.31 23:59:59 2006.01.02 15:04:04 NULL 2006.01.03 15:04:05);a") + So(err, ShouldBeNil) + result := s.(*model.Set) + re := result.Vector.Data.Value() + zx := 
[]time.Time{time.Date(1969, 12, 31, 23, 59, 59, 0, time.UTC), time.Date(2006, 1, 2, 15, 4, 4, 0, time.UTC), time.Date(1, 1, 1, 0, 0, 0, 0, time.UTC), time.Date(2006, 1, 3, 15, 4, 5, 0, time.UTC)} + for j := 0; j < len(re); j++ { + So(re[j], ShouldBeIn, zx) + } + So(result.Vector.RowCount, ShouldEqual, 4) + reType := result.GetDataType() + So(reType, ShouldEqual, 11) + reTypeString := result.GetDataTypeString() + So(reTypeString, ShouldEqual, "datetime") + form := result.GetDataForm() + So(form, ShouldEqual, 4) + row := result.Rows() + So(row, ShouldEqual, 4) + }) + Convey("Test_set_datetime_all_null/has_same_ele:", func() { + s, err := db.RunScript("a = set(datetime['','','','','','']);a") + So(err, ShouldBeNil) + result := s.(*model.Set) + re := result.Vector.Data.Value() + zx := time.Date(1, 1, 1, 0, 0, 0, 0, time.UTC) + for j := 0; j < len(re); j++ { + So(re[j], ShouldEqual, zx) + } + So(result.Vector.RowCount, ShouldEqual, 1) + reType := result.GetDataType() + So(reType, ShouldEqual, 11) + reTypeString := result.GetDataTypeString() + So(reTypeString, ShouldEqual, "datetime") + form := result.GetDataForm() + So(form, ShouldEqual, 4) + row := result.Rows() + So(row, ShouldEqual, 1) + }) + So(db.Close(), ShouldBeNil) + }) +} +func Test_Set_DownLoad_DataType_timestamp(t *testing.T) { + Convey("Test_set_timestamp:", t, func() { + db, err := api.NewSimpleDolphinDBClient(context.TODO(), setup.Address, setup.UserName, setup.Password) + So(err, ShouldBeNil) + Convey("Test_set_timestamp_not_null:", func() { + s, err := db.RunScript("a = set(1969.12.31 23:59:59.999 2006.01.02 15:04:04.999 1970.01.01 00:00:00.000 2006.01.03 15:04:05.999);a") + So(err, ShouldBeNil) + result := s.(*model.Set) + re := result.Vector.Data.Value() + zx := []time.Time{time.Date(1969, 12, 31, 23, 59, 59, 999000000, time.UTC), time.Date(2006, 1, 2, 15, 4, 4, 999000000, time.UTC), time.Date(1970, 1, 1, 0, 0, 0, 0, time.UTC), time.Date(2006, 1, 3, 15, 4, 5, 999000000, time.UTC)} + for j := 0; j < len(re); j++ { + So(re[j], ShouldBeIn, zx) + } + So(result.Vector.RowCount, ShouldEqual, 4) + reType := result.GetDataType() + So(reType, ShouldEqual, 12) + reTypeString := result.GetDataTypeString() + So(reTypeString, ShouldEqual, "timestamp") + form := result.GetDataForm() + So(form, ShouldEqual, 4) + row := result.Rows() + So(row, ShouldEqual, 4) + }) + Convey("Test_set_timestamp_has_null:", func() { + s, err := db.RunScript("a = set(1969.12.31 23:59:59.999 2006.01.02 15:04:04.999 NULL 2006.01.03 15:04:05.999);a") + So(err, ShouldBeNil) + result := s.(*model.Set) + re := result.Vector.Data.Value() + zx := []time.Time{time.Date(1969, 12, 31, 23, 59, 59, 999000000, time.UTC), time.Date(2006, 1, 2, 15, 4, 4, 999000000, time.UTC), time.Date(1, 1, 1, 0, 0, 0, 0, time.UTC), time.Date(2006, 1, 3, 15, 4, 5, 999000000, time.UTC)} + for j := 0; j < len(re); j++ { + So(re[j], ShouldBeIn, zx) + } + So(result.Vector.RowCount, ShouldEqual, 4) + reType := result.GetDataType() + So(reType, ShouldEqual, 12) + reTypeString := result.GetDataTypeString() + So(reTypeString, ShouldEqual, "timestamp") + form := result.GetDataForm() + So(form, ShouldEqual, 4) + row := result.Rows() + So(row, ShouldEqual, 4) + }) + Convey("Test_set_timestamp_all_null/has_same_ele:", func() { + s, err := db.RunScript("a = set(timestamp['','','','','','']);a") + So(err, ShouldBeNil) + result := s.(*model.Set) + re := result.Vector.Data.Value() + zx := time.Date(1, 1, 1, 0, 0, 0, 0, time.UTC) + for j := 0; j < len(re); j++ { + So(re[j], ShouldEqual, zx) + } + 
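+ // NULL timestamps decode to Go's zero time.Time, and repeated NULLs collapse to a single set element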
So(result.Vector.RowCount, ShouldEqual, 1) + reType := result.GetDataType() + So(reType, ShouldEqual, 12) + reTypeString := result.GetDataTypeString() + So(reTypeString, ShouldEqual, "timestamp") + form := result.GetDataForm() + So(form, ShouldEqual, 4) + row := result.Rows() + So(row, ShouldEqual, 1) + }) + So(db.Close(), ShouldBeNil) + }) +} +func Test_Set_DownLoad_DataType_nanotime(t *testing.T) { + Convey("Test_set_nanotime:", t, func() { + db, err := api.NewSimpleDolphinDBClient(context.TODO(), setup.Address, setup.UserName, setup.Password) + So(err, ShouldBeNil) + Convey("Test_set_nanotime_not_null:", func() { + s, err := db.RunScript("a = set(23:59:59.999999999 00:00:00.000000000 09:11:59.999999999 11:22:33.445566778);a") + So(err, ShouldBeNil) + result := s.(*model.Set) + re := result.Vector.Data.Value() + zx := []time.Time{time.Date(1970, 1, 1, 23, 59, 59, 999999999, time.UTC), time.Date(1970, 1, 1, 0, 0, 0, 0, time.UTC), time.Date(1970, 1, 1, 9, 11, 59, 999999999, time.UTC), time.Date(1970, 1, 1, 11, 22, 33, 445566778, time.UTC)} + for j := 0; j < len(re); j++ { + So(re[j], ShouldBeIn, zx) + } + So(result.Vector.RowCount, ShouldEqual, 4) + reType := result.GetDataType() + So(reType, ShouldEqual, 13) + reTypeString := result.GetDataTypeString() + So(reTypeString, ShouldEqual, "nanotime") + form := result.GetDataForm() + So(form, ShouldEqual, 4) + row := result.Rows() + So(row, ShouldEqual, 4) + }) + Convey("Test_set_nanotime_has_null:", func() { + s, err := db.RunScript("a = set(23:59:59.999999999 00:00:00.000000000 NULL 11:22:33.445566778);a") + So(err, ShouldBeNil) + result := s.(*model.Set) + re := result.Vector.Data.Value() + zx := []time.Time{time.Date(1970, 1, 1, 23, 59, 59, 999999999, time.UTC), time.Date(1970, 1, 1, 0, 0, 0, 0, time.UTC), time.Date(1, 1, 1, 0, 0, 0, 0, time.UTC), time.Date(1970, 1, 1, 11, 22, 33, 445566778, time.UTC)} + for j := 0; j < len(re); j++ { + So(re[j], ShouldBeIn, zx) + } + So(result.Vector.RowCount, ShouldEqual, 4) + reType := result.GetDataType() + So(reType, ShouldEqual, 13) + reTypeString := result.GetDataTypeString() + So(reTypeString, ShouldEqual, "nanotime") + form := result.GetDataForm() + So(form, ShouldEqual, 4) + row := result.Rows() + So(row, ShouldEqual, 4) + }) + Convey("Test_set_nanotime_all_null/has_same_ele:", func() { + s, err := db.RunScript("a = set(nanotime['','','','','','']);a") + So(err, ShouldBeNil) + result := s.(*model.Set) + re := result.Vector.Data.Value() + zx := time.Date(1, 1, 1, 0, 0, 0, 0, time.UTC) + for j := 0; j < len(re); j++ { + So(re[j], ShouldEqual, zx) + } + So(result.Vector.RowCount, ShouldEqual, 1) + reType := result.GetDataType() + So(reType, ShouldEqual, 13) + reTypeString := result.GetDataTypeString() + So(reTypeString, ShouldEqual, "nanotime") + form := result.GetDataForm() + So(form, ShouldEqual, 4) + row := result.Rows() + So(row, ShouldEqual, 1) + }) + So(db.Close(), ShouldBeNil) + }) +} +func Test_Set_DownLoad_DataType_nanotimestamp(t *testing.T) { + Convey("Test_set_nanotimestamp:", t, func() { + db, err := api.NewSimpleDolphinDBClient(context.TODO(), setup.Address, setup.UserName, setup.Password) + So(err, ShouldBeNil) + Convey("Test_set_nanotimestamp_not_null:", func() { + s, err := db.RunScript("a = set(1969.12.31 23:59:59.999999999 2006.01.02 15:04:04.999999999 1970.01.01 00:00:00.000000000 2006.01.03 15:04:05.999999999);a") + So(err, ShouldBeNil) + result := s.(*model.Set) + re := result.Vector.Data.Value() + zx := []time.Time{time.Date(1969, 12, 31, 23, 59, 59, 999999999, time.UTC), 
time.Date(2006, 1, 2, 15, 4, 4, 999999999, time.UTC), time.Date(1970, 1, 1, 0, 0, 0, 0, time.UTC), time.Date(2006, 1, 3, 15, 4, 5, 999999999, time.UTC)} + for j := 0; j < len(re); j++ { + So(re[j], ShouldBeIn, zx) + } + So(result.Vector.RowCount, ShouldEqual, 4) + reType := result.GetDataType() + So(reType, ShouldEqual, 14) + reTypeString := result.GetDataTypeString() + So(reTypeString, ShouldEqual, "nanotimestamp") + form := result.GetDataForm() + So(form, ShouldEqual, 4) + row := result.Rows() + So(row, ShouldEqual, 4) + }) + Convey("Test_set_nanotimestamp_has_null:", func() { + s, err := db.RunScript("a = set(1969.12.31 23:59:59.999999999 2006.01.02 15:04:04.999999999 NULL 2006.01.03 15:04:05.999999999);a") + So(err, ShouldBeNil) + result := s.(*model.Set) + re := result.Vector.Data.Value() + zx := []time.Time{time.Date(1969, 12, 31, 23, 59, 59, 999999999, time.UTC), time.Date(2006, 1, 2, 15, 4, 4, 999999999, time.UTC), time.Date(1, 1, 1, 0, 0, 0, 0, time.UTC), time.Date(2006, 1, 3, 15, 4, 5, 999999999, time.UTC)} + for j := 0; j < len(re); j++ { + So(re[j], ShouldBeIn, zx) + } + So(result.Vector.RowCount, ShouldEqual, 4) + reType := result.GetDataType() + So(reType, ShouldEqual, 14) + reTypeString := result.GetDataTypeString() + So(reTypeString, ShouldEqual, "nanotimestamp") + form := result.GetDataForm() + So(form, ShouldEqual, 4) + row := result.Rows() + So(row, ShouldEqual, 4) + }) + Convey("Test_set_nanotimestamp_all_null/has_same_ele:", func() { + s, err := db.RunScript("a = set(nanotimestamp['','','','','','']);a") + So(err, ShouldBeNil) + result := s.(*model.Set) + re := result.Vector.Data.Value() + zx := time.Date(1, 1, 1, 0, 0, 0, 0, time.UTC) + for j := 0; j < len(re); j++ { + So(re[j], ShouldEqual, zx) + } + So(result.Vector.RowCount, ShouldEqual, 1) + reType := result.GetDataType() + So(reType, ShouldEqual, 14) + reTypeString := result.GetDataTypeString() + So(reTypeString, ShouldEqual, "nanotimestamp") + form := result.GetDataForm() + So(form, ShouldEqual, 4) + row := result.Rows() + So(row, ShouldEqual, 1) + }) + So(db.Close(), ShouldBeNil) + }) +} +func Test_Set_DownLoad_DataType_datehour(t *testing.T) { + Convey("Test_set_datehour:", t, func() { + db, err := api.NewSimpleDolphinDBClient(context.TODO(), setup.Address, setup.UserName, setup.Password) + So(err, ShouldBeNil) + Convey("Test_set_datehour_not_null:", func() { + s, err := db.RunScript("a = set(datehour[1969.12.31 23:14:11,2006.01.02 15:15:11,1970.01.01 00:16:11,2006.01.03 15:17:11]);a") + So(err, ShouldBeNil) + result := s.(*model.Set) + re := result.Vector.Data.Value() + zx := []time.Time{time.Date(1969, 12, 31, 23, 0, 0, 0, time.UTC), time.Date(2006, 1, 2, 15, 0, 0, 0, time.UTC), time.Date(1970, 1, 1, 0, 0, 0, 0, time.UTC), time.Date(2006, 1, 3, 15, 0, 0, 0, time.UTC)} + for j := 0; j < len(re); j++ { + So(re[j], ShouldBeIn, zx) + } + So(result.Vector.RowCount, ShouldEqual, 4) + reType := result.GetDataType() + So(reType, ShouldEqual, 28) + reTypeString := result.GetDataTypeString() + So(reTypeString, ShouldEqual, "dateHour") + form := result.GetDataForm() + So(form, ShouldEqual, 4) + row := result.Rows() + So(row, ShouldEqual, 4) + }) + Convey("Test_set_datehour_has_null:", func() { + s, err := db.RunScript("a = set(datehour[1969.12.31 23:14:11,2006.01.02 15:15:11,,2006.01.03 15:17:11]);a") + So(err, ShouldBeNil) + result := s.(*model.Set) + re := result.Vector.Data.Value() + zx := []time.Time{time.Date(1969, 12, 31, 23, 0, 0, 0, time.UTC), time.Date(2006, 1, 2, 15, 0, 0, 0, time.UTC), time.Date(1, 1, 1, 0, 
0, 0, 0, time.UTC), time.Date(2006, 1, 3, 15, 0, 0, 0, time.UTC)} + for j := 0; j < len(re); j++ { + So(re[j], ShouldBeIn, zx) + } + So(result.Vector.RowCount, ShouldEqual, 4) + reType := result.GetDataType() + So(reType, ShouldEqual, 28) + reTypeString := result.GetDataTypeString() + So(reTypeString, ShouldEqual, "dateHour") + form := result.GetDataForm() + So(form, ShouldEqual, 4) + row := result.Rows() + So(row, ShouldEqual, 4) + }) + Convey("Test_set_datehour_all_null:", func() { + s, err := db.RunScript("a = set(datehour['','','','','','','']);a") + So(err, ShouldBeNil) + result := s.(*model.Set) + re := result.Vector.Data.Value() + zx := time.Date(1, 1, 1, 0, 0, 0, 0, time.UTC) + for j := 0; j < len(re); j++ { + So(re[j], ShouldEqual, zx) + } + So(result.Vector.RowCount, ShouldEqual, 1) + reType := result.GetDataType() + So(reType, ShouldEqual, 28) + reTypeString := result.GetDataTypeString() + So(reTypeString, ShouldEqual, "dateHour") + form := result.GetDataForm() + So(form, ShouldEqual, 4) + row := result.Rows() + So(row, ShouldEqual, 1) + }) + So(db.Close(), ShouldBeNil) + }) +} +func Test_Set_DownLoad_DataType_uuid(t *testing.T) { + Convey("Test_set_uuid:", t, func() { + db, err := api.NewSimpleDolphinDBClient(context.TODO(), setup.Address, setup.UserName, setup.Password) + So(err, ShouldBeNil) + Convey("Test_set_uuid_not_null:", func() { + s, err := db.RunScript("a = set(uuid['cd468f9a-1834-1cf5-62b6-26270a9b5d55','c7deab2a-26f0-533d-395d-2b1c3f93116b','dc62fba4-570a-c08e-f175-68744cec24b4','6dea85d7-0e44-8eee-0baa-8ca5eb298338']);a") + So(err, ShouldBeNil) + result := s.(*model.Set) + re := result.Vector.Data.Value() + zx := []string{"6dea85d7-0e44-8eee-0baa-8ca5eb298338", "dc62fba4-570a-c08e-f175-68744cec24b4", "c7deab2a-26f0-533d-395d-2b1c3f93116b", "cd468f9a-1834-1cf5-62b6-26270a9b5d55"} + for j := 0; j < len(re); j++ { + So(re[j], ShouldBeIn, zx) + } + So(result.Vector.RowCount, ShouldEqual, 4) + reType := result.GetDataType() + So(reType, ShouldEqual, 19) + reTypeString := result.GetDataTypeString() + So(reTypeString, ShouldEqual, "uuid") + form := result.GetDataForm() + So(form, ShouldEqual, 4) + row := result.Rows() + So(row, ShouldEqual, 4) + }) + Convey("Test_set_uuid_has_null:", func() { + s, err := db.RunScript("a = set(uuid['cd468f9a-1834-1cf5-62b6-26270a9b5d55','c7deab2a-26f0-533d-395d-2b1c3f93116b','dc62fba4-570a-c08e-f175-68744cec24b4','']);a") + So(err, ShouldBeNil) + result := s.(*model.Set) + re := result.Vector.Data.Value() + zx := []string{model.NullUUID, "dc62fba4-570a-c08e-f175-68744cec24b4", "c7deab2a-26f0-533d-395d-2b1c3f93116b", "cd468f9a-1834-1cf5-62b6-26270a9b5d55"} + for j := 0; j < len(re); j++ { + So(re[j], ShouldBeIn, zx) + } + So(result.Vector.RowCount, ShouldEqual, 4) + reType := result.GetDataType() + So(reType, ShouldEqual, 19) + reTypeString := result.GetDataTypeString() + So(reTypeString, ShouldEqual, "uuid") + form := result.GetDataForm() + So(form, ShouldEqual, 4) + row := result.Rows() + So(row, ShouldEqual, 4) + }) + Convey("Test_set_uuid_all_null/has_same_ele:", func() { + s, err := db.RunScript("a = set(uuid['','','','','','']);a") + So(err, ShouldBeNil) + result := s.(*model.Set) + re := result.Vector.Data.Value() + for j := 0; j < len(re); j++ { + So(re[j], ShouldEqual, model.NullUUID) + } + So(result.Vector.RowCount, ShouldEqual, 1) + reType := result.GetDataType() + So(reType, ShouldEqual, 19) + reTypeString := result.GetDataTypeString() + So(reTypeString, ShouldEqual, "uuid") + form := result.GetDataForm() + So(form, ShouldEqual, 
4) + row := result.Rows() + So(row, ShouldEqual, 1) + }) + So(db.Close(), ShouldBeNil) + }) +} +func Test_Set_DownLoad_DataType_ipaddr(t *testing.T) { + Convey("Test_set_ipaddr:", t, func() { + db, err := api.NewSimpleDolphinDBClient(context.TODO(), setup.Address, setup.UserName, setup.Password) + So(err, ShouldBeNil) + Convey("Test_set_ipaddr_not_null:", func() { + s, err := db.RunScript("a = set(ipaddr['2a35:5753:12c4:e705:a700:8507:a36e:cd23','1d8:2691:125e:cdaa:2d57:7cdf:428e:f4e5','ad1:4e1e:5961:b56b:8521:9a40:fefc:89ef','40ce:6bf6:af3d:f8f:d4f8:8cef:5d37:2af3']);a") + So(err, ShouldBeNil) + result := s.(*model.Set) + re := result.Vector.Data.Value() + zx := []string{"40ce:6bf6:af3d:f8f:d4f8:8cef:5d37:2af3", "ad1:4e1e:5961:b56b:8521:9a40:fefc:89ef", "1d8:2691:125e:cdaa:2d57:7cdf:428e:f4e5", "2a35:5753:12c4:e705:a700:8507:a36e:cd23"} + for j := 0; j < len(re); j++ { + So(re[j], ShouldBeIn, zx) + } + So(result.Vector.RowCount, ShouldEqual, 4) + reType := result.GetDataType() + So(reType, ShouldEqual, 30) + reTypeString := result.GetDataTypeString() + So(reTypeString, ShouldEqual, "IP") + form := result.GetDataForm() + So(form, ShouldEqual, 4) + row := result.Rows() + So(row, ShouldEqual, 4) + }) + Convey("Test_set_ipaddr_has_null:", func() { + s, err := db.RunScript("a = set(ipaddr['2a35:5753:12c4:e705:a700:8507:a36e:cd23','','ad1:4e1e:5961:b56b:8521:9a40:fefc:89ef','40ce:6bf6:af3d:f8f:d4f8:8cef:5d37:2af3']);a") + So(err, ShouldBeNil) + result := s.(*model.Set) + re := result.Vector.Data.Value() + zx := []string{"40ce:6bf6:af3d:f8f:d4f8:8cef:5d37:2af3", "ad1:4e1e:5961:b56b:8521:9a40:fefc:89ef", model.NullIP, "2a35:5753:12c4:e705:a700:8507:a36e:cd23"} + for j := 0; j < len(re); j++ { + So(re[j], ShouldBeIn, zx) + } + So(result.Vector.RowCount, ShouldEqual, 4) + reType := result.GetDataType() + So(reType, ShouldEqual, 30) + reTypeString := result.GetDataTypeString() + So(reTypeString, ShouldEqual, "IP") + form := result.GetDataForm() + So(form, ShouldEqual, 4) + row := result.Rows() + So(row, ShouldEqual, 4) + }) + Convey("Test_set_ipaddr_all_null/has_same_ele:", func() { + s, err := db.RunScript("a = set(ipaddr['','','','','','']);a") + So(err, ShouldBeNil) + result := s.(*model.Set) + re := result.Vector.Data.Value() + for j := 0; j < len(re); j++ { + So(re[j], ShouldEqual, model.NullIP) + } + So(result.Vector.RowCount, ShouldEqual, 1) + reType := result.GetDataType() + So(reType, ShouldEqual, 30) + reTypeString := result.GetDataTypeString() + So(reTypeString, ShouldEqual, "IP") + form := result.GetDataForm() + So(form, ShouldEqual, 4) + row := result.Rows() + So(row, ShouldEqual, 1) + }) + So(db.Close(), ShouldBeNil) + }) +} +func Test_Set_DownLoad_DataType_int128(t *testing.T) { + Convey("Test_set_int128:", t, func() { + db, err := api.NewSimpleDolphinDBClient(context.TODO(), setup.Address, setup.UserName, setup.Password) + So(err, ShouldBeNil) + Convey("Test_set_int128_not_null:", func() { + s, err := db.RunScript("a = set(int128['c822209cea11798e2e8db17fa5e95d13','b6a5a2586bf064736c80a591a423b98d','84693a7c085a6b2842d8db207ea7c000','e3ddb9ec3328d2908f666dc0fed0a6bc']);a") + So(err, ShouldBeNil) + result := s.(*model.Set) + re := result.Vector.Data.Value() + zx := []string{"e3ddb9ec3328d2908f666dc0fed0a6bc", "84693a7c085a6b2842d8db207ea7c000", "b6a5a2586bf064736c80a591a423b98d", "c822209cea11798e2e8db17fa5e95d13"} + for j := 0; j < len(re); j++ { + So(re[j], ShouldBeIn, zx) + } + So(result.Vector.RowCount, ShouldEqual, 4) + reType := result.GetDataType() + So(reType, ShouldEqual, 31) 
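+ // 31 is the wire-level type id for INT128 here; the human-readable name is asserted next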
+ reTypeString := result.GetDataTypeString() + So(reTypeString, ShouldEqual, "int128") + form := result.GetDataForm() + So(form, ShouldEqual, 4) + row := result.Rows() + So(row, ShouldEqual, 4) + }) + Convey("Test_set_int128_has_null:", func() { + s, err := db.RunScript("a = set(int128['c822209cea11798e2e8db17fa5e95d13','','84693a7c085a6b2842d8db207ea7c000','e3ddb9ec3328d2908f666dc0fed0a6bc']);a") + So(err, ShouldBeNil) + result := s.(*model.Set) + re := result.Vector.Data.Value() + zx := []string{"e3ddb9ec3328d2908f666dc0fed0a6bc", "84693a7c085a6b2842d8db207ea7c000", model.NullInt128, "c822209cea11798e2e8db17fa5e95d13"} + for j := 0; j < len(re); j++ { + So(re[j], ShouldBeIn, zx) + } + So(result.Vector.RowCount, ShouldEqual, 4) + reType := result.GetDataType() + So(reType, ShouldEqual, 31) + reTypeString := result.GetDataTypeString() + So(reTypeString, ShouldEqual, "int128") + form := result.GetDataForm() + So(form, ShouldEqual, 4) + row := result.Rows() + So(row, ShouldEqual, 4) + }) + Convey("Test_set_int128_all_null/has_same_ele:", func() { + s, err := db.RunScript("a = set(int128['','','','','','']);a") + So(err, ShouldBeNil) + result := s.(*model.Set) + re := result.Vector.Data.Value() + for j := 0; j < len(re); j++ { + So(re[j], ShouldEqual, model.NullInt128) + } + So(result.Vector.RowCount, ShouldEqual, 1) + reType := result.GetDataType() + So(reType, ShouldEqual, 31) + reTypeString := result.GetDataTypeString() + So(reTypeString, ShouldEqual, "int128") + form := result.GetDataForm() + So(form, ShouldEqual, 4) + row := result.Rows() + So(row, ShouldEqual, 1) + }) + So(db.Close(), ShouldBeNil) + }) +} +func Test_Set_DownLoad_DataType_set_be_cleared(t *testing.T) { + Convey("Test_set_be_cleared:", t, func() { + db, err := api.NewSimpleDolphinDBClient(context.TODO(), setup.Address, setup.UserName, setup.Password) + So(err, ShouldBeNil) + Convey("Test_set_be_cleared:", func() { + s, err := db.RunScript("a=set(int);a.clear!();a") + So(err, ShouldBeNil) + result := s.(*model.Set) + So(result.Vector.RowCount, ShouldEqual, 0) + So(result.Vector.ColumnCount, ShouldEqual, 1) + So(result.GetDataTypeString(), ShouldEqual, "int") + So(result.GetDataForm(), ShouldEqual, model.DfSet) + }) + So(db.Close(), ShouldBeNil) + }) +} +func Test_Set_UpLoad_DataType_int(t *testing.T) { + Convey("Test_set_int_upload:", t, func() { + db, err := api.NewSimpleDolphinDBClient(context.TODO(), setup.Address, setup.UserName, setup.Password) + So(err, ShouldBeNil) + Convey("Test_set_int:", func() { + data, _ := model.NewDataTypeListWithRaw(model.DtInt, []int32{1, 2, 3, 4, 5}) + set := model.NewSet(model.NewVector(data)) + _, err := db.Upload(map[string]model.DataForm{"s": set}) + res, _ := db.RunScript("s") + ty, _ := db.RunScript("typestr(s)") + re := res.(*model.Set).Vector.Data.Value() + zx := []int32{1, 2, 3, 4, 5} + for j := 0; j < len(re); j++ { + So(re[j], ShouldBeIn, zx) + } + So(err, ShouldBeNil) + So(ty.String(), ShouldEqual, "string(INT SET)") + So(res.GetDataType(), ShouldEqual, model.DtInt) + }) + So(db.Close(), ShouldBeNil) + }) +} +func Test_Set_UpLoad_DataType_short(t *testing.T) { + Convey("Test_set_short_upload:", t, func() { + db, err := api.NewSimpleDolphinDBClient(context.TODO(), setup.Address, setup.UserName, setup.Password) + So(err, ShouldBeNil) + Convey("Test_set_short:", func() { + data, _ := model.NewDataTypeListWithRaw(model.DtShort, []int16{1, 2, 3, 4, 5, 6, 7, 8, 9}) + set := model.NewSet(model.NewVector(data)) + _, err := db.Upload(map[string]model.DataForm{"s": set}) + res, _ :=
db.RunScript("s") + ty, _ := db.RunScript("typestr(s)") + re := res.(*model.Set).Vector.Data.Value() + zx := []int16{1, 2, 3, 4, 5, 6, 7, 8, 9} + for j := 0; j < len(re); j++ { + So(re[j], ShouldBeIn, zx) + } + So(err, ShouldBeNil) + So(ty.String(), ShouldEqual, "string(SHORT SET)") + So(res.GetDataType(), ShouldEqual, model.DtShort) + }) + So(db.Close(), ShouldBeNil) + }) +} +func Test_Set_UpLoad_DataType_char(t *testing.T) { + Convey("Test_set_char_upload:", t, func() { + db, err := api.NewSimpleDolphinDBClient(context.TODO(), setup.Address, setup.UserName, setup.Password) + So(err, ShouldBeNil) + Convey("Test_set_char:", func() { + data, _ := model.NewDataTypeListWithRaw(model.DtChar, []uint8{1, 2, 3, 4, 5, 6, 7, 8, 9}) + set := model.NewSet(model.NewVector(data)) + _, err := db.Upload(map[string]model.DataForm{"s": set}) + res, _ := db.RunScript("s") + ty, _ := db.RunScript("typestr(s)") + re := res.(*model.Set).Vector.Data.Value() + zx := []uint8{1, 2, 3, 4, 5, 6, 7, 8, 9} + So(re, ShouldNotBeIn, zx) + So(err, ShouldBeNil) + So(ty.String(), ShouldEqual, "string(CHAR SET)") + So(res.GetDataType(), ShouldEqual, model.DtChar) + }) + So(db.Close(), ShouldBeNil) + }) +} +func Test_Set_UpLoad_DataType_long(t *testing.T) { + Convey("Test_set_long_upload:", t, func() { + db, err := api.NewSimpleDolphinDBClient(context.TODO(), setup.Address, setup.UserName, setup.Password) + So(err, ShouldBeNil) + Convey("Test_set_long:", func() { + data, _ := model.NewDataTypeListWithRaw(model.DtLong, []int64{1, 2, 3, 4, 5, 6, 7, 8, 9}) + set := model.NewSet(model.NewVector(data)) + _, err := db.Upload(map[string]model.DataForm{"s": set}) + res, _ := db.RunScript("s") + ty, _ := db.RunScript("typestr(s)") + re := res.(*model.Set).Vector.Data.Value() + zx := []int64{1, 2, 3, 4, 5, 6, 7, 8, 9} + for j := 0; j < len(re); j++ { + So(re[j], ShouldBeIn, zx) + } + So(err, ShouldBeNil) + So(ty.String(), ShouldEqual, "string(LONG SET)") + So(res.GetDataType(), ShouldEqual, model.DtLong) + }) + So(db.Close(), ShouldBeNil) + }) +} +func Test_Set_UpLoad_DataType_float(t *testing.T) { + Convey("Test_set_float_upload:", t, func() { + db, err := api.NewSimpleDolphinDBClient(context.TODO(), setup.Address, setup.UserName, setup.Password) + So(err, ShouldBeNil) + Convey("Test_set_short:", func() { + data, _ := model.NewDataTypeListWithRaw(model.DtFloat, []float32{1, 2, 3, 4, 5, 6, 7, 8, 9}) + set := model.NewSet(model.NewVector(data)) + _, err := db.Upload(map[string]model.DataForm{"s": set}) + res, _ := db.RunScript("s") + ty, _ := db.RunScript("typestr(s)") + re := res.(*model.Set).Vector.Data.Value() + zx := []float32{1, 2, 3, 4, 5, 6, 7, 8, 9} + for j := 0; j < len(re); j++ { + So(re[j], ShouldBeIn, zx) + } + So(err, ShouldBeNil) + So(ty.String(), ShouldEqual, "string(FLOAT SET)") + So(res.GetDataType(), ShouldEqual, model.DtFloat) + }) + So(db.Close(), ShouldBeNil) + }) +} +func Test_Set_UpLoad_DataType_double(t *testing.T) { + Convey("Test_set_double_upload:", t, func() { + db, err := api.NewSimpleDolphinDBClient(context.TODO(), setup.Address, setup.UserName, setup.Password) + So(err, ShouldBeNil) + Convey("Test_set_double:", func() { + data, _ := model.NewDataTypeListWithRaw(model.DtDouble, []float64{1024.2, -2.10, 36897542.233, -5454545454, 8989.12125, 6, 7, 8, 9}) + set := model.NewSet(model.NewVector(data)) + _, err := db.Upload(map[string]model.DataForm{"s": set}) + res, _ := db.RunScript("s") + ty, _ := db.RunScript("typestr(s)") + re := res.(*model.Set).Vector.Data.Value() + zx := []float64{1024.2, -2.10, 
36897542.233, -5454545454, 8989.12125, 6, 7, 8, 9} + for j := 0; j < len(re); j++ { + So(re[j], ShouldBeIn, zx) + } + So(err, ShouldBeNil) + So(ty.String(), ShouldEqual, "string(DOUBLE SET)") + So(res.GetDataType(), ShouldEqual, model.DtDouble) + }) + So(db.Close(), ShouldBeNil) + }) +} +func Test_Set_UpLoad_DataType_date(t *testing.T) { + Convey("Test_set_date_upload:", t, func() { + db, err := api.NewSimpleDolphinDBClient(context.TODO(), setup.Address, setup.UserName, setup.Password) + So(err, ShouldBeNil) + Convey("Test_set_date:", func() { + data, _ := model.NewDataTypeListWithRaw(model.DtDate, []time.Time{time.Date(2022, 12, 31, 23, 59, 59, 999999999, time.UTC), time.Date(1969, 12, 31, 23, 59, 59, 999999999, time.UTC), time.Date(2006, 1, 2, 15, 4, 4, 999999999, time.UTC)}) + set := model.NewSet(model.NewVector(data)) + _, err := db.Upload(map[string]model.DataForm{"s": set}) + res, _ := db.RunScript("s") + ty, _ := db.RunScript("typestr(s)") + re := res.(*model.Set).Vector.Data.Value() + time1 := []time.Time{time.Date(2022, 12, 31, 0, 0, 0, 0, time.UTC), time.Date(1969, 12, 31, 0, 0, 0, 0, time.UTC), time.Date(2006, 1, 2, 0, 0, 0, 0, time.UTC)} + for j := 0; j < len(re); j++ { + So(re[j], ShouldBeIn, time1) + } + So(err, ShouldBeNil) + So(ty.String(), ShouldEqual, "string(DATE SET)") + So(res.GetDataType(), ShouldEqual, model.DtDate) + }) + So(db.Close(), ShouldBeNil) + }) +} +func Test_Set_UpLoad_DataType_month(t *testing.T) { + Convey("Test_set_month_upload:", t, func() { + db, err := api.NewSimpleDolphinDBClient(context.TODO(), setup.Address, setup.UserName, setup.Password) + So(err, ShouldBeNil) + Convey("Test_set_month:", func() { + data, _ := model.NewDataTypeListWithRaw(model.DtMonth, []time.Time{time.Date(2022, 12, 31, 23, 59, 59, 999999999, time.UTC), time.Date(1969, 12, 31, 23, 59, 59, 999999999, time.UTC), time.Date(2006, 1, 2, 15, 4, 4, 999999999, time.UTC)}) + set := model.NewSet(model.NewVector(data)) + _, err := db.Upload(map[string]model.DataForm{"s": set}) + res, _ := db.RunScript("s") + ty, _ := db.RunScript("typestr(s)") + re := res.(*model.Set).Vector.Data.Value() + time1 := []time.Time{time.Date(2022, 12, 1, 0, 0, 0, 0, time.UTC), time.Date(1969, 12, 1, 0, 0, 0, 0, time.UTC), time.Date(2006, 1, 1, 0, 0, 0, 0, time.UTC)} + for j := 0; j < len(re); j++ { + So(re[j], ShouldBeIn, time1) + } + So(err, ShouldBeNil) + So(ty.String(), ShouldEqual, "string(MONTH SET)") + So(res.GetDataType(), ShouldEqual, model.DtMonth) + }) + So(db.Close(), ShouldBeNil) + }) +} +func Test_Set_UpLoad_DataType_time(t *testing.T) { + Convey("Test_set_time_upload:", t, func() { + db, err := api.NewSimpleDolphinDBClient(context.TODO(), setup.Address, setup.UserName, setup.Password) + So(err, ShouldBeNil) + Convey("Test_set_time:", func() { + data, _ := model.NewDataTypeListWithRaw(model.DtTime, []time.Time{time.Date(2022, 12, 31, 23, 59, 59, 999999999, time.UTC), time.Date(1969, 12, 31, 23, 59, 59, 999999999, time.UTC), time.Date(2006, 1, 2, 15, 4, 4, 999999999, time.UTC)}) + set := model.NewSet(model.NewVector(data)) + _, err := db.Upload(map[string]model.DataForm{"s": set}) + res, _ := db.RunScript("s") + ty, _ := db.RunScript("typestr(s)") + re := res.(*model.Set).Vector.Data.Value() + time1 := []time.Time{time.Date(1970, 1, 1, 23, 59, 59, 999000000, time.UTC), time.Date(1970, 1, 1, 23, 59, 59, 999000000, time.UTC), time.Date(1970, 1, 1, 15, 4, 4, 999000000, time.UTC)} + for j := 0; j < len(re); j++ { + So(re[j], ShouldBeIn, time1) + } + So(err, ShouldBeNil) + So(ty.String(), ShouldEqual, 
"string(TIME SET)") + So(res.GetDataType(), ShouldEqual, model.DtTime) + }) + So(db.Close(), ShouldBeNil) + }) +} +func Test_Set_UpLoad_DataType_minute(t *testing.T) { + Convey("Test_set_minute_upload:", t, func() { + db, err := api.NewSimpleDolphinDBClient(context.TODO(), setup.Address, setup.UserName, setup.Password) + So(err, ShouldBeNil) + Convey("Test_set_minute:", func() { + data, _ := model.NewDataTypeListWithRaw(model.DtMinute, []time.Time{time.Date(2022, 12, 31, 23, 59, 59, 999999999, time.UTC), time.Date(1969, 12, 31, 23, 59, 59, 999999999, time.UTC), time.Date(2006, 1, 2, 15, 4, 4, 999999999, time.UTC)}) + set := model.NewSet(model.NewVector(data)) + _, err := db.Upload(map[string]model.DataForm{"s": set}) + res, _ := db.RunScript("s") + ty, _ := db.RunScript("typestr(s)") + re := res.(*model.Set).Vector.Data.Value() + time1 := []time.Time{time.Date(1970, 1, 1, 23, 59, 0, 0, time.UTC), time.Date(1970, 1, 1, 23, 59, 0, 0, time.UTC), time.Date(1970, 1, 1, 15, 4, 0, 0, time.UTC)} + for j := 0; j < len(re); j++ { + So(re[j], ShouldBeIn, time1) + } + So(err, ShouldBeNil) + So(ty.String(), ShouldEqual, "string(MINUTE SET)") + So(res.GetDataType(), ShouldEqual, model.DtMinute) + }) + So(db.Close(), ShouldBeNil) + }) +} +func Test_Set_UpLoad_DataType_second(t *testing.T) { + Convey("Test_set_second_upload:", t, func() { + db, err := api.NewSimpleDolphinDBClient(context.TODO(), setup.Address, setup.UserName, setup.Password) + So(err, ShouldBeNil) + Convey("Test_set_second:", func() { + data, _ := model.NewDataTypeListWithRaw(model.DtSecond, []time.Time{time.Date(2022, 12, 31, 23, 59, 59, 999999999, time.UTC), time.Date(1969, 12, 31, 23, 59, 59, 999999999, time.UTC), time.Date(2006, 1, 2, 15, 4, 4, 999999999, time.UTC)}) + set := model.NewSet(model.NewVector(data)) + _, err := db.Upload(map[string]model.DataForm{"s": set}) + res, _ := db.RunScript("s") + ty, _ := db.RunScript("typestr(s)") + re := res.(*model.Set).Vector.Data.Value() + time1 := []time.Time{time.Date(1970, 1, 1, 23, 59, 59, 0, time.UTC), time.Date(1970, 1, 1, 23, 59, 59, 0, time.UTC), time.Date(1970, 1, 1, 15, 4, 4, 0, time.UTC)} + for j := 0; j < len(re); j++ { + So(re[j], ShouldBeIn, time1) + } + So(err, ShouldBeNil) + So(ty.String(), ShouldEqual, "string(SECOND SET)") + So(res.GetDataType(), ShouldEqual, model.DtSecond) + }) + So(db.Close(), ShouldBeNil) + }) +} +func Test_Set_UpLoad_DataType_datetime(t *testing.T) { + Convey("Test_set_datetime_upload:", t, func() { + db, err := api.NewSimpleDolphinDBClient(context.TODO(), setup.Address, setup.UserName, setup.Password) + So(err, ShouldBeNil) + Convey("Test_set_datetime:", func() { + data, _ := model.NewDataTypeListWithRaw(model.DtDatetime, []time.Time{time.Date(2022, 12, 31, 23, 59, 59, 999999999, time.UTC), time.Date(1969, 12, 31, 23, 59, 59, 999999999, time.UTC), time.Date(2006, 1, 2, 15, 4, 4, 999999999, time.UTC)}) + set := model.NewSet(model.NewVector(data)) + _, err := db.Upload(map[string]model.DataForm{"s": set}) + res, _ := db.RunScript("s") + ty, _ := db.RunScript("typestr(s)") + re := res.(*model.Set).Vector.Data.Value() + time1 := []time.Time{time.Date(2022, 12, 31, 23, 59, 59, 0, time.UTC), time.Date(1969, 12, 31, 23, 59, 59, 0, time.UTC), time.Date(2006, 1, 2, 15, 4, 4, 0, time.UTC)} + for j := 0; j < len(re); j++ { + So(re[j], ShouldBeIn, time1) + } + So(err, ShouldBeNil) + So(ty.String(), ShouldEqual, "string(DATETIME SET)") + So(res.GetDataType(), ShouldEqual, model.DtDatetime) + }) + So(db.Close(), ShouldBeNil) + }) +} +func 
Test_Set_UpLoad_DataType_timestamp(t *testing.T) { + Convey("Test_set_timestamp_upload:", t, func() { + db, err := api.NewSimpleDolphinDBClient(context.TODO(), setup.Address, setup.UserName, setup.Password) + So(err, ShouldBeNil) + Convey("Test_set_timestamp:", func() { + data, _ := model.NewDataTypeListWithRaw(model.DtTimestamp, []time.Time{time.Date(2022, 12, 31, 23, 59, 59, 999999999, time.UTC), time.Date(1969, 12, 31, 23, 59, 59, 999999999, time.UTC), time.Date(2006, 1, 2, 15, 4, 4, 999999999, time.UTC)}) + set := model.NewSet(model.NewVector(data)) + _, err := db.Upload(map[string]model.DataForm{"s": set}) + res, _ := db.RunScript("s") + ty, _ := db.RunScript("typestr(s)") + re := res.(*model.Set).Vector.Data.Value() + time1 := []time.Time{time.Date(2022, 12, 31, 23, 59, 59, 999000000, time.UTC), time.Date(1969, 12, 31, 23, 59, 59, 999000000, time.UTC), time.Date(2006, 1, 2, 15, 4, 4, 999000000, time.UTC)} + for j := 0; j < len(re); j++ { + So(re[j], ShouldBeIn, time1) + } + So(err, ShouldBeNil) + So(ty.String(), ShouldEqual, "string(TIMESTAMP SET)") + So(res.GetDataType(), ShouldEqual, model.DtTimestamp) + }) + So(db.Close(), ShouldBeNil) + }) +} +func Test_Set_UpLoad_DataType_nanotime(t *testing.T) { + Convey("Test_set_nanotime_upload:", t, func() { + db, err := api.NewSimpleDolphinDBClient(context.TODO(), setup.Address, setup.UserName, setup.Password) + So(err, ShouldBeNil) + Convey("Test_set_nanotime:", func() { + data, _ := model.NewDataTypeListWithRaw(model.DtNanoTime, []time.Time{time.Date(2022, 12, 31, 23, 59, 59, 999999999, time.UTC), time.Date(1969, 12, 31, 23, 59, 59, 999999999, time.UTC), time.Date(2006, 1, 2, 15, 4, 4, 999999999, time.UTC)}) + set := model.NewSet(model.NewVector(data)) + _, err := db.Upload(map[string]model.DataForm{"s": set}) + res, _ := db.RunScript("s") + ty, _ := db.RunScript("typestr(s)") + re := res.(*model.Set).Vector.Data.Value() + time1 := []time.Time{time.Date(1970, 1, 1, 23, 59, 59, 999999999, time.UTC), time.Date(1970, 1, 1, 23, 59, 59, 999999999, time.UTC), time.Date(1970, 1, 1, 15, 4, 4, 999999999, time.UTC)} + for j := 0; j < len(re); j++ { + So(re[j], ShouldBeIn, time1) + } + So(err, ShouldBeNil) + So(ty.String(), ShouldEqual, "string(NANOTIME SET)") + So(res.GetDataType(), ShouldEqual, model.DtNanoTime) + }) + So(db.Close(), ShouldBeNil) + }) +} +func Test_Set_UpLoad_DataType_nanotimestamp(t *testing.T) { + Convey("Test_set_nanotimestamp_upload:", t, func() { + db, err := api.NewSimpleDolphinDBClient(context.TODO(), setup.Address, setup.UserName, setup.Password) + So(err, ShouldBeNil) + Convey("Test_set_nanotimestamp:", func() { + data, _ := model.NewDataTypeListWithRaw(model.DtNanoTimestamp, []time.Time{time.Date(2022, 12, 31, 23, 59, 59, 999999999, time.UTC), time.Date(1969, 12, 31, 23, 59, 59, 999999999, time.UTC), time.Date(2006, 1, 2, 15, 4, 4, 999999999, time.UTC)}) + set := model.NewSet(model.NewVector(data)) + _, err := db.Upload(map[string]model.DataForm{"s": set}) + res, _ := db.RunScript("s") + ty, _ := db.RunScript("typestr(s)") + re := res.(*model.Set).Vector.Data.Value() + time1 := []time.Time{time.Date(2022, 12, 31, 23, 59, 59, 999999999, time.UTC), time.Date(1969, 12, 31, 23, 59, 59, 999999999, time.UTC), time.Date(2006, 1, 2, 15, 4, 4, 999999999, time.UTC)} + for j := 0; j < len(re); j++ { + So(re[j], ShouldBeIn, time1) + } + So(err, ShouldBeNil) + So(ty.String(), ShouldEqual, "string(NANOTIMESTAMP SET)") + So(res.GetDataType(), ShouldEqual, model.DtNanoTimestamp) + }) + So(db.Close(), ShouldBeNil) + }) +} +func 
Test_Set_UpLoad_DataType_datehour(t *testing.T) { + Convey("Test_set_datehour_upload:", t, func() { + db, err := api.NewSimpleDolphinDBClient(context.TODO(), setup.Address, setup.UserName, setup.Password) + So(err, ShouldBeNil) + Convey("Test_set_datehour:", func() { + data, _ := model.NewDataTypeListWithRaw(model.DtDateHour, []time.Time{time.Date(2022, 12, 31, 23, 59, 59, 999999999, time.UTC), time.Date(1969, 12, 31, 23, 59, 59, 999999999, time.UTC), time.Date(2006, 1, 2, 15, 4, 4, 999999999, time.UTC)}) + set := model.NewSet(model.NewVector(data)) + _, err := db.Upload(map[string]model.DataForm{"s": set}) + res, _ := db.RunScript("s") + ty, _ := db.RunScript("typestr(s)") + re := res.(*model.Set).Vector.Data.Value() + time1 := []time.Time{time.Date(2022, 12, 31, 23, 0, 0, 0, time.UTC), time.Date(1969, 12, 31, 23, 0, 0, 0, time.UTC), time.Date(2006, 1, 2, 15, 0, 0, 0, time.UTC)} + for j := 0; j < len(re); j++ { + So(re[j], ShouldBeIn, time1) + } + So(err, ShouldBeNil) + So(ty.String(), ShouldEqual, "string(DATEHOUR SET)") + So(res.GetDataType(), ShouldEqual, model.DtDateHour) + }) + So(db.Close(), ShouldBeNil) + }) +} +func Test_Set_UpLoad_DataType_point(t *testing.T) { + Convey("Test_set_point_upload:", t, func() { + db, err := api.NewSimpleDolphinDBClient(context.TODO(), setup.Address, setup.UserName, setup.Password) + So(err, ShouldBeNil) + Convey("Test_set_point:", func() { + data, _ := model.NewDataTypeListWithRaw(model.DtPoint, [][2]float64{{1, 1}, {-1, -1024.5}, {1001022.4, -30028.75}}) + set := model.NewSet(model.NewVector(data)) + _, err := db.Upload(map[string]model.DataForm{"s": set}) + res, _ := db.RunScript("s") + ty, _ := db.RunScript("typestr(s)") + re := res.(*model.Set).Vector.Data.Value() + zx := []string{"(1.00000, 1.00000)", "(-1.00000, -1024.50000)", "(1001022.40000, -30028.75000)"} + for j := 0; j < len(re); j++ { + So(re[j], ShouldBeIn, zx) + } + So(err, ShouldBeNil) + So(ty.String(), ShouldEqual, "string(POINT SET)") + So(res.GetDataType(), ShouldEqual, model.DtPoint) + }) + So(db.Close(), ShouldBeNil) + }) +} +func Test_Set_UpLoad_DataType_complex(t *testing.T) { + Convey("Test_set_complex_upload:", t, func() { + db, err := api.NewSimpleDolphinDBClient(context.TODO(), setup.Address, setup.UserName, setup.Password) + So(err, ShouldBeNil) + Convey("Test_set_complex:", func() { + data, _ := model.NewDataTypeListWithRaw(model.DtComplex, [][2]float64{{1, 1}, {-1, -1024.5}, {1001022.4, -30028.75}}) + set := model.NewSet(model.NewVector(data)) + _, err := db.Upload(map[string]model.DataForm{"s": set}) + res, _ := db.RunScript("s") + ty, _ := db.RunScript("typestr(s)") + re := res.(*model.Set).Vector.Data.Value() + zx := []string{"1.00000+1.00000i", "-1.00000+-1024.50000i", "1001022.40000+-30028.75000i"} + for j := 0; j < len(re); j++ { + So(re[j], ShouldBeIn, zx) + } + So(err, ShouldBeNil) + So(ty.String(), ShouldEqual, "string(COMPLEX SET)") + So(res.GetDataType(), ShouldEqual, model.DtComplex) + }) + So(db.Close(), ShouldBeNil) + }) +} +func Test_Set_UpLoad_DataType_string(t *testing.T) { + Convey("Test_set_string_upload:", t, func() { + db, err := api.NewSimpleDolphinDBClient(context.TODO(), setup.Address, setup.UserName, setup.Password) + So(err, ShouldBeNil) + Convey("Test_set_string:", func() { + data, _ := model.NewDataTypeListWithRaw(model.DtString, []string{"hello", "#$%", "数据类型", "what"}) + set := model.NewSet(model.NewVector(data)) + _, err := db.Upload(map[string]model.DataForm{"s": set}) + res, _ := db.RunScript("s") + ty, _ := db.RunScript("typestr(s)") + 
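+// Upload binds the Set to the server-side variable "s"; RunScript("s") reads
+// the values back and typestr(s) returns the server's type tag, so each case
+// verifies both value fidelity and the declared type in one round trip.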
re := res.(*model.Set).Vector.Data.Value() + zx := []string{"hello", "#$%", "数据类型", "what"} + for j := 0; j < len(re); j++ { + So(re[j], ShouldBeIn, zx) + } + So(err, ShouldBeNil) + So(ty.String(), ShouldEqual, "string(STRING SET)") + So(res.GetDataType(), ShouldEqual, model.DtString) + }) + So(db.Close(), ShouldBeNil) + }) +} +func Test_Set_UpLoad_DataType_blob(t *testing.T) { + Convey("Test_set_blob_upload:", t, func() { + db, err := api.NewSimpleDolphinDBClient(context.TODO(), setup.Address, setup.UserName, setup.Password) + So(err, ShouldBeNil) + Convey("Test_set_blob:", func() { + data, _ := model.NewDataTypeListWithRaw(model.DtBlob, [][]byte{{6}, {12}, {56}, {128}}) + set := model.NewSet(model.NewVector(data)) + _, err := db.Upload(map[string]model.DataForm{"s": set}) + res, _ := db.RunScript("s") + ty, _ := db.RunScript("typestr(s)") + re := res.(*model.Set).Vector.Data.Value() + zx := [][]uint8{{6}, {12}, {56}, {128}} + for j := 0; j < len(re); j++ { + So(re[j], ShouldBeIn, zx) + } + So(err, ShouldBeNil) + So(ty.String(), ShouldEqual, "string(BLOB SET)") + So(res.GetDataType(), ShouldEqual, model.DtBlob) + }) + So(db.Close(), ShouldBeNil) + }) +} +func Test_Set_UpLoad_DataType_uuid(t *testing.T) { + Convey("Test_set_uuid_upload:", t, func() { + db, err := api.NewSimpleDolphinDBClient(context.TODO(), setup.Address, setup.UserName, setup.Password) + So(err, ShouldBeNil) + Convey("Test_set_uuid:", func() { + data, _ := model.NewDataTypeListWithRaw(model.DtUUID, []string{"5d212a78-cc48-e3b1-4235-b4d91473ee87", "5d212a78-cc48-e3b1-4235-b4d91473ee88", "5d212a78-cc48-e3b1-4235-b4d91473ee89"}) + set := model.NewSet(model.NewVector(data)) + _, err := db.Upload(map[string]model.DataForm{"s": set}) + res, _ := db.RunScript("s") + ty, _ := db.RunScript("typestr(s)") + re := res.(*model.Set).Vector.Data.Value() + zx := []string{"5d212a78-cc48-e3b1-4235-b4d91473ee87", "5d212a78-cc48-e3b1-4235-b4d91473ee88", "5d212a78-cc48-e3b1-4235-b4d91473ee89"} + for j := 0; j < len(re); j++ { + So(re[j], ShouldBeIn, zx) + } + So(err, ShouldBeNil) + So(ty.String(), ShouldEqual, "string(UUID SET)") + So(res.GetDataType(), ShouldEqual, model.DtUUID) + }) + So(db.Close(), ShouldBeNil) + }) +} +func Test_Set_UpLoad_DataType_ipaddr(t *testing.T) { + Convey("Test_set_ipaddr_upload:", t, func() { + db, err := api.NewSimpleDolphinDBClient(context.TODO(), setup.Address, setup.UserName, setup.Password) + So(err, ShouldBeNil) + Convey("Test_set_ipaddr:", func() { + data, _ := model.NewDataTypeListWithRaw(model.DtIP, []string{"192.163.1.12", "0.0.0.0", "127.0.0.1"}) + set := model.NewSet(model.NewVector(data)) + _, err := db.Upload(map[string]model.DataForm{"s": set}) + res, _ := db.RunScript("s") + ty, _ := db.RunScript("typestr(s)") + re := res.(*model.Set).Vector.Data.Value() + zx := []string{"192.163.1.12", "0.0.0.0", "127.0.0.1"} + for j := 0; j < len(re); j++ { + So(re[j], ShouldBeIn, zx) + } + So(err, ShouldBeNil) + So(ty.String(), ShouldEqual, "string(IPADDR SET)") + So(res.GetDataType(), ShouldEqual, model.DtIP) + }) + So(db.Close(), ShouldBeNil) + }) +} +func Test_Set_UpLoad_DataType_int128(t *testing.T) { + Convey("Test_set_int128_upload:", t, func() { + db, err := api.NewSimpleDolphinDBClient(context.TODO(), setup.Address, setup.UserName, setup.Password) + So(err, ShouldBeNil) + Convey("Test_set_int128:", func() { + data, _ := model.NewDataTypeListWithRaw(model.DtInt128, []string{"e1671797c52e15f763380b45e841ec32", "e1671797c52e15f763380b45e841ec33", "e1671797c52e15f763380b45e841ec34"}) + set := 
model.NewSet(model.NewVector(data)) + _, err := db.Upload(map[string]model.DataForm{"s": set}) + res, _ := db.RunScript("s") + ty, _ := db.RunScript("typestr(s)") + re := res.(*model.Set).Vector.Data.Value() + zx := []string{"e1671797c52e15f763380b45e841ec32", "e1671797c52e15f763380b45e841ec33", "e1671797c52e15f763380b45e841ec34"} + for j := 0; j < len(re); j++ { + So(re[j], ShouldBeIn, zx) + } + So(err, ShouldBeNil) + So(ty.String(), ShouldEqual, "string(INT128 SET)") + So(res.GetDataType(), ShouldEqual, model.DtInt128) + }) + So(db.Close(), ShouldBeNil) + }) +} +func Test_Set_UpLoad_DataType_big_array(t *testing.T) { + Convey("Test_set_big_array_upload:", t, func() { + db, err := api.NewSimpleDolphinDBClient(context.TODO(), setup.Address, setup.UserName, setup.Password) + So(err, ShouldBeNil) + var i int32 + intv := []int32{} + for i = 0; i < 3000000*12; i += 12 { + intv = append(intv, i) + } + intv = append(intv, model.NullInt) + col, err := model.NewDataTypeListWithRaw(model.DtInt, intv) + So(err, ShouldBeNil) + set := model.NewSet(model.NewVector(col)) + _, err = db.Upload(map[string]model.DataForm{"s": set}) + So(err, ShouldBeNil) + res, _ := db.RunScript("s") + ty, _ := db.RunScript("typestr(s)") + re := res.(*model.Set) + So(re.Vector.ColumnCount, ShouldEqual, 1) + So(re.Vector.RowCount, ShouldEqual, 3000001) + So(ty.String(), ShouldEqual, "string(INT SET)") + So(res.GetDataType(), ShouldEqual, model.DtInt) + So(db.Close(), ShouldBeNil) + }) +} diff --git a/test/basicTypeTest/basicTable_test.go b/test/basicTypeTest/basicTable_test.go new file mode 100644 index 0000000..9c96457 --- /dev/null +++ b/test/basicTypeTest/basicTable_test.go @@ -0,0 +1,2727 @@ +package test + +import ( + "context" + "math" + "testing" + "time" + + "github.com/dolphindb/api-go/api" + "github.com/dolphindb/api-go/model" + "github.com/dolphindb/api-go/test/setup" + . 
"github.com/smartystreets/goconvey/convey" +) + +func Test_Table_DownLoad_DataType_string(t *testing.T) { + Convey("Test_Table_with_string:", t, func() { + db, err := api.NewSimpleDolphinDBClient(context.TODO(), setup.Address, setup.UserName, setup.Password) + So(err, ShouldBeNil) + Convey("Test_Table_with_string_not_null:", func() { + s, err := db.RunScript("n=6;syms=`IBM`C`MS`MSFT`JPM`ORCL;em = 97c 102c 97c 100c 99c 98c;zx=true false false true false true;ax = symbol(`A`B`C`D`E`F);sh = 11h 12h 13h 14h 15h 16h;m=table(09:30:00+take(100..105,n) as second,take(syms,n) as sym, 10.0*(1+take(100..105,n)) as qty,5.0+take(100..105,n) as price, 5l*(1+take(100..105,n)) as long, 6f*(1+take(100..105,n)) as float,take(em,n) as char,take(zx,n) as bool, take(ax,n) as symbol,take(sh,6) as short);m") + So(err, ShouldBeNil) + result := s.(*model.Table) + get := result.GetColumnByName("sym").Data.Value() + zx := [6]string{"IBM", "C", "MS", "MSFT", "JPM", "ORCL"} + var k int + for i := 0; i < len(get); i++ { + if get[i] == zx[i] { + k++ + } + } + So(k, ShouldEqual, result.Rows()) + form := result.GetDataForm() + So(form, ShouldEqual, 6) + reType := result.GetDataType() + So(reType, ShouldEqual, 0) + reTypeString := result.GetDataTypeString() + So(reTypeString, ShouldEqual, "void") + row := result.Rows() + col := result.Columns() + So(row, ShouldEqual, 6) + So(col, ShouldEqual, 10) + ids := []int{0} + sub := result.GetSubtable(ids) + So(sub, ShouldNotBeNil) + }) + Convey("Test_Table_with_string_has_null:", func() { + s, err := db.RunScript("n=6;syms=`IBM`C``MSFT``ORCL;m=table(2022.08.03+take(100..105,n) as date,take(syms,n) as sym, 2012.08M+take(100..105,n) as month, 09:30:00.000+take(100..105,n) as time, 09:30m+take(100..105,n) as minute );m") + So(err, ShouldBeNil) + result := s.(*model.Table) + get := result.GetColumnByName("sym").Data.Value() + zx := [6]string{"IBM", "C", "", "MSFT", "", "ORCL"} + for i := 0; i < len(get); i++ { + So(get[i], ShouldEqual, zx[i]) + } + }) + Convey("Test_Table_only_one_string_columns:", func() { + s, err := db.RunScript("em = `IBM`C`MS`MSFT`JPM`ORCL;m = table(take(em,6) as string);m") + So(err, ShouldBeNil) + result := s.(*model.Table) + get := result.GetColumnByName("string").Data.Value() + zx := [6]string{"IBM", "C", "MS", "MSFT", "JPM", "ORCL"} + for i := 0; i < len(get); i++ { + So(get[i], ShouldEqual, zx[i]) + } + }) + Convey("Test_Table_only_one_string_null_columns:", func() { + s, err := db.RunScript("em = ``````;m = table(take(em,6) as string_null);m") + So(err, ShouldBeNil) + result := s.(*model.Table) + get := result.GetColumnByName("string_null").Data.Value() + for i := 0; i < len(get); i++ { + So(result.GetColumnByName("string_null").IsNull(i), ShouldEqual, true) + } + }) + Convey("Test_Table_with_string_all_null:", func() { + s, err := db.RunScript("n=6;syms=``````;m=table(2022.08.03 11:00:00+take(10..15,n) as datetime,take(syms,n) as sym, 2012.08.03 11:00:00.000+take(100..105,n) as timestamp, 11:00:00.000000000+take(100..105,n) as nanotime, 2022.08.03 11:00:00.000000000+take(100..105,n) as nanotimestamp );m") + So(err, ShouldBeNil) + result := s.(*model.Table) + get := result.GetColumnByName("sym").Data.Value() + zx := [6]string{"", "", "", "", "", ""} + for i := 0; i < len(get); i++ { + So(get[i], ShouldEqual, zx[i]) + } + }) + So(db.Close(), ShouldBeNil) + }) +} +func Test_Table_DownLoad_DataType_bool(t *testing.T) { + Convey("Test_Table_with_bool:", t, func() { + db, err := api.NewSimpleDolphinDBClient(context.TODO(), setup.Address, setup.UserName, 
setup.Password) + So(err, ShouldBeNil) + Convey("Test_Table_with_bool_not_null:", func() { + s, err := db.RunScript("n=6;syms=`IBM`C`MS`MSFT`JPM`ORCL;em = 97c 102c 97c 100c 99c 98c;zx=true false false true false true;ax = symbol(`A`B`C`D`E`F);sh = 11h 12h 13h 14h 15h 16h;m=table(09:30:00+take(100..105,n) as second,take(syms,n) as sym, 10.0*(1+take(100..105,n)) as qty,5.0+take(100..105,n) as price, 5l*(1+take(100..105,n)) as long, 6f*(1+take(100..105,n)) as float,take(em,n) as char,take(zx,n) as bool, take(ax,n) as symbol,take(sh,6) as short);m") + So(err, ShouldBeNil) + result := s.(*model.Table) + get := result.GetColumnByName("bool").Data.Value() + zx := [6]bool{true, false, false, true, false, true} + var k int + for i := 0; i < len(get); i++ { + if get[i] == zx[i] { + k++ + } + } + So(k, ShouldEqual, result.Rows()) + }) + Convey("Test_Table_only_one_bool_columns:", func() { + s, err := db.RunScript("em = true false false true false true;m = table(take(em,6) as bool);m") + So(err, ShouldBeNil) + result := s.(*model.Table) + get := result.GetColumnByName("bool").Data.Value() + zx := [6]bool{true, false, false, true, false, true} + var k int + for i := 0; i < len(get); i++ { + if get[i] == zx[i] { + k++ + } + } + So(k, ShouldEqual, result.Rows()) + }) + Convey("Test_Table_only_one_bool_null_columns:", func() { + s, err := db.RunScript("m = table(take(00b,6) as bool_null);m") + So(err, ShouldBeNil) + result := s.(*model.Table) + get := result.GetColumnByName("bool_null").Data.Value() + for i := 0; i < len(get); i++ { + So(result.GetColumnByName("bool_null").IsNull(i), ShouldEqual, true) + } + }) + So(db.Close(), ShouldBeNil) + }) +} +func Test_Table_DownLoad_DataType_symbol(t *testing.T) { + Convey("Test_Table_with_symbol:", t, func() { + db, err := api.NewSimpleDolphinDBClient(context.TODO(), setup.Address, setup.UserName, setup.Password) + So(err, ShouldBeNil) + Convey("Test_Table_with_symbol_not_null:", func() { + s, err := db.RunScript("n=6;syms=`IBM`C`MS`MSFT`JPM`ORCL;em = 97c 102c 97c 100c 99c 98c;zx=true false false true false true;ax = symbol(`A`B`C`D`E`F);sh = 11h 12h 13h 14h 15h 16h;m=table(09:30:00+take(100..105,n) as second,take(syms,n) as sym, 10.0*(1+take(100..105,n)) as qty,5.0+take(100..105,n) as price, 5l*(1+take(100..105,n)) as long, 6f*(1+take(100..105,n)) as float,take(em,n) as char,take(zx,n) as bool, take(ax,n) as symbol,take(sh,6) as short);m") + So(err, ShouldBeNil) + result := s.(*model.Table) + get := result.GetColumnByName("symbol").Data.Value() + zx := [6]string{"A", "B", "C", "D", "E", "F"} + var k int + for i := 0; i < len(get); i++ { + if get[i] == zx[i] { + k++ + } + } + So(k, ShouldEqual, result.Rows()) + }) + Convey("Test_Table_only_one_symbol_columns:", func() { + s, err := db.RunScript("em = symbol(`A`B`C`D`E`F);m = table(take(em,6) as symbol);m") + So(err, ShouldBeNil) + result := s.(*model.Table) + get := result.GetColumnByName("symbol").Data.Value() + zx := [6]string{"A", "B", "C", "D", "E", "F"} + var k int + for i := 0; i < len(get); i++ { + if get[i] == zx[i] { + k++ + } + } + So(k, ShouldEqual, result.Rows()) + }) + Convey("Test_Table_only_one_symbol_null_columns:", func() { + s, err := db.RunScript("em = symbol(['','','','','','']);m = table(take(em,6) as symbol_null);m") + So(err, ShouldBeNil) + result := s.(*model.Table) + get := result.GetColumnByName("symbol_null").Data.Value() + for i := 0; i < len(get); i++ { + So(result.GetColumnByName("symbol_null").IsNull(i), ShouldEqual, true) + } + }) + So(db.Close(), ShouldBeNil) + }) +} +func 
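+// Every download test in this file follows one pattern: run a script that
+// yields a table, assert the returned DataForm to *model.Table, then read
+// columns by name. A minimal sketch of that flow (the script and column name
+// here are illustrative only):
+//
+//	s, err := db.RunScript("table(1 2 3 as id)")
+//	if err != nil {
+//		t.Fatal(err)
+//	}
+//	tbl := s.(*model.Table)
+//	vals := tbl.GetColumnByName("id").Data.Value() // column values as a slice
+//	null := tbl.GetColumnByName("id").IsNull(0)    // per-row null check
+//	_, _ = vals, null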
Test_Table_DownLoad_DataType_double(t *testing.T) { + Convey("Test_Table_with_double:", t, func() { + db, err := api.NewSimpleDolphinDBClient(context.TODO(), setup.Address, setup.UserName, setup.Password) + So(err, ShouldBeNil) + Convey("Test_Table_with_double_not_null:", func() { + s, err := db.RunScript("n=6;syms=`IBM`C`MS`MSFT`JPM`ORCL;em = 97c 102c 97c 100c 99c 98c;zx=true false false true false true;ax = symbol(`A`B`C`D`E`F);sh = 11h 12h 13h 14h 15h 16h;m=table(09:30:00+take(100..105,n) as second,take(syms,n) as sym, 10.0*(1+take(100..105,n)) as qty,5.0+take(100..105,n) as price, 5l*(1+take(100..105,n)) as long, 6f*(1+take(100..105,n)) as float,take(em,n) as char,take(zx,n) as bool, take(ax,n) as symbol,take(sh,6) as short);m") + So(err, ShouldBeNil) + result := s.(*model.Table) + get := result.GetColumnByName("qty").Data.Value() + zx := [6]float64{1010, 1020, 1030, 1040, 1050, 1060} + var k int + for i := 0; i < len(get); i++ { + if get[i] == zx[i] { + k++ + } + } + So(k, ShouldEqual, result.Rows()) + }) + Convey("Test_Table_only_one_double_columns:", func() { + s, err := db.RunScript("m = table(10.0*(1+take(100..105,6)) as double);m") + So(err, ShouldBeNil) + result := s.(*model.Table) + get := result.GetColumnByName("double").Data.Value() + zx := [6]float64{1010, 1020, 1030, 1040, 1050, 1060} + var k int + for i := 0; i < len(get); i++ { + if get[i] == zx[i] { + k++ + } + } + So(k, ShouldEqual, result.Rows()) + }) + Convey("Test_Table_only_one_double_null_columns:", func() { + s, err := db.RunScript("m = table(10.0+double(['','','','','','']) as double_null);m") + So(err, ShouldBeNil) + result := s.(*model.Table) + get := result.GetColumnByName("double_null").Data.Value() + for i := 0; i < len(get); i++ { + So(result.GetColumnByName("double_null").IsNull(i), ShouldEqual, true) + } + }) + So(db.Close(), ShouldBeNil) + }) +} +func Test_Table_DownLoad_DataType_int(t *testing.T) { + Convey("Test_Table_with_int:", t, func() { + db, err := api.NewSimpleDolphinDBClient(context.TODO(), setup.Address, setup.UserName, setup.Password) + So(err, ShouldBeNil) + Convey("Test_Table_with_int_not_null:", func() { + s, err := db.RunScript("n=6;syms=`IBM`C`MS`MSFT`JPM`ORCL;em = 97c 102c 97c 100c 99c 98c;zx=true false false true false true;ax = symbol(`A`B`C`D`E`F);sh = 11h 12h 13h 14h 15h 16h;m=table(09:30:00+take(100..105,n) as second,take(syms,n) as sym, 10.0*(1+take(100..105,n)) as qty,5+take(100..105,n) as price, 5l*(1+take(100..105,n)) as long, 6f*(1+take(100..105,n)) as float,take(em,n) as char,take(zx,n) as bool, take(ax,n) as symbol,take(sh,6) as short);m") + So(err, ShouldBeNil) + result := s.(*model.Table) + get := result.GetColumnByName("price").Data.Value() + zx := [6]int32{105, 106, 107, 108, 109, 110} + var k int + for i := 0; i < len(get); i++ { + if get[i] == zx[i] { + k++ + } + } + So(k, ShouldEqual, result.Rows()) + }) + Convey("Test_Table_only_one_int_columns:", func() { + s, err := db.RunScript("m = table(10+take(10..15,6) as int);m") + So(err, ShouldBeNil) + result := s.(*model.Table) + get := result.GetColumnByName("int").Data.Value() + zx := [6]int32{20, 21, 22, 23, 24, 25} + var k int + for i := 0; i < len(get); i++ { + if get[i] == zx[i] { + k++ + } + } + So(k, ShouldEqual, result.Rows()) + }) + Convey("Test_Table_only_one_int_null_columns:", func() { + s, err := db.RunScript("m = table(10+take(00i,6) as int_null);m") + So(err, ShouldBeNil) + result := s.(*model.Table) + get := result.GetColumnByName("int_null").Data.Value() + for i := 0; i < len(get); i++ { + 
So(result.GetColumnByName("int_null").IsNull(i), ShouldEqual, true) + } + }) + So(db.Close(), ShouldBeNil) + }) +} +func Test_Table_DownLoad_DataType_float(t *testing.T) { + Convey("Test_Table_with_float:", t, func() { + db, err := api.NewSimpleDolphinDBClient(context.TODO(), setup.Address, setup.UserName, setup.Password) + So(err, ShouldBeNil) + Convey("Test_Table_with_float_not_null:", func() { + s, err := db.RunScript("n=6;syms=`IBM`C`MS`MSFT`JPM`ORCL;em = 97c 102c 97c 100c 99c 98c;zx=true false false true false true;ax = symbol(`A`B`C`D`E`F);sh = 11h 12h 13h 14h 15h 16h;m=table(09:30:00+take(100..105,n) as second,take(syms,n) as sym, 10.0*(1+take(100..105,n)) as qty,5.0+take(100..105,n) as price, 5l*(1+take(100..105,n)) as long, 6f*(1+take(100..105,n)) as float,take(em,n) as char,take(zx,n) as bool, take(ax,n) as symbol,take(sh,6) as short);m") + So(err, ShouldBeNil) + result := s.(*model.Table) + get := result.GetColumnByName("float").Data.Value() + zx := [6]float32{606, 612, 618, 624, 630, 636} + var k int + for i := 0; i < len(get); i++ { + if get[i] == zx[i] { + k++ + } + } + So(k, ShouldEqual, result.Rows()) + }) + Convey("Test_Table_only_one_float_columns:", func() { + s, err := db.RunScript("m = table(10f*(1+take(10..15,6)) as float);m") + So(err, ShouldBeNil) + result := s.(*model.Table) + get := result.GetColumnByName("float").Data.Value() + zx := [6]float32{110, 120, 130, 140, 150, 160} + var k int + for i := 0; i < len(get); i++ { + if get[i] == zx[i] { + k++ + } + } + So(k, ShouldEqual, result.Rows()) + }) + Convey("Test_Table_only_one_float_null_columns:", func() { + s, err := db.RunScript("m = table(10+take(00f,6) as float_null);m") + So(err, ShouldBeNil) + result := s.(*model.Table) + get := result.GetColumnByName("float_null").Data.Value() + for i := 0; i < len(get); i++ { + So(result.GetColumnByName("float_null").IsNull(i), ShouldEqual, true) + } + }) + So(db.Close(), ShouldBeNil) + }) +} +func Test_Table_DownLoad_DataType_long(t *testing.T) { + Convey("Test_Table_with_long:", t, func() { + db, err := api.NewSimpleDolphinDBClient(context.TODO(), setup.Address, setup.UserName, setup.Password) + So(err, ShouldBeNil) + Convey("Test_Table_with_long_not_null:", func() { + s, err := db.RunScript("n=6;syms=`IBM`C`MS`MSFT`JPM`ORCL;em = 97c 102c 97c 100c 99c 98c;zx=true false false true false true;ax = symbol(`A`B`C`D`E`F);sh = 11h 12h 13h 14h 15h 16h;m=table(09:30:00+take(100..105,n) as second,take(syms,n) as sym, 10.0*(1+take(100..105,n)) as qty,5.0+take(100..105,n) as price, 5l*(1+take(100..105,n)) as long, 6f*(1+take(100..105,n)) as float,take(em,n) as char,take(zx,n) as bool, take(ax,n) as symbol,take(sh,6) as short);m") + So(err, ShouldBeNil) + result := s.(*model.Table) + get := result.GetColumnByName("long").Data.Value() + zx := [6]int64{505, 510, 515, 520, 525, 530} + var k int + for i := 0; i < len(get); i++ { + if get[i] == zx[i] { + k++ + } + } + So(k, ShouldEqual, result.Rows()) + }) + Convey("Test_Table_only_one_long_columns:", func() { + s, err := db.RunScript("m = table(10l+take(10..15,6) as long);m") + So(err, ShouldBeNil) + result := s.(*model.Table) + get := result.GetColumnByName("long").Data.Value() + zx := [6]int64{20, 21, 22, 23, 24, 25} + var k int + for i := 0; i < len(get); i++ { + if get[i] == zx[i] { + k++ + } + } + So(k, ShouldEqual, result.Rows()) + }) + Convey("Test_Table_only_one_long_null_columns:", func() { + s, err := db.RunScript("m = table(10+take(00l,6) as long_null);m") + So(err, ShouldBeNil) + result := s.(*model.Table) + get := 
result.GetColumnByName("long_null").Data.Value() + for i := 0; i < len(get); i++ { + So(result.GetColumnByName("long_null").IsNull(i), ShouldEqual, true) + } + }) + So(db.Close(), ShouldBeNil) + }) +} +func Test_Table_DownLoad_DataType_char(t *testing.T) { + Convey("Test_Table_with_char:", t, func() { + db, err := api.NewSimpleDolphinDBClient(context.TODO(), setup.Address, setup.UserName, setup.Password) + So(err, ShouldBeNil) + Convey("Test_Table_with_char_not_null:", func() { + s, err := db.RunScript("n=6;syms=`IBM`C`MS`MSFT`JPM`ORCL;em = 97c 102c 97c 100c 99c 98c;zx=true false false true false true;ax = symbol(`A`B`C`D`E`F);sh = 11h 12h 13h 14h 15h 16h;m=table(09:30:00+take(100..105,n) as second,take(syms,n) as sym, 10.0*(1+take(100..105,n)) as qty,5.0+take(100..105,n) as price, 5l*(1+take(100..105,n)) as long, 6f*(1+take(100..105,n)) as float,take(em,n) as char,take(zx,n) as bool, take(ax,n) as symbol,take(sh,6) as short);m") + So(err, ShouldBeNil) + result := s.(*model.Table) + get := result.GetColumnByName("char").Data.Value() + zx := [6]uint8{97, 102, 97, 100, 99, 98} + for i := 0; i < len(get); i++ { + So(get[i], ShouldEqual, zx[i]) + } + }) + Convey("Test_Table_only_one_char_columns:", func() { + s, err := db.RunScript("em = 97c 102c 97c 100c 99c 98c;m = table(take(em,6) as char);m") + So(err, ShouldBeNil) + result := s.(*model.Table) + get := result.GetColumnByName("char").Data.Value() + zx := [6]uint{97, 102, 97, 100, 99, 98} + for i := 0; i < len(get); i++ { + So(get[i], ShouldEqual, zx[i]) + } + }) + Convey("Test_Table_only_one_char_null_columns:", func() { + s, err := db.RunScript("em = char(['','','','','','']);m = table(take(em,6) as char_null);m") + So(err, ShouldBeNil) + result := s.(*model.Table) + get := result.GetColumnByName("char_null").Data.Value() + for i := 0; i < len(get); i++ { + So(result.GetColumnByName("char_null").IsNull(i), ShouldEqual, true) + } + So(result.String(), ShouldNotBeNil) + }) + So(db.Close(), ShouldBeNil) + }) +} +func Test_Table_DownLoad_DataType_short(t *testing.T) { + Convey("Test_Table_with_short:", t, func() { + db, err := api.NewSimpleDolphinDBClient(context.TODO(), setup.Address, setup.UserName, setup.Password) + So(err, ShouldBeNil) + Convey("Test_Table_with_short_not_null:", func() { + s, err := db.RunScript("n=6;syms=`IBM`C`MS`MSFT`JPM`ORCL;em = 97c 102c 97c 100c 99c 98c;zx=true false false true false true;ax = symbol(`A`B`C`D`E`F);sh = 11h 12h 13h 14h 15h 16h;m=table(09:30:00+take(100..105,n) as second,take(syms,n) as sym, 10.0*(1+take(100..105,n)) as qty,5.0+take(100..105,n) as price, 5l*(1+take(100..105,n)) as long, 6f*(1+take(100..105,n)) as float,take(em,n) as char,take(zx,n) as bool, take(ax,n) as symbol,take(sh,6) as short);m") + So(err, ShouldBeNil) + result := s.(*model.Table) + get := result.GetColumnByName("short").Data.Value() + zx := [6]int16{11, 12, 13, 14, 15, 16} + var k int + for i := 0; i < len(get); i++ { + if get[i] == zx[i] { + k++ + } + } + So(k, ShouldEqual, result.Rows()) + }) + Convey("Test_Table_only_one_short_columns:", func() { + s, err := db.RunScript("sh = 11h 12h 13h 14h 15h 16h;m = table(take(sh,6) as short);m") + So(err, ShouldBeNil) + result := s.(*model.Table) + get := result.GetColumnByName("short").Data.Value() + zx := [6]int16{11, 12, 13, 14, 15, 16} + var k int + for i := 0; i < len(get); i++ { + if get[i] == zx[i] { + k++ + } + } + So(k, ShouldEqual, result.Rows()) + }) + Convey("Test_Table_only_one_short_null_columns:", func() { + s, err := db.RunScript("sh = short(['','','','','','']);m = 
table(take(sh,6) as short_null);m") + So(err, ShouldBeNil) + result := s.(*model.Table) + get := result.GetColumnByName("short_null").Data.Value() + for i := 0; i < len(get); i++ { + So(result.GetColumnByName("short_null").IsNull(i), ShouldEqual, true) + } + }) + So(db.Close(), ShouldBeNil) + }) +} +func Test_Table_DownLoad_DataType_date(t *testing.T) { + Convey("Test_Table_with_date:", t, func() { + db, err := api.NewSimpleDolphinDBClient(context.TODO(), setup.Address, setup.UserName, setup.Password) + So(err, ShouldBeNil) + Convey("Test_Table_with_date_not_null:", func() { + s, err := db.RunScript("n=6;syms=`IBM`C``MSFT``ORCL;m=table(1969.12.31 1970.01.01 1970.01.02 2006.01.02 2006.01.03 2022.08.03 as date,take(syms,n) as sym, 2012.08M+take(100..105,n) as month, 09:30:00.000+take(100..105,n) as time, 09:30m+take(100..105,n) as minute );m") + So(err, ShouldBeNil) + result := s.(*model.Table) + get := result.GetColumnByName("date").Data.Value() + time1 := []time.Time{time.Date(1969, 12, 31, 0, 0, 0, 0, time.UTC), time.Date(1970, 1, 1, 0, 0, 0, 0, time.UTC), time.Date(1970, 1, 2, 0, 0, 0, 0, time.UTC), time.Date(2006, 1, 2, 0, 0, 0, 0, time.UTC), time.Date(2006, 1, 3, 0, 0, 0, 0, time.UTC), time.Date(2022, 8, 3, 0, 0, 0, 0, time.UTC)} + var k int + for i := 0; i < len(get); i++ { + if get[i] == time1[i] { + k++ + } + } + So(k, ShouldEqual, result.Rows()) + }) + Convey("Test_Table_only_one_date_columns:", func() { + s, err := db.RunScript("m = table(2022.08.03+take(100..105,6) as date);m") + So(err, ShouldBeNil) + result := s.(*model.Table) + get := result.GetColumnByName("date").Data.Value() + time1 := []time.Time{time.Date(2022, 11, 11, 0, 0, 0, 0, time.UTC), time.Date(2022, 11, 12, 0, 0, 0, 0, time.UTC), time.Date(2022, 11, 13, 0, 0, 0, 0, time.UTC), time.Date(2022, 11, 14, 0, 0, 0, 0, time.UTC), time.Date(2022, 11, 15, 0, 0, 0, 0, time.UTC), time.Date(2022, 11, 16, 0, 0, 0, 0, time.UTC)} + var k int + for i := 0; i < len(get); i++ { + if get[i] == time1[i] { + k++ + } + } + So(k, ShouldEqual, result.Rows()) + }) + Convey("Test_Table_only_one_date_null_columns:", func() { + s, err := db.RunScript("m = table(10+take(00d,6) as date_null);m") + So(err, ShouldBeNil) + result := s.(*model.Table) + get := result.GetColumnByName("date_null").Data.Value() + for i := 0; i < len(get); i++ { + So(result.GetColumnByName("date_null").IsNull(i), ShouldEqual, true) + } + idex := result.GetColumnByIndex(15) + So(idex, ShouldBeNil) + byna := result.GetColumnByName("hello") + So(byna, ShouldBeNil) + }) + So(db.Close(), ShouldBeNil) + }) +} +func Test_Table_DownLoad_DataType_month(t *testing.T) { + Convey("Test_Table_with_month:", t, func() { + db, err := api.NewSimpleDolphinDBClient(context.TODO(), setup.Address, setup.UserName, setup.Password) + So(err, ShouldBeNil) + Convey("Test_Table_with_month_not_null:", func() { + s, err := db.RunScript("n=6;syms=`IBM`C``MSFT``ORCL;m=table(2022.08.03+take(100..105,n) as date,take(syms,n) as sym, 1969.12M 1970.01M 1970.02M 2006.01M 2006.02M 2022.08M as month, 09:30:00.000+take(100..105,n) as time, 09:30m+take(100..105,n) as minute );m") + So(err, ShouldBeNil) + result := s.(*model.Table) + get := result.GetColumnByName("month").Data.Value() + time1 := []time.Time{time.Date(1969, 12, 1, 0, 0, 0, 0, time.UTC), time.Date(1970, 1, 1, 0, 0, 0, 0, time.UTC), time.Date(1970, 2, 1, 0, 0, 0, 0, time.UTC), time.Date(2006, 1, 1, 0, 0, 0, 0, time.UTC), time.Date(2006, 2, 1, 0, 0, 0, 0, time.UTC), time.Date(2022, 8, 1, 0, 0, 0, 0, time.UTC)} + var k int + for i := 0; i < 
len(get); i++ { + if get[i] == time1[i] { + k++ + } + } + So(k, ShouldEqual, result.Rows()) + }) + Convey("Test_Table_only_one_month_columns:", func() { + s, err := db.RunScript("n=6;m = table(2012.08M+take(100..105,n) as month);m") + So(err, ShouldBeNil) + result := s.(*model.Table) + get := result.GetColumnByName("month").Data.Value() + time1 := []time.Time{time.Date(2020, 12, 1, 0, 0, 0, 0, time.UTC), time.Date(2021, 1, 1, 0, 0, 0, 0, time.UTC), time.Date(2021, 2, 1, 0, 0, 0, 0, time.UTC), time.Date(2021, 3, 1, 0, 0, 0, 0, time.UTC), time.Date(2021, 4, 1, 0, 0, 0, 0, time.UTC), time.Date(2021, 5, 1, 0, 0, 0, 0, time.UTC)} + var k int + for i := 0; i < len(get); i++ { + if get[i] == time1[i] { + k++ + } + } + So(k, ShouldEqual, result.Rows()) + }) + Convey("Test_Table_only_one_month_null_columns:", func() { + s, err := db.RunScript("m = table(10+month(['','','','','','']) as month_null);m") + So(err, ShouldBeNil) + result := s.(*model.Table) + get := result.GetColumnByName("month_null").Data.Value() + for i := 0; i < len(get); i++ { + So(result.GetColumnByName("month_null").IsNull(i), ShouldEqual, true) + } + }) + So(db.Close(), ShouldBeNil) + }) +} +func Test_Table_DownLoad_DataType_time(t *testing.T) { + Convey("Test_Table_with_time:", t, func() { + db, err := api.NewSimpleDolphinDBClient(context.TODO(), setup.Address, setup.UserName, setup.Password) + So(err, ShouldBeNil) + Convey("Test_Table_with_time_not_null:", func() { + s, err := db.RunScript("n=6;syms=`IBM`C``MSFT``ORCL;m=table(2022.08.03+take(100..105,n) as date,take(syms,n) as sym, 2012.08M+take(100..105,n) as month, 23:59:59.999 00:00:00.000 00:00:01.999 15:04:04.999 15:04:05.000 15:00:15.000 as time, 09:30m+take(100..105,n) as minute );m") + So(err, ShouldBeNil) + result := s.(*model.Table) + get := result.GetColumnByName("time").Data.Value() + time1 := []time.Time{time.Date(1970, 1, 1, 23, 59, 59, 999000000, time.UTC), time.Date(1970, 1, 1, 0, 0, 0, 0, time.UTC), time.Date(1970, 1, 1, 0, 0, 1, 999000000, time.UTC), time.Date(1970, 1, 1, 15, 4, 4, 999000000, time.UTC), time.Date(1970, 1, 1, 15, 4, 5, 0, time.UTC), time.Date(1970, 1, 1, 15, 0, 15, 0, time.UTC)} + var k int + for i := 0; i < len(get); i++ { + if get[i] == time1[i] { + k++ + } + } + So(k, ShouldEqual, result.Rows()) + }) + Convey("Test_Table_only_one_time_columns:", func() { + s, err := db.RunScript("n=6;m = table(23:59:59.999 00:00:00.000 00:00:01.999 15:04:04.999 15:04:05.000 15:00:15.000 as time);m") + So(err, ShouldBeNil) + result := s.(*model.Table) + get := result.GetColumnByName("time").Data.Value() + time1 := []time.Time{time.Date(1970, 1, 1, 23, 59, 59, 999000000, time.UTC), time.Date(1970, 1, 1, 0, 0, 0, 0, time.UTC), time.Date(1970, 1, 1, 0, 0, 1, 999000000, time.UTC), time.Date(1970, 1, 1, 15, 4, 4, 999000000, time.UTC), time.Date(1970, 1, 1, 15, 4, 5, 0, time.UTC), time.Date(1970, 1, 1, 15, 0, 15, 0, time.UTC)} + var k int + for i := 0; i < len(get); i++ { + if get[i] == time1[i] { + k++ + } + } + So(k, ShouldEqual, result.Rows()) + }) + Convey("Test_Table_only_one_time_null_columns:", func() { + s, err := db.RunScript("m = table(10+time(['','','','','','']) as time_null);m") + So(err, ShouldBeNil) + result := s.(*model.Table) + get := result.GetColumnByName("time_null").Data.Value() + for i := 0; i < len(get); i++ { + So(result.GetColumnByName("time_null").IsNull(i), ShouldEqual, true) + } + }) + So(db.Close(), ShouldBeNil) + }) +} +func Test_Table_DownLoad_DataType_minute(t *testing.T) { + Convey("Test_Table_with_minute:", t, func() { + db, err 
:= api.NewSimpleDolphinDBClient(context.TODO(), setup.Address, setup.UserName, setup.Password) + So(err, ShouldBeNil) + Convey("Test_Table_with_minute_not_null:", func() { + s, err := db.RunScript("n=6;syms=`IBM`C``MSFT``ORCL;m=table(2022.08.03+take(100..105,n) as date,take(syms,n) as sym, 2012.08M+take(100..105,n) as month, 09:30:00.000+take(100..105,n) as time, 23:59m 00:00m 00:01m 15:04m 15:05m 15:15m as minute );m") + So(err, ShouldBeNil) + result := s.(*model.Table) + get := result.GetColumnByName("minute").Data.Value() + time1 := []time.Time{time.Date(1970, 1, 1, 23, 59, 0, 0, time.UTC), time.Date(1970, 1, 1, 0, 0, 0, 0, time.UTC), time.Date(1970, 1, 1, 0, 1, 0, 0, time.UTC), time.Date(1970, 1, 1, 15, 4, 0, 0, time.UTC), time.Date(1970, 1, 1, 15, 5, 0, 0, time.UTC), time.Date(1970, 1, 1, 15, 15, 0, 0, time.UTC)} + var k int + for i := 0; i < len(get); i++ { + if get[i] == time1[i] { + k++ + } + } + So(k, ShouldEqual, result.Rows()) + }) + Convey("Test_Table_only_one_minute_columns:", func() { + s, err := db.RunScript("n=6; m = table(23:59m 00:00m 00:01m 15:04m 15:05m 15:15m as minute );m") + So(err, ShouldBeNil) + result := s.(*model.Table) + get := result.GetColumnByName("minute").Data.Value() + time1 := []time.Time{time.Date(1970, 1, 1, 23, 59, 0, 0, time.UTC), time.Date(1970, 1, 1, 0, 0, 0, 0, time.UTC), time.Date(1970, 1, 1, 0, 1, 0, 0, time.UTC), time.Date(1970, 1, 1, 15, 4, 0, 0, time.UTC), time.Date(1970, 1, 1, 15, 5, 0, 0, time.UTC), time.Date(1970, 1, 1, 15, 15, 0, 0, time.UTC)} + var k int + for i := 0; i < len(get); i++ { + if get[i] == time1[i] { + k++ + } + } + So(k, ShouldEqual, result.Rows()) + }) + Convey("Test_Table_only_one_minute_null_columns:", func() { + s, err := db.RunScript("m = table(10+take(00m,6) as minute_null);m") + So(err, ShouldBeNil) + result := s.(*model.Table) + get := result.GetColumnByName("minute_null").Data.Value() + for i := 0; i < len(get); i++ { + So(result.GetColumnByName("minute_null").IsNull(i), ShouldEqual, true) + } + }) + So(db.Close(), ShouldBeNil) + }) +} +func Test_Table_DownLoad_DataType_second(t *testing.T) { + Convey("Test_Table_with_second:", t, func() { + db, err := api.NewSimpleDolphinDBClient(context.TODO(), setup.Address, setup.UserName, setup.Password) + So(err, ShouldBeNil) + Convey("Test_Table_with_second_not_null:", func() { + s, err := db.RunScript("n=6;syms=`IBM`C`MS`MSFT`JPM`ORCL;em = 97c 102c 97c 100c 99c 98c;zx=true false false true false true;ax = symbol(`A`B`C`D`E`F);sh = 11h 12h 13h 14h 15h 16h;m=table(23:59:59 00:00:00 00:00:01 15:04:04 15:04:05 15:00:15 as second,take(syms,n) as sym, 10.0*(1+take(100..105,n)) as qty,5.0+take(100..105,n) as price, 5l*(1+take(100..105,n)) as long, 6f*(1+take(100..105,n)) as float,take(em,n) as char,take(zx,n) as bool, take(ax,n) as symbol,take(sh,6) as short);m") + So(err, ShouldBeNil) + result := s.(*model.Table) + get := result.GetColumnByName("second").Data.Value() + time1 := []time.Time{time.Date(1970, 1, 1, 23, 59, 59, 0, time.UTC), time.Date(1970, 1, 1, 0, 0, 0, 0, time.UTC), time.Date(1970, 1, 1, 0, 0, 1, 0, time.UTC), time.Date(1970, 1, 1, 15, 4, 4, 0, time.UTC), time.Date(1970, 1, 1, 15, 4, 5, 0, time.UTC), time.Date(1970, 1, 1, 15, 0, 15, 0, time.UTC)} + var k int + for i := 0; i < len(get); i++ { + if get[i] == time1[i] { + k++ + } + } + So(k, ShouldEqual, result.Rows()) + }) + Convey("Test_Table_only_one_second_columns:", func() { + s, err := db.RunScript("m = table(23:59:59 00:00:00 00:00:01 15:04:04 15:04:05 15:00:15 as second );m") + So(err, ShouldBeNil) + result := 
s.(*model.Table) + get := result.GetColumnByName("second").Data.Value() + time1 := []time.Time{time.Date(1970, 1, 1, 23, 59, 59, 0, time.UTC), time.Date(1970, 1, 1, 0, 0, 0, 0, time.UTC), time.Date(1970, 1, 1, 0, 0, 1, 0, time.UTC), time.Date(1970, 1, 1, 15, 4, 4, 0, time.UTC), time.Date(1970, 1, 1, 15, 4, 5, 0, time.UTC), time.Date(1970, 1, 1, 15, 0, 15, 0, time.UTC)} + var k int + for i := 0; i < len(get); i++ { + if get[i] == time1[i] { + k++ + } + } + So(k, ShouldEqual, result.Rows()) + }) + Convey("Test_Table_only_one_second_null_columns:", func() { + s, err := db.RunScript("m = table(10+take(00s,6) as second_null);m") + So(err, ShouldBeNil) + result := s.(*model.Table) + get := result.GetColumnByName("second_null").Data.Value() + for i := 0; i < len(get); i++ { + So(result.GetColumnByName("second_null").IsNull(i), ShouldEqual, true) + } + }) + So(db.Close(), ShouldBeNil) + }) +} +func Test_Table_DownLoad_DataType_datetime(t *testing.T) { + Convey("Test_Table_with_datetime:", t, func() { + db, err := api.NewSimpleDolphinDBClient(context.TODO(), setup.Address, setup.UserName, setup.Password) + So(err, ShouldBeNil) + Convey("Test_Table_with_datetime_not_null:", func() { + s, err := db.RunScript("n=6;syms=``````;m=table(1969.12.31T23:59:59 1970.01.01T00:00:00 1970.01.01T00:00:01 2006.01.02T15:04:04 2006.01.02T15:04:05 2022.08.03T15:00:15 as datetime,take(syms,n) as sym, 2012.08.03 11:00:00.000+take(100..105,n) as timestamp, 11:00:00.000000000+take(100..105,n) as nanotime, 2022.08.03 11:00:00.000000000+take(100..105,n) as nanotimestamp );m") + So(err, ShouldBeNil) + result := s.(*model.Table) + get := result.GetColumnByName("datetime").Data.Value() + time1 := []time.Time{time.Date(1969, 12, 31, 23, 59, 59, 0, time.UTC), time.Date(1970, 1, 1, 0, 0, 0, 0, time.UTC), time.Date(1970, 1, 1, 0, 0, 1, 0, time.UTC), time.Date(2006, 1, 2, 15, 4, 4, 0, time.UTC), time.Date(2006, 1, 2, 15, 4, 5, 0, time.UTC), time.Date(2022, 8, 3, 15, 0, 15, 0, time.UTC)} + var k int + for i := 0; i < len(get); i++ { + if get[i] == time1[i] { + k++ + } + } + So(k, ShouldEqual, result.Rows()) + }) + Convey("Test_Table_only_one_datetime_columns:", func() { + s, err := db.RunScript("m = table(1969.12.31T23:59:59 1970.01.01T00:00:00 1970.01.01T00:00:01 2006.01.02T15:04:04 2006.01.02T15:04:05 2022.08.03T15:00:15 as datetime);m") + So(err, ShouldBeNil) + result := s.(*model.Table) + get := result.GetColumnByName("datetime").Data.Value() + time1 := []time.Time{time.Date(1969, 12, 31, 23, 59, 59, 0, time.UTC), time.Date(1970, 1, 1, 0, 0, 0, 0, time.UTC), time.Date(1970, 1, 1, 0, 0, 1, 0, time.UTC), time.Date(2006, 1, 2, 15, 4, 4, 0, time.UTC), time.Date(2006, 1, 2, 15, 4, 5, 0, time.UTC), time.Date(2022, 8, 3, 15, 0, 15, 0, time.UTC)} + var k int + for i := 0; i < len(get); i++ { + if get[i] == time1[i] { + k++ + } + } + So(k, ShouldEqual, result.Rows()) + }) + Convey("Test_Table_only_one_datetime_null_columns:", func() { + s, err := db.RunScript("m = table(10+datetime(['','','','','','']) as datetime_null);m") + So(err, ShouldBeNil) + result := s.(*model.Table) + get := result.GetColumnByName("datetime_null").Data.Value() + for i := 0; i < len(get); i++ { + So(result.GetColumnByName("datetime_null").IsNull(i), ShouldEqual, true) + } + }) + So(db.Close(), ShouldBeNil) + }) +} +func Test_Table_DownLoad_DataType_timestamp(t *testing.T) { + Convey("Test_Table_with_timestamp:", t, func() { + db, err := api.NewSimpleDolphinDBClient(context.TODO(), setup.Address, setup.UserName, setup.Password) + So(err, ShouldBeNil) + 
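+// TIMESTAMP carries millisecond precision, so the .999 literals in the next
+// script are expected back as 999000000 nanoseconds in time.Time.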
Convey("Test_Table_with_timestamp_not_null:", func() { + s, err := db.RunScript("n=6;syms=``````;m=table(2022.08.03 11:00:00+take(10..15,n) as datetime,take(syms,n) as sym, 1969.12.31T23:59:59.999 1970.01.01T00:00:00.000 1970.01.01T00:00:01.999 2006.01.02T15:04:04.999 2006.01.02T15:04:05.000 2022.08.03T15:00:15.000 as timestamp, 11:00:00.000000000+take(100..105,n) as nanotime, 2022.08.03 11:00:00.000000000+take(100..105,n) as nanotimestamp );m") + So(err, ShouldBeNil) + result := s.(*model.Table) + get := result.GetColumnByName("timestamp").Data.Value() + time1 := []time.Time{time.Date(1969, 12, 31, 23, 59, 59, 999000000, time.UTC), time.Date(1970, 1, 1, 0, 0, 0, 0, time.UTC), time.Date(1970, 1, 1, 0, 0, 1, 999000000, time.UTC), time.Date(2006, 1, 2, 15, 4, 4, 999000000, time.UTC), time.Date(2006, 1, 2, 15, 4, 5, 0, time.UTC), time.Date(2022, 8, 3, 15, 0, 15, 0, time.UTC)} + var k int + for i := 0; i < len(get); i++ { + if get[i] == time1[i] { + k++ + } + } + So(k, ShouldEqual, result.Rows()) + }) + Convey("Test_Table_only_one_timestamp_columns:", func() { + s, err := db.RunScript("m = table(1969.12.31T23:59:59.999 1970.01.01T00:00:00.000 1970.01.01T00:00:01.999 2006.01.02T15:04:04.999 2006.01.02T15:04:05.000 2022.08.03T15:00:15.000 as timestamp);m") + So(err, ShouldBeNil) + result := s.(*model.Table) + get := result.GetColumnByName("timestamp").Data.Value() + time1 := []time.Time{time.Date(1969, 12, 31, 23, 59, 59, 999000000, time.UTC), time.Date(1970, 1, 1, 0, 0, 0, 0, time.UTC), time.Date(1970, 1, 1, 0, 0, 1, 999000000, time.UTC), time.Date(2006, 1, 2, 15, 4, 4, 999000000, time.UTC), time.Date(2006, 1, 2, 15, 4, 5, 0, time.UTC), time.Date(2022, 8, 3, 15, 0, 15, 0, time.UTC)} + var k int + for i := 0; i < len(get); i++ { + if get[i] == time1[i] { + k++ + } + } + So(k, ShouldEqual, result.Rows()) + }) + Convey("Test_Table_only_one_timestamp_null_columns:", func() { + s, err := db.RunScript("m = table(10+timestamp(['','','','','','']) as timestamp_null);m") + So(err, ShouldBeNil) + result := s.(*model.Table) + get := result.GetColumnByName("timestamp_null").Data.Value() + for i := 0; i < len(get); i++ { + So(result.GetColumnByName("timestamp_null").IsNull(i), ShouldEqual, true) + } + }) + So(db.Close(), ShouldBeNil) + }) +} +func Test_Table_DownLoad_DataType_nanotime(t *testing.T) { + Convey("Test_Table_with_nanotime:", t, func() { + db, err := api.NewSimpleDolphinDBClient(context.TODO(), setup.Address, setup.UserName, setup.Password) + So(err, ShouldBeNil) + Convey("Test_Table_with_nanotime_not_null:", func() { + s, err := db.RunScript("n=6;syms=``````;m=table(2022.08.03 11:00:00+take(10..15,n) as datetime,take(syms,n) as sym, 2012.08.03 11:00:00.000+take(100..105,n) as timestamp, 23:59:59.999999999 00:00:00.000000000 00:00:01.999999999 15:04:04.999999999 15:04:05.000000000 15:00:15.000000000 as nanotime, 2022.08.03 11:00:00.000000000+take(100..105,n) as nanotimestamp );m") + So(err, ShouldBeNil) + result := s.(*model.Table) + get := result.GetColumnByName("nanotime").Data.Value() + time1 := []time.Time{time.Date(1970, 1, 1, 23, 59, 59, 999999999, time.UTC), time.Date(1970, 1, 1, 0, 0, 0, 0, time.UTC), time.Date(1970, 1, 1, 0, 0, 1, 999999999, time.UTC), time.Date(1970, 1, 1, 15, 4, 4, 999999999, time.UTC), time.Date(1970, 1, 1, 15, 4, 5, 0, time.UTC), time.Date(1970, 1, 1, 15, 0, 15, 0, time.UTC)} + var k int + for i := 0; i < len(get); i++ { + if get[i] == time1[i] { + k++ + } + } + So(k, ShouldEqual, result.Rows()) + }) + Convey("Test_Table_only_one_nanotime_columns:", func() { + s, 
err := db.RunScript("m = table(23:59:59.999999999 00:00:00.000000000 00:00:01.999999999 15:04:04.999999999 15:04:05.000000000 15:00:15.000000000 as nanotime);m") + So(err, ShouldBeNil) + result := s.(*model.Table) + get := result.GetColumnByName("nanotime").Data.Value() + time1 := []time.Time{time.Date(1970, 1, 1, 23, 59, 59, 999999999, time.UTC), time.Date(1970, 1, 1, 0, 0, 0, 0, time.UTC), time.Date(1970, 1, 1, 0, 0, 1, 999999999, time.UTC), time.Date(1970, 1, 1, 15, 4, 4, 999999999, time.UTC), time.Date(1970, 1, 1, 15, 4, 5, 0, time.UTC), time.Date(1970, 1, 1, 15, 0, 15, 0, time.UTC)} + var k int + for i := 0; i < len(get); i++ { + if get[i] == time1[i] { + k++ + } + } + So(k, ShouldEqual, result.Rows()) + }) + Convey("Test_Table_only_one_nanotime_null_columns:", func() { + s, err := db.RunScript("m = table(10+nanotime(['','','','','','']) as nanotime_null);m") + So(err, ShouldBeNil) + result := s.(*model.Table) + get := result.GetColumnByName("nanotime_null").Data.Value() + for i := 0; i < len(get); i++ { + So(result.GetColumnByName("nanotime_null").IsNull(i), ShouldEqual, true) + } + }) + So(db.Close(), ShouldBeNil) + }) +} +func Test_Table_DownLoad_DataType_nanotimestamp(t *testing.T) { + Convey("Test_Table_with_nanotimestamp:", t, func() { + db, err := api.NewSimpleDolphinDBClient(context.TODO(), setup.Address, setup.UserName, setup.Password) + So(err, ShouldBeNil) + Convey("Test_Table_with_nanotimestamp_not_null:", func() { + s, err := db.RunScript("n=6;syms=``````;m=table(2022.08.03 11:00:00+take(10..15,n) as datetime,take(syms,n) as sym, 2012.08.03 11:00:00.000+take(100..105,n) as timestamp, 11:00:00.000000000+take(100..105,n) as nanotime, 1969.12.31T23:59:59.999999999 1970.01.01T00:00:00.000000000 1970.01.01T00:00:01.999999999 2006.01.02T15:04:04.999999999 2006.01.02T15:04:05.000000000 2022.08.03T15:00:15.000000000 as nanotimestamp );m") + So(err, ShouldBeNil) + result := s.(*model.Table) + get := result.GetColumnByName("nanotimestamp").Data.Value() + time1 := []time.Time{time.Date(1969, 12, 31, 23, 59, 59, 999999999, time.UTC), time.Date(1970, 1, 1, 0, 0, 0, 0, time.UTC), time.Date(1970, 1, 1, 0, 0, 1, 999999999, time.UTC), time.Date(2006, 1, 2, 15, 4, 4, 999999999, time.UTC), time.Date(2006, 1, 2, 15, 4, 5, 0, time.UTC), time.Date(2022, 8, 3, 15, 0, 15, 0, time.UTC)} + var k int + for i := 0; i < len(get); i++ { + if get[i] == time1[i] { + k++ + } + } + So(k, ShouldEqual, result.Rows()) + }) + Convey("Test_Table_only_one_nanotimestamp_columns:", func() { + s, err := db.RunScript("m = table(1969.12.31T23:59:59.999999999 1970.01.01T00:00:00.000000000 1970.01.01T00:00:01.999999999 2006.01.02T15:04:04.999999999 2006.01.02T15:04:05.000000000 2022.08.03T15:00:15.000000000 as nanotimestamp);m") + So(err, ShouldBeNil) + result := s.(*model.Table) + get := result.GetColumnByName("nanotimestamp").Data.Value() + time1 := []time.Time{time.Date(1969, 12, 31, 23, 59, 59, 999999999, time.UTC), time.Date(1970, 1, 1, 0, 0, 0, 0, time.UTC), time.Date(1970, 1, 1, 0, 0, 1, 999999999, time.UTC), time.Date(2006, 1, 2, 15, 4, 4, 999999999, time.UTC), time.Date(2006, 1, 2, 15, 4, 5, 0, time.UTC), time.Date(2022, 8, 3, 15, 0, 15, 0, time.UTC)} + var k int + for i := 0; i < len(get); i++ { + if get[i] == time1[i] { + k++ + } + } + So(k, ShouldEqual, result.Rows()) + }) + Convey("Test_Table_only_one_nanotimestamp_null_columns:", func() { + s, err := db.RunScript("m = table(10+nanotimestamp(['','','','','','']) as nanotimestamp_null);m") + So(err, ShouldBeNil) + result := s.(*model.Table) + get := 
result.GetColumnByName("nanotimestamp_null").Data.Value() + for i := 0; i < len(get); i++ { + So(result.GetColumnByName("nanotimestamp_null").IsNull(i), ShouldEqual, true) + } + }) + So(db.Close(), ShouldBeNil) + }) +} +func Test_Table_DownLoad_DataType_datehour(t *testing.T) { + Convey("Test_Table_with_datehour:", t, func() { + db, err := api.NewSimpleDolphinDBClient(context.TODO(), setup.Address, setup.UserName, setup.Password) + So(err, ShouldBeNil) + Convey("Test_Table_with_datehour_not_null:", func() { + s, err := db.RunScript("ax = uuid(['5d212a78-cc48-e3b1-4235-b4d91473ee87', '5d212a78-cc48-e3b1-4235-b4d91473ee88', '5d212a78-cc48-e3b1-4235-b4d91473ee89']);bx =datehour[1969.12.31T23:59:59.999, 1970.01.01T00:00:00.000, 2006.01.02T15:04:04.999];cx = ipaddr(['461c:7fa1:7f3c:7249:5278:c610:f595:d174', '3de8:13c6:df5f:bcd5:7605:3827:e37a:3a72', '127e:eeed:1b16:20a9:1694:6185:f045:fb9a']);dx = ipaddr(['192.168.1.135', '192.168.1.124', '192.168.1.14']);zx = int128(['e1671797c52e15f763380b45e841ec32', 'e1671797c52e15f763380b45e841ec33', 'e1671797c52e15f763380b45e841ec34']); m = table(take(ax,3) as uuid, take(bx,3) as datehour,take(cx,3) as ipaddr,take(dx,3) as ipaddr123,take(zx,3) as int128);m") + So(err, ShouldBeNil) + result := s.(*model.Table) + get := result.GetColumnByName("datehour").Data.Value() + time1 := []time.Time{time.Date(1969, 12, 31, 23, 0, 0, 0, time.UTC), time.Date(1970, 1, 1, 0, 0, 0, 0, time.UTC), time.Date(2006, 1, 2, 15, 0, 0, 0, time.UTC)} + var k int + for i := 0; i < len(get); i++ { + if get[i] == time1[i] { + k++ + } + } + So(k, ShouldEqual, result.Rows()) + }) + Convey("Test_Table_only_one_datehour_columns:", func() { + s, err := db.RunScript("bx =datehour[1969.12.31T23:59:59.999, 1970.01.01T00:00:00.000, 2006.01.02T15:04:04.999];m = table(take(bx,3) as datehour);m") + So(err, ShouldBeNil) + result := s.(*model.Table) + get := result.GetColumnByName("datehour").Data.Value() + time1 := []time.Time{time.Date(1969, 12, 31, 23, 0, 0, 0, time.UTC), time.Date(1970, 1, 1, 0, 0, 0, 0, time.UTC), time.Date(2006, 1, 2, 15, 0, 0, 0, time.UTC)} + var k int + for i := 0; i < len(get); i++ { + if get[i] == time1[i] { + k++ + } + } + So(k, ShouldEqual, result.Rows()) + }) + Convey("Test_Table_only_one_datehour_null_columns:", func() { + s, err := db.RunScript("m = table(10.0+datehour(['','','','','','']) as datehour_null);m") + So(err, ShouldBeNil) + result := s.(*model.Table) + get := result.GetColumnByName("datehour_null").Data.Value() + for i := 0; i < len(get); i++ { + So(result.GetColumnByName("datehour_null").IsNull(i), ShouldEqual, true) + } + }) + So(db.Close(), ShouldBeNil) + }) +} +func Test_Table_DownLoad_DataType_uuid(t *testing.T) { + Convey("Test_Table_with_uuid:", t, func() { + db, err := api.NewSimpleDolphinDBClient(context.TODO(), setup.Address, setup.UserName, setup.Password) + So(err, ShouldBeNil) + Convey("Test_Table_with_uuid_not_null:", func() { + s, err := db.RunScript("ax = uuid(['5d212a78-cc48-e3b1-4235-b4d91473ee87', '5d212a78-cc48-e3b1-4235-b4d91473ee88', '5d212a78-cc48-e3b1-4235-b4d91473ee89']);bx =datehour([2022.07.29 15:00:00.000, 2022.07.29 16:00:00.000, 2022.07.29 17:00:00.000]);cx = ipaddr(['461c:7fa1:7f3c:7249:5278:c610:f595:d174', '3de8:13c6:df5f:bcd5:7605:3827:e37a:3a72', '127e:eeed:1b16:20a9:1694:6185:f045:fb9a']);dx = ipaddr(['192.168.1.135', '192.168.1.124', '192.168.1.14']);zx = int128(['e1671797c52e15f763380b45e841ec32', 'e1671797c52e15f763380b45e841ec33', 'e1671797c52e15f763380b45e841ec34']); m = table(take(ax,3) as uuid, take(bx,3) as 
datehour,take(cx,3) as ipaddr,take(dx,3) as ipaddr123,take(zx,3) as int128);m") + So(err, ShouldBeNil) + result := s.(*model.Table) + get := result.GetColumnByName("uuid").Data.Value() + zx := [3]string{"5d212a78-cc48-e3b1-4235-b4d91473ee87", "5d212a78-cc48-e3b1-4235-b4d91473ee88", "5d212a78-cc48-e3b1-4235-b4d91473ee89"} + var k int + for i := 0; i < len(get); i++ { + if get[i] == zx[i] { + k++ + } + } + So(k, ShouldEqual, result.Rows()) + }) + Convey("Test_Table_only_one_uuid_columns:", func() { + s, err := db.RunScript("ax = uuid(['5d212a78-cc48-e3b1-4235-b4d91473ee87', '5d212a78-cc48-e3b1-4235-b4d91473ee88', '5d212a78-cc48-e3b1-4235-b4d91473ee89']);m = table(take(ax,3) as uuid);m") + So(err, ShouldBeNil) + result := s.(*model.Table) + get := result.GetColumnByName("uuid").Data.Value() + zx := [3]string{"5d212a78-cc48-e3b1-4235-b4d91473ee87", "5d212a78-cc48-e3b1-4235-b4d91473ee88", "5d212a78-cc48-e3b1-4235-b4d91473ee89"} + var k int + for i := 0; i < len(get); i++ { + if get[i] == zx[i] { + k++ + } + } + So(k, ShouldEqual, result.Rows()) + }) + Convey("Test_Table_only_one_uuid_null_columns:", func() { + s, err := db.RunScript("m = table(uuid(['','','','','','']) as uuid_null);m") + So(err, ShouldBeNil) + result := s.(*model.Table) + get := result.GetColumnByName("uuid_null").Data.Value() + for i := 0; i < len(get); i++ { + So(result.GetColumnByName("uuid_null").IsNull(i), ShouldEqual, true) + } + }) + So(db.Close(), ShouldBeNil) + }) +} +func Test_Table_DownLoad_DataType_ipaddr(t *testing.T) { + Convey("Test_Table_with_ipaddr:", t, func() { + db, err := api.NewSimpleDolphinDBClient(context.TODO(), setup.Address, setup.UserName, setup.Password) + So(err, ShouldBeNil) + Convey("Test_Table_with_ipaddr_not_null:", func() { + s, err := db.RunScript("ax = uuid(['5d212a78-cc48-e3b1-4235-b4d91473ee87', '5d212a78-cc48-e3b1-4235-b4d91473ee88', '5d212a78-cc48-e3b1-4235-b4d91473ee89']);bx =datehour([2022.07.29 15:00:00.000, 2022.07.29 16:00:00.000, 2022.07.29 17:00:00.000]);cx = ipaddr(['461c:7fa1:7f3c:7249:5278:c610:f595:d174', '3de8:13c6:df5f:bcd5:7605:3827:e37a:3a72', '127e:eeed:1b16:20a9:1694:6185:f045:fb9a']);dx = ipaddr(['192.168.1.135', '192.168.1.124', '192.168.1.14']);zx = int128(['e1671797c52e15f763380b45e841ec32', 'e1671797c52e15f763380b45e841ec33', 'e1671797c52e15f763380b45e841ec34']); m = table(take(ax,3) as uuid, take(bx,3) as datehour,take(cx,3) as ipaddr,take(dx,3) as ipaddr123,take(zx,3) as int128);m") + So(err, ShouldBeNil) + result := s.(*model.Table) + get := result.GetColumnByName("ipaddr").Data.Value() + zx := [3]string{"461c:7fa1:7f3c:7249:5278:c610:f595:d174", "3de8:13c6:df5f:bcd5:7605:3827:e37a:3a72", "127e:eeed:1b16:20a9:1694:6185:f045:fb9a"} + var k int + for i := 0; i < len(get); i++ { + if get[i] == zx[i] { + k++ + } + } + So(k, ShouldEqual, result.Rows()) + Convey("Test_Table_ipaddr_num: ", func() { + get := result.GetColumnByName("ipaddr123").Data.Value() + zx := [3]string{"192.168.1.135", "192.168.1.124", "192.168.1.14"} + var k int + for i := 0; i < len(get); i++ { + if get[i] == zx[i] { + k++ + } + } + So(k, ShouldEqual, result.Rows()) + }) + Convey("Test_Table_only_one_ipaddr_columns:", func() { + s, err := db.RunScript("cx = ipaddr(['461c:7fa1:7f3c:7249:5278:c610:f595:d174', '3de8:13c6:df5f:bcd5:7605:3827:e37a:3a72', '127e:eeed:1b16:20a9:1694:6185:f045:fb9a']);m = table(take(cx,3) as ipaddr);m") + So(err, ShouldBeNil) + result := s.(*model.Table) + get := result.GetColumnByName("ipaddr").Data.Value() + zx := [3]string{"461c:7fa1:7f3c:7249:5278:c610:f595:d174", 
"3de8:13c6:df5f:bcd5:7605:3827:e37a:3a72", "127e:eeed:1b16:20a9:1694:6185:f045:fb9a"} + var k int + for i := 0; i < len(get); i++ { + if get[i] == zx[i] { + k++ + } + } + So(k, ShouldEqual, result.Rows()) + }) + Convey("Test_Table_only_one_ipaddr_null_columns:", func() { + s, err := db.RunScript("m = table(ipaddr(['','','','','','']) as ipaddr_null);m") + So(err, ShouldBeNil) + result := s.(*model.Table) + get := result.GetColumnByName("ipaddr_null").Data.Value() + for i := 0; i < len(get); i++ { + So(result.GetColumnByName("ipaddr_null").IsNull(i), ShouldBeTrue) + } + }) + So(db.Close(), ShouldBeNil) + }) + }) +} +func Test_Table_DownLoad_DataType_int128(t *testing.T) { + Convey("Test_Table_with_int128:", t, func() { + db, err := api.NewSimpleDolphinDBClient(context.TODO(), setup.Address, setup.UserName, setup.Password) + So(err, ShouldBeNil) + Convey("Test_Table_with_int128_not_null:", func() { + s, err := db.RunScript("ax = uuid(['5d212a78-cc48-e3b1-4235-b4d91473ee87', '5d212a78-cc48-e3b1-4235-b4d91473ee88', '5d212a78-cc48-e3b1-4235-b4d91473ee89']);bx =datehour([2022.07.29 15:00:00.000, 2022.07.29 16:00:00.000, 2022.07.29 17:00:00.000]);cx = ipaddr(['461c:7fa1:7f3c:7249:5278:c610:f595:d174', '3de8:13c6:df5f:bcd5:7605:3827:e37a:3a72', '127e:eeed:1b16:20a9:1694:6185:f045:fb9a']);dx = ipaddr(['192.168.1.135', '192.168.1.124', '192.168.1.14']);zx = int128(['e1671797c52e15f763380b45e841ec32', 'e1671797c52e15f763380b45e841ec33', 'e1671797c52e15f763380b45e841ec34']); m = table(take(ax,3) as uuid, take(bx,3) as datehour,take(cx,3) as ipaddr,take(dx,3) as ipaddr123,take(zx,3) as int128);m") + So(err, ShouldBeNil) + result := s.(*model.Table) + get := result.GetColumnByName("int128").Data.Value() + zx := [3]string{"e1671797c52e15f763380b45e841ec32", "e1671797c52e15f763380b45e841ec33", "e1671797c52e15f763380b45e841ec34"} + var k int + for i := 0; i < len(get); i++ { + if get[i] == zx[i] { + k++ + } + } + So(k, ShouldEqual, result.Rows()) + }) + Convey("Test_Table_only_one_int128_columns:", func() { + s, err := db.RunScript("zx = int128(['e1671797c52e15f763380b45e841ec32', 'e1671797c52e15f763380b45e841ec33', 'e1671797c52e15f763380b45e841ec34']);m = table(take(zx,3) as int128);m") + So(err, ShouldBeNil) + result := s.(*model.Table) + get := result.GetColumnByName("int128").Data.Value() + zx := [3]string{"e1671797c52e15f763380b45e841ec32", "e1671797c52e15f763380b45e841ec33", "e1671797c52e15f763380b45e841ec34"} + var k int + for i := 0; i < len(get); i++ { + if get[i] == zx[i] { + k++ + } + } + So(k, ShouldEqual, result.Rows()) + }) + Convey("Test_Table_only_one_int128_null_columns:", func() { + s, err := db.RunScript("m = table(10.0+int128(['','','','','','']) as int128_null);m") + So(err, ShouldBeNil) + result := s.(*model.Table) + get := result.GetColumnByName("int128_null").Data.Value() + for i := 0; i < len(get); i++ { + So(result.GetColumnByName("int128_null").IsNull(i), ShouldEqual, true) + } + }) + So(db.Close(), ShouldBeNil) + }) +} +func Test_Table_DownLoad_DataType_big_size(t *testing.T) { + Convey("Test_Table_size_bigger_than_1024:", t, func() { + db, err := api.NewSimpleDolphinDBClient(context.TODO(), setup.Address, setup.UserName, setup.Password) + So(err, ShouldBeNil) + s, err := db.RunScript("table1 = table(1..2048 as id, take(symbol((`A)+string(1..10)),2048) as name, double(rand(3892,2048)) as value);select * from table1") + So(err, ShouldBeNil) + result := s.(*model.Table) + row := result.Rows() + So(row, ShouldEqual, 2048) + So(db.Close(), ShouldBeNil) + }) + 
+// Test_Table_DownLoad_DataType_big_size checks result sets that cross the 1024, 1048576 and 3000000 row marks; the two tests after it cover the zero-row and one-row extremes.
+func Test_Table_DownLoad_DataType_big_size(t *testing.T) {
+	Convey("Test_Table_size_bigger_than_1024:", t, func() {
+		db, err := api.NewSimpleDolphinDBClient(context.TODO(), setup.Address, setup.UserName, setup.Password)
+		So(err, ShouldBeNil)
+		s, err := db.RunScript("table1 = table(1..2048 as id, take(symbol((`A)+string(1..10)),2048) as name, double(rand(3892,2048)) as value);select * from table1")
+		So(err, ShouldBeNil)
+		result := s.(*model.Table)
+		row := result.Rows()
+		So(row, ShouldEqual, 2048)
+		So(db.Close(), ShouldBeNil)
+	})
+	Convey("Test_Table_size_bigger_than_1048576:", t, func() {
+		db, err := api.NewSimpleDolphinDBClient(context.TODO(), setup.Address, setup.UserName, setup.Password)
+		So(err, ShouldBeNil)
+		s, err := db.RunScript("n = 2*1048576; table1 = table(1..n as id, take(symbol((`A)+string(1..10)),n) as name, double(rand(3892,n)) as value);select * from table1")
+		So(err, ShouldBeNil)
+		result := s.(*model.Table)
+		row := result.Rows()
+		So(row, ShouldEqual, 2097152)
+		So(db.Close(), ShouldBeNil)
+	})
+	Convey("Test_Table_size_bigger_than_3000000:", t, func() {
+		db, err := api.NewSimpleDolphinDBClient(context.TODO(), setup.Address, setup.UserName, setup.Password)
+		So(err, ShouldBeNil)
+		s, err := db.RunScript("n = 4000000; table1 = table(1..n as id, take(symbol((`A)+string(1..10)),n) as name, double(rand(3892,n)) as value);select * from table1")
+		So(err, ShouldBeNil)
+		result := s.(*model.Table)
+		row := result.Rows()
+		So(row, ShouldEqual, 4000000)
+		So(db.Close(), ShouldBeNil)
+	})
+}
+func Test_Table_DownLoad_DataType_zero_row(t *testing.T) {
+	Convey("Test_Table_with_zero_row:", t, func() {
+		db, err := api.NewSimpleDolphinDBClient(context.TODO(), setup.Address, setup.UserName, setup.Password)
+		So(err, ShouldBeNil)
+		s, err := db.RunScript("create table table1(id INT,name SYMBOL,value DOUBLE);go;select * from table1")
+		So(err, ShouldBeNil)
+		result := s.(*model.Table)
+		row := result.Rows()
+		So(row, ShouldEqual, 0)
+		So(result.GetColumnByName("id").IsNull(0), ShouldBeTrue)
+		So(result.GetColumnByName("name").IsNull(0), ShouldBeTrue)
+		So(result.GetColumnByName("value").IsNull(0), ShouldBeTrue)
+		So(db.Close(), ShouldBeNil)
+	})
+}
+func Test_Table_DownLoad_DataType_one_row(t *testing.T) {
+	Convey("Test_Table_with_one_row:", t, func() {
+		db, err := api.NewSimpleDolphinDBClient(context.TODO(), setup.Address, setup.UserName, setup.Password)
+		So(err, ShouldBeNil)
+		s, err := db.RunScript("create table table1(id INT,name SYMBOL,value DOUBLE);go;insert into table1 values(1,`A,12.4);select * from table1")
+		So(err, ShouldBeNil)
+		result := s.(*model.Table)
+		row := result.Rows()
+		So(row, ShouldEqual, 1)
+		id := result.GetColumnByName("id").Data.Value()
+		name := result.GetColumnByName("name").Data.Value()
+		value := result.GetColumnByName("value").Data.Value()
+		So(id[0], ShouldEqual, 1)
+		So(name[0], ShouldEqual, "A")
+		So(value[0], ShouldEqual, 12.4)
+		So(db.Close(), ShouldBeNil)
+	})
+}
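+// Test_Table_DownLoad_Distributed_table downloads a DFS partitioned table; rows come back grouped by partition (VALUE partitions on name), so the expected slices below follow partition order rather than insertion order.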
+func Test_Table_DownLoad_Distributed_table(t *testing.T) {
+	Convey("Test_Table_distributed_table:", t, func() {
+		db, err := api.NewSimpleDolphinDBClient(context.TODO(), setup.Address, setup.UserName, setup.Password)
+		So(err, ShouldBeNil)
+		s, err := db.RunScript("dbName = 'dfs://Valuedb';if(existsDatabase(dbName)){dropDatabase(dbName)};n = 6;datetimev = take(2022.01.03T12:00:00+1..n, n);num = take(0.0+1..n, n);name = take(`a`b`c, n);boolv = take(true false true, n);uuidv = take(uuid('a268652a-6c8e-5686-5dd9-4ab882ecb969'), n);ipaddrv = take(ipaddr('191.168.13.16'), n);int128v = take(int128('97b48f09119a1d91d44fd12893226af8'), n);pointv = take(point(0.0+1..n,100.0+1..n), n);complexv = take(complex(0.0+1..n,100.0+1..n), n);t=table(datetimev, num, name, boolv, uuidv, ipaddrv, int128v, pointv, complexv);db=database('dfs://Valuedb', VALUE, `a`b`c);pt=db.createPartitionedTable(t, `pt, `name).append!(t);select * from pt;")
+		So(err, ShouldBeNil)
+		result := s.(*model.Table)
+		datetimev := []time.Time{time.Date(2022, 1, 3, 12, 00, 1, 0, time.UTC), time.Date(2022, 1, 3, 12, 00, 4, 0, time.UTC), time.Date(2022, 1, 3, 12, 00, 2, 0, time.UTC), time.Date(2022, 1, 3, 12, 00, 5, 0, time.UTC), time.Date(2022, 1, 3, 12, 00, 3, 0, time.UTC), time.Date(2022, 1, 3, 12, 00, 6, 0, time.UTC)}
+		num := []float64{1, 4, 2, 5, 3, 6}
+		name := []string{"a", "a", "b", "b", "c", "c"}
+		boolv := []bool{true, true, false, false, true, true}
+		uuidv := string("a268652a-6c8e-5686-5dd9-4ab882ecb969")
+		ipaddrv := string("191.168.13.16")
+		int128v := string("97b48f09119a1d91d44fd12893226af8")
+		pointv := []string{"(1.00000, 101.00000)", "(4.00000, 104.00000)", "(2.00000, 102.00000)", "(5.00000, 105.00000)", "(3.00000, 103.00000)", "(6.00000, 106.00000)"}
+		complexv := []string{"1.00000+101.00000i", "4.00000+104.00000i", "2.00000+102.00000i", "5.00000+105.00000i", "3.00000+103.00000i", "6.00000+106.00000i"}
+		col := result.Columns()
+		So(col, ShouldEqual, 9)
+		row := result.Rows()
+		So(row, ShouldEqual, 6)
+		for i := 0; i < result.Columns(); i++ {
+			re := result.GetColumnByIndex(i)
+			switch i {
+			case 0:
+				for j := 0; j < result.Rows(); j++ {
+					re := re.Get(j).Value()
+					So(re, ShouldEqual, datetimev[j])
+				}
+			case 1:
+				for j := 0; j < result.Rows(); j++ {
+					re := re.Get(j).Value()
+					So(re, ShouldEqual, num[j])
+				}
+			case 2:
+				for j := 0; j < result.Rows(); j++ {
+					re := re.Get(j).Value()
+					So(re, ShouldEqual, name[j])
+				}
+			case 3:
+				for j := 0; j < result.Rows(); j++ {
+					re := re.Get(j).Value()
+					So(re, ShouldEqual, boolv[j])
+				}
+			case 4:
+				for j := 0; j < result.Rows(); j++ {
+					re := re.Get(j).Value()
+					So(re, ShouldEqual, uuidv)
+				}
+			case 5:
+				for j := 0; j < result.Rows(); j++ {
+					re := re.Get(j).Value()
+					So(re, ShouldEqual, ipaddrv)
+				}
+			case 6:
+				for j := 0; j < result.Rows(); j++ {
+					re := re.Get(j).Value()
+					So(re, ShouldEqual, int128v)
+				}
+			case 7:
+				for j := 0; j < result.Rows(); j++ {
+					re := re.Get(j).Value()
+					So(re, ShouldEqual, pointv[j])
+				}
+			case 8:
+				for j := 0; j < result.Rows(); j++ {
+					re := re.Get(j).Value()
+					So(re, ShouldEqual, complexv[j])
+				}
+			}
+		}
+		So(db.Close(), ShouldBeNil)
+	})
+}
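+// Test_Table_DownLoad_Dimension_table runs the same nine-column value check against a dimension table; here rows keep insertion order.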
+func Test_Table_DownLoad_Dimension_table(t *testing.T) {
+	Convey("Test_Table_dimension_table:", t, func() {
+		db, err := api.NewSimpleDolphinDBClient(context.TODO(), setup.Address, setup.UserName, setup.Password)
+		So(err, ShouldBeNil)
+		s, err := db.RunScript("dbName = 'dfs://db1';if(existsDatabase(dbName)){dropDatabase(dbName)};db=database('dfs://db1',VALUE,1 2 3);n = 6;timev = take(2022.01.03T12:00:00+1..n, n);num = take(1023.002+1..n, n);name = take(`a`b`c`d`e`f, n);boolv = take(true false , n);uuidv = take(uuid('a268652a-6c8e-5686-5dd9-4ab882ecb969'), n);ipaddrv = take(ipaddr('191.168.13.16'), n);int128v = take(int128('97b48f09119a1d91d44fd12893226af8'), n);pointv = take(point(0.0+1..n,100.0+1..n), n);complexv = take(complex(0.0+1..n,100.0+1..n), n);t=table(timev, num, name, boolv, uuidv, ipaddrv, int128v, pointv, complexv);dt=db.createTable(t,`dt).append!(t);select * from dt")
+		So(err, ShouldBeNil)
+		result := s.(*model.Table)
+		datetimev := []time.Time{time.Date(2022, 1, 3, 12, 00, 1, 0, time.UTC), time.Date(2022, 1, 3, 12, 00, 2, 0, time.UTC), time.Date(2022, 1, 3, 12, 00, 3, 0, time.UTC), time.Date(2022, 1, 3, 12, 00, 4, 0, time.UTC), time.Date(2022, 1, 3, 12, 00, 5, 0, time.UTC), time.Date(2022, 1, 3, 12, 00, 6, 0, time.UTC)}
+		num := []float64{1024.002, 1025.002, 1026.002, 1027.002, 1028.002, 1029.002}
+		name := []string{"a", "b", "c", "d", "e", "f"}
+		boolv := []bool{true, false, true, false, true, false}
+		uuidv := string("a268652a-6c8e-5686-5dd9-4ab882ecb969")
+		ipaddrv := string("191.168.13.16")
+		int128v := string("97b48f09119a1d91d44fd12893226af8")
+		pointv := []string{"(1.00000, 101.00000)", "(2.00000, 102.00000)", "(3.00000, 103.00000)", "(4.00000, 104.00000)", "(5.00000, 105.00000)", "(6.00000, 106.00000)"}
+		complexv := []string{"1.00000+101.00000i", "2.00000+102.00000i", "3.00000+103.00000i", "4.00000+104.00000i", "5.00000+105.00000i", "6.00000+106.00000i"}
+		col := result.Columns()
+		So(col, ShouldEqual, 9)
+		row := result.Rows()
+		So(row, ShouldEqual, 6)
+		for i := 0; i < result.Columns(); i++ {
+			re := result.GetColumnByIndex(i)
+			switch i {
+			case 0:
+				for j := 0; j < result.Rows(); j++ {
+					re := re.Get(j).Value()
+					So(re, ShouldEqual, datetimev[j])
+				}
+			case 1:
+				for j := 0; j < result.Rows(); j++ {
+					re := re.Get(j).Value()
+					So(re, ShouldEqual, num[j])
+				}
+			case 2:
+				for j := 0; j < result.Rows(); j++ {
+					re := re.Get(j).Value()
+					So(re, ShouldEqual, name[j])
+				}
+			case 3:
+				for j := 0; j < result.Rows(); j++ {
+					re := re.Get(j).Value()
+					So(re, ShouldEqual, boolv[j])
+				}
+			case 4:
+				for j := 0; j < result.Rows(); j++ {
+					re := re.Get(j).Value()
+					So(re, ShouldEqual, uuidv)
+				}
+			case 5:
+				for j := 0; j < result.Rows(); j++ {
+					re := re.Get(j).Value()
+					So(re, ShouldEqual, ipaddrv)
+				}
+			case 6:
+				for j := 0; j < result.Rows(); j++ {
+					re := re.Get(j).Value()
+					So(re, ShouldEqual, int128v)
+				}
+			case 7:
+				for j := 0; j < result.Rows(); j++ {
+					re := re.Get(j).Value()
+					So(re, ShouldEqual, pointv[j])
+				}
+			case 8:
+				for j := 0; j < result.Rows(); j++ {
+					re := re.Get(j).Value()
+					So(re, ShouldEqual, complexv[j])
+				}
+			}
+		}
+		So(db.Close(), ShouldBeNil)
+	})
+}
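+// Test_Table_DownLoad_StreamData_table repeats the nine-column check for a stream table.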
+func Test_Table_DownLoad_StreamData_table(t *testing.T) {
+	Convey("Test_Table_streamData_table:", t, func() {
+		db, err := api.NewSimpleDolphinDBClient(context.TODO(), setup.Address, setup.UserName, setup.Password)
+		So(err, ShouldBeNil)
+		s, err := db.RunScript("n = 6;timev = take(2022.01.03T12:00:00+1..n, n);num = take(1023.002+1..n, n);name = take(`a`b`c`d`e`f, n);boolv = take(true false , n);uuidv = take(uuid('a268652a-6c8e-5686-5dd9-4ab882ecb969'), n);ipaddrv = take(ipaddr('191.168.13.16'), n);int128v = take(int128('97b48f09119a1d91d44fd12893226af8'), n);pointv = take(point(0.0+1..n,100.0+1..n), n);complexv = take(complex(0.0+1..n,100.0+1..n), n);t=streamTable(timev, num, name, boolv, uuidv, ipaddrv, int128v, pointv, complexv);select * from t")
+		So(err, ShouldBeNil)
+		result := s.(*model.Table)
+		datetimev := []time.Time{time.Date(2022, 1, 3, 12, 00, 1, 0, time.UTC), time.Date(2022, 1, 3, 12, 00, 2, 0, time.UTC), time.Date(2022, 1, 3, 12, 00, 3, 0, time.UTC), time.Date(2022, 1, 3, 12, 00, 4, 0, time.UTC), time.Date(2022, 1, 3, 12, 00, 5, 0, time.UTC), time.Date(2022, 1, 3, 12, 00, 6, 0, time.UTC)}
+		num := []float64{1024.002, 1025.002, 1026.002, 1027.002, 1028.002, 1029.002}
+		name := []string{"a", "b", "c", "d", "e", "f"}
+		boolv := []bool{true, false, true, false, true, false}
+		uuidv := string("a268652a-6c8e-5686-5dd9-4ab882ecb969")
+		ipaddrv := string("191.168.13.16")
+		int128v := string("97b48f09119a1d91d44fd12893226af8")
+		pointv := []string{"(1.00000, 101.00000)", "(2.00000, 102.00000)", "(3.00000, 103.00000)", "(4.00000, 104.00000)", "(5.00000, 105.00000)", "(6.00000, 106.00000)"}
+		complexv := []string{"1.00000+101.00000i", "2.00000+102.00000i", "3.00000+103.00000i", "4.00000+104.00000i", "5.00000+105.00000i", "6.00000+106.00000i"}
+		col := result.Columns()
+		So(col, ShouldEqual, 9)
+		row := result.Rows()
+		So(row, ShouldEqual, 6)
+		for i := 0; i < result.Columns(); i++ {
+			re := result.GetColumnByIndex(i)
+			switch i {
+			case 0:
+				for j := 0; j < result.Rows(); j++ {
+					re := re.Get(j).Value()
+					So(re, ShouldEqual, datetimev[j])
+				}
+			case 1:
+				for j := 0; j < result.Rows(); j++ {
+					re := re.Get(j).Value()
+					So(re, ShouldEqual, num[j])
+				}
+			case 2:
+				for j := 0; j < result.Rows(); j++ {
+					re := re.Get(j).Value()
+					So(re, ShouldEqual, name[j])
+				}
+			case 3:
+				for j := 0; j < result.Rows(); j++ {
+					re := re.Get(j).Value()
+					So(re, ShouldEqual, boolv[j])
+				}
+			case 4:
+				for j := 0; j < result.Rows(); j++ {
+					re := re.Get(j).Value()
+					So(re, ShouldEqual, uuidv)
+				}
+			case 5:
+				for j := 0; j < result.Rows(); j++ {
+					re := re.Get(j).Value()
+					So(re, ShouldEqual, ipaddrv)
+				}
+			case 6:
+				for j := 0; j < result.Rows(); j++ {
+					re := re.Get(j).Value()
+					So(re, ShouldEqual, int128v)
+				}
+			case 7:
+				for j := 0; j < result.Rows(); j++ {
+					re := re.Get(j).Value()
+					So(re, ShouldEqual, pointv[j])
+				}
+			case 8:
+				for j := 0; j < result.Rows(); j++ {
+					re := re.Get(j).Value()
+					So(re, ShouldEqual, complexv[j])
+				}
+			}
+		}
+		So(db.Close(), ShouldBeNil)
+	})
+}
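+// Test_Table_DownLoad_indexed_table repeats the nine-column check for an indexed in-memory table keyed on timev.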
+func Test_Table_DownLoad_indexed_table(t *testing.T) {
+	Convey("Test_Table_indexed_table:", t, func() {
+		db, err := api.NewSimpleDolphinDBClient(context.TODO(), setup.Address, setup.UserName, setup.Password)
+		So(err, ShouldBeNil)
+		s, err := db.RunScript("n = 6;timev = take(2022.01.03T12:00:00+1..n, n);num = take(1023.002+1..n, n);name = take(`a`b`c`d`e`f, n);boolv = take(true false , n);uuidv = take(uuid('a268652a-6c8e-5686-5dd9-4ab882ecb969'), n);ipaddrv = take(ipaddr('191.168.13.16'), n);int128v = take(int128('97b48f09119a1d91d44fd12893226af8'), n);pointv = take(point(0.0+1..n,100.0+1..n), n);complexv = take(complex(0.0+1..n,100.0+1..n), n);t1=table(timev, num, name, boolv, uuidv, ipaddrv, int128v, pointv, complexv);t=indexedTable(`timev, t1);select * from t")
+		So(err, ShouldBeNil)
+		result := s.(*model.Table)
+		datetimev := []time.Time{time.Date(2022, 1, 3, 12, 00, 1, 0, time.UTC), time.Date(2022, 1, 3, 12, 00, 2, 0, time.UTC), time.Date(2022, 1, 3, 12, 00, 3, 0, time.UTC), time.Date(2022, 1, 3, 12, 00, 4, 0, time.UTC), time.Date(2022, 1, 3, 12, 00, 5, 0, time.UTC), time.Date(2022, 1, 3, 12, 00, 6, 0, time.UTC)}
+		num := []float64{1024.002, 1025.002, 1026.002, 1027.002, 1028.002, 1029.002}
+		name := []string{"a", "b", "c", "d", "e", "f"}
+		boolv := []bool{true, false, true, false, true, false}
+		uuidv := string("a268652a-6c8e-5686-5dd9-4ab882ecb969")
+		ipaddrv := string("191.168.13.16")
+		int128v := string("97b48f09119a1d91d44fd12893226af8")
+		pointv := []string{"(1.00000, 101.00000)", "(2.00000, 102.00000)", "(3.00000, 103.00000)", "(4.00000, 104.00000)", "(5.00000, 105.00000)", "(6.00000, 106.00000)"}
+		complexv := []string{"1.00000+101.00000i", "2.00000+102.00000i", "3.00000+103.00000i", "4.00000+104.00000i", "5.00000+105.00000i", "6.00000+106.00000i"}
+		col := result.Columns()
+		So(col, ShouldEqual, 9)
+		row := result.Rows()
+		So(row, ShouldEqual, 6)
+		for i := 0; i < result.Columns(); i++ {
+			re := result.GetColumnByIndex(i)
+			switch i {
+			case 0:
+				for j := 0; j < result.Rows(); j++ {
+					re := re.Get(j).Value()
+					So(re, ShouldEqual, datetimev[j])
+				}
+			case 1:
+				for j := 0; j < result.Rows(); j++ {
+					re := re.Get(j).Value()
+					So(re, ShouldEqual, num[j])
+				}
+			case 2:
+				for j := 0; j < result.Rows(); j++ {
+					re := re.Get(j).Value()
+					So(re, ShouldEqual, name[j])
+				}
+			case 3:
+				for j := 0; j < result.Rows(); j++ {
+					re := re.Get(j).Value()
+					So(re, ShouldEqual, boolv[j])
+				}
+			case 4:
+				for j := 0; j < result.Rows(); j++ {
+					re := re.Get(j).Value()
+					So(re, ShouldEqual, uuidv)
+				}
+			case 5:
+				for j := 0; j < result.Rows(); j++ {
+					re := re.Get(j).Value()
+					So(re, ShouldEqual, ipaddrv)
+				}
+			case 6:
+				for j := 0; j < result.Rows(); j++ {
+					re := re.Get(j).Value()
+					So(re, ShouldEqual, int128v)
+				}
+			case 7:
+				for j := 0; j < result.Rows(); j++ {
+					re := re.Get(j).Value()
+					So(re, ShouldEqual, pointv[j])
+				}
+			case 8:
+				for j := 0; j < result.Rows(); j++ {
+					re := re.Get(j).Value()
+					So(re, ShouldEqual, complexv[j])
+				}
+			}
+		}
+		So(db.Close(), ShouldBeNil)
+	})
+}
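+// Test_Table_DownLoad_keyed_table and Test_Table_DownLoad_mvccTable repeat the nine-column check for keyed and MVCC tables.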
+func Test_Table_DownLoad_keyed_table(t *testing.T) {
+	Convey("Test_Table_keyed_table:", t, func() {
+		db, err := api.NewSimpleDolphinDBClient(context.TODO(), setup.Address, setup.UserName, setup.Password)
+		So(err, ShouldBeNil)
+		s, err := db.RunScript("n = 6;timev = take(2022.01.03T12:00:00+1..n, n);num = take(1023.002+1..n, n);name = take(`a`b`c`d`e`f, n);boolv = take(true false , n);uuidv = take(uuid('a268652a-6c8e-5686-5dd9-4ab882ecb969'), n);ipaddrv = take(ipaddr('191.168.13.16'), n);int128v = take(int128('97b48f09119a1d91d44fd12893226af8'), n);pointv = take(point(0.0+1..n,100.0+1..n), n);complexv = take(complex(0.0+1..n,100.0+1..n), n);t1=table(timev, num, name, boolv, uuidv, ipaddrv, int128v, pointv, complexv);t=keyedTable(`timev, t1);select * from t")
+		So(err, ShouldBeNil)
+		result := s.(*model.Table)
+		datetimev := []time.Time{time.Date(2022, 1, 3, 12, 00, 1, 0, time.UTC), time.Date(2022, 1, 3, 12, 00, 2, 0, time.UTC), time.Date(2022, 1, 3, 12, 00, 3, 0, time.UTC), time.Date(2022, 1, 3, 12, 00, 4, 0, time.UTC), time.Date(2022, 1, 3, 12, 00, 5, 0, time.UTC), time.Date(2022, 1, 3, 12, 00, 6, 0, time.UTC)}
+		num := []float64{1024.002, 1025.002, 1026.002, 1027.002, 1028.002, 1029.002}
+		name := []string{"a", "b", "c", "d", "e", "f"}
+		boolv := []bool{true, false, true, false, true, false}
+		uuidv := string("a268652a-6c8e-5686-5dd9-4ab882ecb969")
+		ipaddrv := string("191.168.13.16")
+		int128v := string("97b48f09119a1d91d44fd12893226af8")
+		pointv := []string{"(1.00000, 101.00000)", "(2.00000, 102.00000)", "(3.00000, 103.00000)", "(4.00000, 104.00000)", "(5.00000, 105.00000)", "(6.00000, 106.00000)"}
+		complexv := []string{"1.00000+101.00000i", "2.00000+102.00000i", "3.00000+103.00000i", "4.00000+104.00000i", "5.00000+105.00000i", "6.00000+106.00000i"}
+		col := result.Columns()
+		So(col, ShouldEqual, 9)
+		row := result.Rows()
+		So(row, ShouldEqual, 6)
+		for i := 0; i < result.Columns(); i++ {
+			re := result.GetColumnByIndex(i)
+			switch i {
+			case 0:
+				for j := 0; j < result.Rows(); j++ {
+					re := re.Get(j).Value()
+					So(re, ShouldEqual, datetimev[j])
+				}
+			case 1:
+				for j := 0; j < result.Rows(); j++ {
+					re := re.Get(j).Value()
+					So(re, ShouldEqual, num[j])
+				}
+			case 2:
+				for j := 0; j < result.Rows(); j++ {
+					re := re.Get(j).Value()
+					So(re, ShouldEqual, name[j])
+				}
+			case 3:
+				for j := 0; j < result.Rows(); j++ {
+					re := re.Get(j).Value()
+					So(re, ShouldEqual, boolv[j])
+				}
+			case 4:
+				for j := 0; j < result.Rows(); j++ {
+					re := re.Get(j).Value()
+					So(re, ShouldEqual, uuidv)
+				}
+			case 5:
+				for j := 0; j < result.Rows(); j++ {
+					re := re.Get(j).Value()
+					So(re, ShouldEqual, ipaddrv)
+				}
+			case 6:
+				for j := 0; j < result.Rows(); j++ {
+					re := re.Get(j).Value()
+					So(re, ShouldEqual, int128v)
+				}
+			case 7:
+				for j := 0; j < result.Rows(); j++ {
+					re := re.Get(j).Value()
+					So(re, ShouldEqual, pointv[j])
+				}
+			case 8:
+				for j := 0; j < result.Rows(); j++ {
+					re := re.Get(j).Value()
+					So(re, ShouldEqual, complexv[j])
+				}
+			}
+		}
+		So(db.Close(), ShouldBeNil)
+	})
+}
+func Test_Table_DownLoad_mvccTable(t *testing.T) {
+	Convey("Test_Table_mvccTable:", t, func() {
+		db, err := api.NewSimpleDolphinDBClient(context.TODO(), setup.Address, setup.UserName, setup.Password)
+		So(err, ShouldBeNil)
+		s, err := db.RunScript("n = 6;timev = take(2022.01.03T12:00:00+1..n, n);num = take(1023.002+1..n, n);name = take(`a`b`c`d`e`f, n);boolv = take(true false , n);uuidv = take(uuid('a268652a-6c8e-5686-5dd9-4ab882ecb969'), n);ipaddrv = take(ipaddr('191.168.13.16'), n);int128v = take(int128('97b48f09119a1d91d44fd12893226af8'), n);pointv = take(point(0.0+1..n,100.0+1..n), n);complexv = take(complex(0.0+1..n,100.0+1..n), n);t=mvccTable(timev, num, name, boolv, uuidv, ipaddrv, int128v, pointv, complexv);select * from t")
+		So(err, ShouldBeNil)
+		result := s.(*model.Table)
+		datetimev := []time.Time{time.Date(2022, 1, 3, 12, 00, 1, 0, time.UTC), time.Date(2022, 1, 3, 12, 00, 2, 0, time.UTC), time.Date(2022, 1, 3, 12, 00, 3, 0, time.UTC), time.Date(2022, 1, 3, 12, 00, 4, 0, time.UTC), time.Date(2022, 1, 3, 12, 00, 5, 0, time.UTC), time.Date(2022, 1, 3, 12, 00, 6, 0, time.UTC)}
+		num := []float64{1024.002, 1025.002, 1026.002, 1027.002, 1028.002, 1029.002}
+		name := []string{"a", "b", "c", "d", "e", "f"}
+		boolv := []bool{true, false, true, false, true, false}
+		uuidv := string("a268652a-6c8e-5686-5dd9-4ab882ecb969")
+		ipaddrv := string("191.168.13.16")
+		int128v := string("97b48f09119a1d91d44fd12893226af8")
+		pointv := []string{"(1.00000, 101.00000)", "(2.00000, 102.00000)", "(3.00000, 103.00000)", "(4.00000, 104.00000)", "(5.00000, 105.00000)", "(6.00000, 106.00000)"}
+		complexv := []string{"1.00000+101.00000i", "2.00000+102.00000i", "3.00000+103.00000i", "4.00000+104.00000i", "5.00000+105.00000i", "6.00000+106.00000i"}
+		col := result.Columns()
+		So(col, ShouldEqual, 9)
+		row := result.Rows()
+		So(row, ShouldEqual, 6)
+		for i := 0; i < result.Columns(); i++ {
+			re := result.GetColumnByIndex(i)
+			switch i {
+			case 0:
+				for j := 0; j < result.Rows(); j++ {
+					re := re.Get(j).Value()
+					So(re, ShouldEqual, datetimev[j])
+				}
+			case 1:
+				for j := 0; j < result.Rows(); j++ {
+					re := re.Get(j).Value()
+					So(re, ShouldEqual, num[j])
+				}
+			case 2:
+				for j := 0; j < result.Rows(); j++ {
+					re := re.Get(j).Value()
+					So(re, ShouldEqual, name[j])
+				}
+			case 3:
+				for j := 0; j < result.Rows(); j++ {
+					re := re.Get(j).Value()
+					So(re, ShouldEqual, boolv[j])
+				}
+			case 4:
+				for j := 0; j < result.Rows(); j++ {
+					re := re.Get(j).Value()
+					So(re, ShouldEqual, uuidv)
+				}
+			case 5:
+				for j := 0; j < result.Rows(); j++ {
+					re := re.Get(j).Value()
+					So(re, ShouldEqual, ipaddrv)
+				}
+			case 6:
+				for j := 0; j < result.Rows(); j++ {
+					re := re.Get(j).Value()
+					So(re, ShouldEqual, int128v)
+				}
+			case 7:
+				for j := 0; j < result.Rows(); j++ {
+					re := re.Get(j).Value()
+					So(re, ShouldEqual, pointv[j])
+				}
+			case 8:
+				for j := 0; j < result.Rows(); j++ {
+					re := re.Get(j).Value()
+					So(re, ShouldEqual, complexv[j])
+				}
+			}
+		}
+		So(db.Close(), ShouldBeNil)
+	})
+}
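+// Test_Table_DownLoad_cachedTable repeats the nine-column check for a cachedTable backed by a user-defined function; Test_Table_DownLoad_with_bigArray then downloads a 9,000,000-row table built from bigarray columns.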
+func Test_Table_DownLoad_cachedTable(t *testing.T) {
+	Convey("Test_Table_cachedTable:", t, func() {
+		db, err := api.NewSimpleDolphinDBClient(context.TODO(), setup.Address, setup.UserName, setup.Password)
+		So(err, ShouldBeNil)
+		s, err := db.RunScript("def f1(mutable t){return t};n = 6;timev = take(2022.01.03T12:00:00+1..n, n);num = take(1023.002+1..n, n);name = take(`a`b`c`d`e`f, n);boolv = take(true false , n);uuidv = take(uuid('a268652a-6c8e-5686-5dd9-4ab882ecb969'), n);ipaddrv = take(ipaddr('191.168.13.16'), n);int128v = take(int128('97b48f09119a1d91d44fd12893226af8'), n);pointv = take(point(0.0+1..n,100.0+1..n), n);complexv = take(complex(0.0+1..n,100.0+1..n), n);t=table(timev, num, name, boolv, uuidv, ipaddrv, int128v, pointv, complexv);ct=cachedTable(f1{t}, 2);select * from ct;")
+		So(err, ShouldBeNil)
+		result := s.(*model.Table)
+		datetimev := []time.Time{time.Date(2022, 1, 3, 12, 00, 1, 0, time.UTC), time.Date(2022, 1, 3, 12, 00, 2, 0, time.UTC), time.Date(2022, 1, 3, 12, 00, 3, 0, time.UTC), time.Date(2022, 1, 3, 12, 00, 4, 0, time.UTC), time.Date(2022, 1, 3, 12, 00, 5, 0, time.UTC), time.Date(2022, 1, 3, 12, 00, 6, 0, time.UTC)}
+		num := []float64{1024.002, 1025.002, 1026.002, 1027.002, 1028.002, 1029.002}
+		name := []string{"a", "b", "c", "d", "e", "f"}
+		boolv := []bool{true, false, true, false, true, false}
+		uuidv := string("a268652a-6c8e-5686-5dd9-4ab882ecb969")
+		ipaddrv := string("191.168.13.16")
+		int128v := string("97b48f09119a1d91d44fd12893226af8")
+		pointv := []string{"(1.00000, 101.00000)", "(2.00000, 102.00000)", "(3.00000, 103.00000)", "(4.00000, 104.00000)", "(5.00000, 105.00000)", "(6.00000, 106.00000)"}
+		complexv := []string{"1.00000+101.00000i", "2.00000+102.00000i", "3.00000+103.00000i", "4.00000+104.00000i", "5.00000+105.00000i", "6.00000+106.00000i"}
+		col := result.Columns()
+		So(col, ShouldEqual, 9)
+		row := result.Rows()
+		So(row, ShouldEqual, 6)
+		for i := 0; i < result.Columns(); i++ {
+			re := result.GetColumnByIndex(i)
+			switch i {
+			case 0:
+				for j := 0; j < result.Rows(); j++ {
+					re := re.Get(j).Value()
+					So(re, ShouldEqual, datetimev[j])
+				}
+			case 1:
+				for j := 0; j < result.Rows(); j++ {
+					re := re.Get(j).Value()
+					So(re, ShouldEqual, num[j])
+				}
+			case 2:
+				for j := 0; j < result.Rows(); j++ {
+					re := re.Get(j).Value()
+					So(re, ShouldEqual, name[j])
+				}
+			case 3:
+				for j := 0; j < result.Rows(); j++ {
+					re := re.Get(j).Value()
+					So(re, ShouldEqual, boolv[j])
+				}
+			case 4:
+				for j := 0; j < result.Rows(); j++ {
+					re := re.Get(j).Value()
+					So(re, ShouldEqual, uuidv)
+				}
+			case 5:
+				for j := 0; j < result.Rows(); j++ {
+					re := re.Get(j).Value()
+					So(re, ShouldEqual, ipaddrv)
+				}
+			case 6:
+				for j := 0; j < result.Rows(); j++ {
+					re := re.Get(j).Value()
+					So(re, ShouldEqual, int128v)
+				}
+			case 7:
+				for j := 0; j < result.Rows(); j++ {
+					re := re.Get(j).Value()
+					So(re, ShouldEqual, pointv[j])
+				}
+			case 8:
+				for j := 0; j < result.Rows(); j++ {
+					re := re.Get(j).Value()
+					So(re, ShouldEqual, complexv[j])
+				}
+			}
+		}
+		So(db.Close(), ShouldBeNil)
+	})
+}
+func Test_Table_DownLoad_with_bigArray(t *testing.T) {
+	Convey("Test_Table_with_bigArray:", t, func() {
+		db, err := api.NewSimpleDolphinDBClient(context.TODO(), setup.Address, setup.UserName, setup.Password)
+		So(err, ShouldBeNil)
+		s, err := db.RunScript("n=9000000;id = bigarray(INT,0,n).append!(take(long(1..n),n));name = bigarray(SYMBOL,0,n).append!(take(`A`S`B`C`D, n));table1 = (table(id, name));select * from table1")
+		So(err, ShouldBeNil)
+		result := s.(*model.Table)
+		col := result.Columns()
+		So(col, ShouldEqual, 2)
+		row := result.Rows()
+		So(row, ShouldEqual, 9000000)
+		So(db.Close(), ShouldBeNil)
+	})
+}
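+// Layout edge cases: a 10002-column wide table and an array vector (DOUBLE[]) column whose middle element is empty.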
+func Test_Table_DownLoad_with_wide_table(t *testing.T) {
+	Convey("Test_Table_with_wide_table:", t, func() {
+		db, err := api.NewSimpleDolphinDBClient(context.TODO(), setup.Address, setup.UserName, setup.Password)
+		So(err, ShouldBeNil)
+		s, err := db.RunScript("a = take([(`A)+string(1..10)],10000);a.append!(`A1`a2`a3```````a10);a.append!(``````````);t = table(a);t")
+		So(err, ShouldBeNil)
+		result := s.(*model.Table)
+		row := result.Rows()
+		So(row, ShouldEqual, 10)
+		col := result.Columns()
+		So(col, ShouldEqual, 10002)
+		So(db.Close(), ShouldBeNil)
+		zx := []string{"A1", "A2", "A3", "A4", "A5", "A6", "A7", "A8", "A9", "A10"}
+		re := result.GetColumnByIndex(100)
+		for j := 0; j < row; j++ {
+			re := re.Get(j).Value()
+			So(re, ShouldEqual, zx[j])
+		}
+		zx = []string{"A1", "a2", "a3", "", "", "", "", "", "", "a10"}
+		re = result.GetColumnByIndex(10000)
+		for j := 0; j < row; j++ {
+			re := re.Get(j).Value()
+			So(re, ShouldEqual, zx[j])
+		}
+		zx = []string{"", "", "", "", "", "", "", "", "", ""}
+		re = result.GetColumnByIndex(10001)
+		for j := 0; j < row; j++ {
+			re := re.Get(j).Value()
+			So(re, ShouldEqual, zx[j])
+		}
+	})
+}
+func Test_Table_DownLoad_with_array_vector(t *testing.T) {
+	Convey("Test_Table_with_array_vector:", t, func() {
+		db, err := api.NewSimpleDolphinDBClient(context.TODO(), setup.Address, setup.UserName, setup.Password)
+		So(err, ShouldBeNil)
+		s, err := db.RunScript("bid = array(DOUBLE[], 0, 20).append!([1.4799 NULL 1.4787, , 1.4791 1.479 1.4784]);t = table( bid as `bid);t")
+		So(err, ShouldBeNil)
+		result := s.(*model.Table)
+		row := result.Rows()
+		So(row, ShouldEqual, 3)
+		col := result.Columns()
+		So(col, ShouldEqual, 1)
+		So(db.Close(), ShouldBeNil)
+		hasnull := []float64{1.4799, 0, 1.4787}
+		notnull := []float64{1.4791, 1.479, 1.4784}
+		for i := 0; i < col; i++ {
+			re := result.GetColumnByIndex(i)
+			switch i {
+			case 0:
+				for j := 0; j < row; j++ {
+					if j == 1 {
+						So(re.IsNull(j), ShouldBeTrue)
+					} else {
+						re := re.Get(j).Value()
+						So(re, ShouldEqual, hasnull[j])
+					}
+				}
+			case 1:
+				for j := 0; j < row; j++ {
+					So(re.IsNull(j), ShouldBeTrue)
+				}
+			case 2:
+				for j := 0; j < row; j++ {
+					re := re.Get(j).Value()
+					So(re, ShouldEqual, notnull[j])
+				}
+			}
+		}
+	})
+}
+// The Test_Table_UpLoad_DataType_* tests below upload a table with db.Upload, read it back with RunScript("s") and compare every value; the string test also checks typestr of the uploaded table.
+func Test_Table_UpLoad_DataType_string(t *testing.T) {
+	Convey("Test_Table_upload_with_string:", t, func() {
+		db, err := api.NewSimpleDolphinDBClient(context.TODO(), setup.Address, setup.UserName, setup.Password)
+		So(err, ShouldBeNil)
+		Convey("Test_Table_with_string:", func() {
+			col, err := model.NewDataTypeListWithRaw(model.DtString, []string{"col1", "col2", "col3"})
+			So(err, ShouldBeNil)
+			tb := model.NewTable([]string{"col"}, []*model.Vector{model.NewVector(col)})
+			_, err = db.Upload(map[string]model.DataForm{"s": tb})
+			So(err, ShouldBeNil)
+			res, _ := db.RunScript("s")
+			ty, _ := db.RunScript("typestr(s)")
+			re := res.(*model.Table).GetColumnByIndex(0)
+			zx := []string{"col1", "col2", "col3"}
+			var k int
+			for i := 0; i < int(re.RowCount); i++ {
+				if re.Get(i).Value() == zx[i] {
+					k++
+				}
+			}
+			So(k, ShouldEqual, re.RowCount)
+			So(err, ShouldBeNil)
+			So(ty.String(), ShouldEqual, "string(IN-MEMORY TABLE)")
+			So(res.GetDataType(), ShouldEqual, model.DtVoid)
+		})
+		So(db.Close(), ShouldBeNil)
+	})
+}
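+// Upload round trips for the remaining basic numeric types: INT (with a null), CHAR, SHORT and LONG.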
+func Test_Table_UpLoad_DataType_int(t *testing.T) {
+	Convey("Test_Table_upload_with_int:", t, func() {
+		db, err := api.NewSimpleDolphinDBClient(context.TODO(), setup.Address, setup.UserName, setup.Password)
+		So(err, ShouldBeNil)
+		Convey("Test_Table_with_int:", func() {
+			col, err := model.NewDataTypeListWithRaw(model.DtInt, []int32{1024, model.NullInt, 369})
+			So(err, ShouldBeNil)
+			col1, err := model.NewDataTypeListWithRaw(model.DtInt, []int32{147, 258, 369})
+			So(err, ShouldBeNil)
+			tb := model.NewTable([]string{"col", "ss"}, []*model.Vector{model.NewVector(col), model.NewVector(col1)})
+			_, err = db.Upload(map[string]model.DataForm{"s": tb})
+			So(err, ShouldBeNil)
+			res, _ := db.RunScript("s")
+			ty, _ := db.RunScript("typestr(s)")
+			re := res.(*model.Table).GetColumnByIndex(0)
+			zx := []int32{1024, math.MinInt32, 369}
+			var k int
+			for i := 0; i < int(re.RowCount); i++ {
+				if re.Get(i).Value() == zx[i] {
+					k++
+				}
+			}
+			So(k, ShouldEqual, re.RowCount)
+			So(ty.String(), ShouldEqual, "string(IN-MEMORY TABLE)")
+		})
+		So(db.Close(), ShouldBeNil)
+	})
+}
+func Test_Table_UpLoad_DataType_char(t *testing.T) {
+	Convey("Test_Table_upload_with_char:", t, func() {
+		db, err := api.NewSimpleDolphinDBClient(context.TODO(), setup.Address, setup.UserName, setup.Password)
+		So(err, ShouldBeNil)
+		Convey("Test_Table_with_char:", func() {
+			col, err := model.NewDataTypeListWithRaw(model.DtChar, []byte{127, 2, 13})
+			So(err, ShouldBeNil)
+			tb := model.NewTable([]string{"col"}, []*model.Vector{model.NewVector(col)})
+			_, err = db.Upload(map[string]model.DataForm{"s": tb})
+			So(err, ShouldBeNil)
+			res, _ := db.RunScript("s")
+			re := res.(*model.Table).GetColumnByIndex(0)
+			zx := []byte{127, 2, 13}
+			for j := 0; j < 3; j++ {
+				re := re.Get(j).Value()
+				So(re, ShouldEqual, zx[j])
+			}
+		})
+		So(db.Close(), ShouldBeNil)
+	})
+}
+func Test_Table_UpLoad_DataType_short(t *testing.T) {
+	Convey("Test_Table_upload_with_short:", t, func() {
+		db, err := api.NewSimpleDolphinDBClient(context.TODO(), setup.Address, setup.UserName, setup.Password)
+		So(err, ShouldBeNil)
+		Convey("Test_Table_with_short:", func() {
+			col, err := model.NewDataTypeListWithRaw(model.DtShort, []int16{127, -12552, 1024})
+			So(err, ShouldBeNil)
+			tb := model.NewTable([]string{"col"}, []*model.Vector{model.NewVector(col)})
+			_, err = db.Upload(map[string]model.DataForm{"s": tb})
+			So(err, ShouldBeNil)
+			res, _ := db.RunScript("s")
+			re := res.(*model.Table).GetColumnByIndex(0)
+			zx := []int16{127, -12552, 1024}
+			var k int
+			for i := 0; i < int(re.RowCount); i++ {
+				if re.Get(i).Value() == zx[i] {
+					k++
+				}
+			}
+			So(k, ShouldEqual, re.RowCount)
+		})
+		So(db.Close(), ShouldBeNil)
+	})
+}
+func Test_Table_UpLoad_DataType_long(t *testing.T) {
+	Convey("Test_Table_upload_with_long:", t, func() {
+		db, err := api.NewSimpleDolphinDBClient(context.TODO(), setup.Address, setup.UserName, setup.Password)
+		So(err, ShouldBeNil)
+		Convey("Test_Table_with_long:", func() {
+			col, err := model.NewDataTypeListWithRaw(model.DtLong, []int64{1048576, -1024, 13169})
+			So(err, ShouldBeNil)
+			tb := model.NewTable([]string{"col"}, []*model.Vector{model.NewVector(col)})
+			_, err = db.Upload(map[string]model.DataForm{"s": tb})
+			So(err, ShouldBeNil)
+			res, _ := db.RunScript("s")
+			re := res.(*model.Table).GetColumnByIndex(0)
+			zx := []int64{1048576, -1024, 13169}
+			var k int
+			for i := 0; i < int(re.RowCount); i++ {
+				if re.Get(i).Value() == zx[i] {
+					k++
+				}
+			}
+			So(k, ShouldEqual, re.RowCount)
+		})
+		So(db.Close(), ShouldBeNil)
+	})
+}
+func Test_Table_UpLoad_DataType_float(t *testing.T) {
+	Convey("Test_Table_upload_with_float:", t, func() {
+		db, err := api.NewSimpleDolphinDBClient(context.TODO(), setup.Address, setup.UserName, setup.Password)
+		So(err, ShouldBeNil)
+		Convey("Test_Table_with_float:", func() {
+			col, err := model.NewDataTypeListWithRaw(model.DtFloat, []float32{1048576.02, -1024.365, 13169.14196})
+			So(err, ShouldBeNil)
+			tb := model.NewTable([]string{"col"}, []*model.Vector{model.NewVector(col)})
+			_, err = db.Upload(map[string]model.DataForm{"s": tb})
+			So(err, ShouldBeNil)
+			res, _ := db.RunScript("s")
+			re := res.(*model.Table).GetColumnByIndex(0)
+			zx := []float32{1048576.02, -1024.365, 13169.14196}
+			var k int
+			for i := 0; i < int(re.RowCount); i++ {
+				if re.Get(i).Value() == zx[i] {
+					k++
+				}
+			}
+			So(k, ShouldEqual, re.RowCount)
+		})
+		So(db.Close(), ShouldBeNil)
+	})
+}
+func Test_Table_UpLoad_DataType_double(t *testing.T) {
+	Convey("Test_Table_upload_with_double:", t, func() {
+		db, err := api.NewSimpleDolphinDBClient(context.TODO(), setup.Address, setup.UserName, setup.Password)
+		So(err, ShouldBeNil)
+		Convey("Test_Table_with_double:", func() {
+			col, err := model.NewDataTypeListWithRaw(model.DtDouble, []float64{1048576.02011, -1024.365, 13169.14196})
+			So(err, ShouldBeNil)
+			tb := model.NewTable([]string{"col"}, []*model.Vector{model.NewVector(col)})
+			_, err = db.Upload(map[string]model.DataForm{"s": tb})
+			So(err, ShouldBeNil)
+			res, _ := db.RunScript("s")
+			re := res.(*model.Table).GetColumnByIndex(0)
+			zx := []float64{1048576.02011, -1024.365, 13169.14196}
+			var k int
+			for i := 0; i < int(re.RowCount); i++ {
+				if re.Get(i).Value() == zx[i] {
+					k++
+				}
+			}
+			So(k, ShouldEqual, re.RowCount)
+		})
+		So(db.Close(), ShouldBeNil)
+	})
+}
+func Test_Table_UpLoad_DataType_bool(t *testing.T) {
+	Convey("Test_Table_upload_with_bool:", t, func() {
+		db, err := api.NewSimpleDolphinDBClient(context.TODO(), setup.Address, setup.UserName, setup.Password)
+		So(err, ShouldBeNil)
+		Convey("Test_Table_with_bool:", func() {
+			col, err := model.NewDataTypeListWithRaw(model.DtBool, []byte{1, 0, 1})
+			So(err, ShouldBeNil)
+			tb := model.NewTable([]string{"col"}, []*model.Vector{model.NewVector(col)})
+			_, err = db.Upload(map[string]model.DataForm{"s": tb})
+			So(err, ShouldBeNil)
+			res, _ := db.RunScript("s")
+			re := res.(*model.Table).GetColumnByIndex(0)
+			zx := []bool{true, false, true}
+			var k int
+			for i := 0; i < int(re.RowCount); i++ {
+				if re.Get(i).Value() == zx[i] {
+					k++
+				}
+			}
+			So(k, ShouldEqual, re.RowCount)
+		})
+		So(db.Close(), ShouldBeNil)
+	})
+}
+func Test_Table_UpLoad_DataType_date(t *testing.T) {
+	Convey("Test_Table_upload_with_date:", t, func() {
+		db, err := api.NewSimpleDolphinDBClient(context.TODO(), setup.Address, setup.UserName, setup.Password)
+		So(err, ShouldBeNil)
+		Convey("Test_Table_with_date:", func() {
+			col, err := model.NewDataTypeListWithRaw(model.DtDate, []time.Time{time.Date(2022, 12, 31, 23, 59, 59, 999999999, time.UTC), time.Date(1969, 12, 31, 23, 59, 59, 999999999, time.UTC), time.Date(2006, 1, 2, 15, 4, 4, 999999999, time.UTC)})
+			So(err, ShouldBeNil)
+			tb := model.NewTable([]string{"col"}, []*model.Vector{model.NewVector(col)})
+			_, err = db.Upload(map[string]model.DataForm{"s": tb})
+			So(err, ShouldBeNil)
+			res, _ := db.RunScript("s")
+			re := res.(*model.Table).GetColumnByIndex(0)
+			time1 := []time.Time{time.Date(2022, 12, 31, 0, 0, 0, 0, time.UTC), time.Date(1969, 12, 31, 0, 0, 0, 0, time.UTC), time.Date(2006, 1, 2, 0, 0, 0, 0, time.UTC)}
+			var k int
+			for i := 0; i < int(re.RowCount); i++ {
+				if re.Get(i).Value() == time1[i] {
+					k++
+				}
+			}
+			So(k, ShouldEqual, re.RowCount)
+		})
+		So(db.Close(), ShouldBeNil)
+	})
+}
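+// Upload round trips for the temporal types continue: MONTH, TIME and MINUTE, each checked against its truncated server-side value.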
+func Test_Table_UpLoad_DataType_month(t *testing.T) {
+	Convey("Test_Table_upload_with_month:", t, func() {
+		db, err := api.NewSimpleDolphinDBClient(context.TODO(), setup.Address, setup.UserName, setup.Password)
+		So(err, ShouldBeNil)
+		Convey("Test_Table_with_month:", func() {
+			col, err := model.NewDataTypeListWithRaw(model.DtMonth, []time.Time{time.Date(2022, 12, 31, 23, 59, 59, 999999999, time.UTC), time.Date(1969, 12, 31, 23, 59, 59, 999999999, time.UTC), time.Date(2006, 1, 2, 15, 4, 4, 999999999, time.UTC)})
+			So(err, ShouldBeNil)
+			tb := model.NewTable([]string{"col"}, []*model.Vector{model.NewVector(col)})
+			_, err = db.Upload(map[string]model.DataForm{"s": tb})
+			So(err, ShouldBeNil)
+			res, _ := db.RunScript("s")
+			re := res.(*model.Table).GetColumnByIndex(0)
+			time1 := []time.Time{time.Date(2022, 12, 1, 0, 0, 0, 0, time.UTC), time.Date(1969, 12, 1, 0, 0, 0, 0, time.UTC), time.Date(2006, 1, 1, 0, 0, 0, 0, time.UTC)}
+			var k int
+			for i := 0; i < int(re.RowCount); i++ {
+				if re.Get(i).Value() == time1[i] {
+					k++
+				}
+			}
+			So(k, ShouldEqual, re.RowCount)
+		})
+		So(db.Close(), ShouldBeNil)
+	})
+}
+func Test_Table_UpLoad_DataType_time(t *testing.T) {
+	Convey("Test_Table_upload_with_time:", t, func() {
+		db, err := api.NewSimpleDolphinDBClient(context.TODO(), setup.Address, setup.UserName, setup.Password)
+		So(err, ShouldBeNil)
+		Convey("Test_Table_with_time:", func() {
+			col, err := model.NewDataTypeListWithRaw(model.DtTime, []time.Time{time.Date(2022, 12, 31, 23, 59, 59, 999999999, time.UTC), time.Date(1969, 12, 31, 23, 59, 59, 999999999, time.UTC), time.Date(2006, 1, 2, 15, 4, 4, 999999999, time.UTC)})
+			So(err, ShouldBeNil)
+			tb := model.NewTable([]string{"col"}, []*model.Vector{model.NewVector(col)})
+			_, err = db.Upload(map[string]model.DataForm{"s": tb})
+			So(err, ShouldBeNil)
+			res, _ := db.RunScript("s")
+			re := res.(*model.Table).GetColumnByIndex(0)
+			time1 := []time.Time{time.Date(1970, 1, 1, 23, 59, 59, 999000000, time.UTC), time.Date(1970, 1, 1, 23, 59, 59, 999000000, time.UTC), time.Date(1970, 1, 1, 15, 4, 4, 999000000, time.UTC)}
+			var k int
+			for i := 0; i < int(re.RowCount); i++ {
+				if re.Get(i).Value() == time1[i] {
+					k++
+				}
+			}
+			So(k, ShouldEqual, re.RowCount)
+		})
+		So(db.Close(), ShouldBeNil)
+	})
+}
+func Test_Table_UpLoad_DataType_minute(t *testing.T) {
+	Convey("Test_Table_upload_with_minute:", t, func() {
+		db, err := api.NewSimpleDolphinDBClient(context.TODO(), setup.Address, setup.UserName, setup.Password)
+		So(err, ShouldBeNil)
+		Convey("Test_Table_with_minute:", func() {
+			col, err := model.NewDataTypeListWithRaw(model.DtMinute, []time.Time{time.Date(2022, 12, 31, 23, 59, 59, 999999999, time.UTC), time.Date(1969, 12, 31, 23, 59, 59, 999999999, time.UTC), time.Date(2006, 1, 2, 15, 4, 4, 999999999, time.UTC)})
+			So(err, ShouldBeNil)
+			tb := model.NewTable([]string{"col"}, []*model.Vector{model.NewVector(col)})
+			_, err = db.Upload(map[string]model.DataForm{"s": tb})
+			So(err, ShouldBeNil)
+			res, _ := db.RunScript("s")
+			re := res.(*model.Table).GetColumnByIndex(0)
+			time1 := []time.Time{time.Date(1970, 1, 1, 23, 59, 0, 0, time.UTC), time.Date(1970, 1, 1, 23, 59, 0, 0, time.UTC), time.Date(1970, 1, 1, 15, 4, 0, 0, time.UTC)}
+			var k int
+			for i := 0; i < int(re.RowCount); i++ {
+				if re.Get(i).Value() == time1[i] {
+					k++
+				}
+			}
+			So(k, ShouldEqual, re.RowCount)
+		})
+		So(db.Close(), ShouldBeNil)
+	})
+}
+func Test_Table_UpLoad_DataType_second(t *testing.T) {
+	Convey("Test_Table_upload_with_second:", t, func() {
+		db, err := api.NewSimpleDolphinDBClient(context.TODO(), setup.Address, setup.UserName, setup.Password)
+		So(err, ShouldBeNil)
+		Convey("Test_Table_with_second:", func() {
+			col, err := model.NewDataTypeListWithRaw(model.DtSecond, []time.Time{time.Date(2022, 12, 31, 23, 59, 59, 999999999, time.UTC), time.Date(1969, 12, 31, 23, 59, 59, 999999999, time.UTC), time.Date(2006, 1, 2, 15, 4, 4, 999999999, time.UTC)})
+			So(err, ShouldBeNil)
+			tb := model.NewTable([]string{"col"}, []*model.Vector{model.NewVector(col)})
+			_, err = db.Upload(map[string]model.DataForm{"s": tb})
+			So(err, ShouldBeNil)
+			res, _ := db.RunScript("s")
+			re := res.(*model.Table).GetColumnByIndex(0)
+			time1 := []time.Time{time.Date(1970, 1, 1, 23, 59, 59, 0, time.UTC), time.Date(1970, 1, 1, 23, 59, 59, 0, time.UTC), time.Date(1970, 1, 1, 15, 4, 4, 0, time.UTC)}
+			var k int
+			for i := 0; i < int(re.RowCount); i++ {
+				if re.Get(i).Value() == time1[i] {
+					k++
+				}
+			}
+			So(k, ShouldEqual, re.RowCount)
+		})
+		So(db.Close(), ShouldBeNil)
+	})
+}
+func Test_Table_UpLoad_DataType_datetime(t *testing.T) {
+	Convey("Test_Table_upload_with_datetime:", t, func() {
+		db, err := api.NewSimpleDolphinDBClient(context.TODO(), setup.Address, setup.UserName, setup.Password)
+		So(err, ShouldBeNil)
+		Convey("Test_Table_with_datetime:", func() {
+			col, err := model.NewDataTypeListWithRaw(model.DtDatetime, []time.Time{time.Date(2022, 12, 31, 23, 59, 59, 999999999, time.UTC), time.Date(1969, 12, 31, 23, 59, 59, 999999999, time.UTC), time.Date(2006, 1, 2, 15, 4, 4, 999999999, time.UTC)})
+			So(err, ShouldBeNil)
+			tb := model.NewTable([]string{"col"}, []*model.Vector{model.NewVector(col)})
+			_, err = db.Upload(map[string]model.DataForm{"s": tb})
+			So(err, ShouldBeNil)
+			res, _ := db.RunScript("s")
+			re := res.(*model.Table).GetColumnByIndex(0)
+			time1 := []time.Time{time.Date(2022, 12, 31, 23, 59, 59, 0, time.UTC), time.Date(1969, 12, 31, 23, 59, 59, 0, time.UTC), time.Date(2006, 1, 2, 15, 4, 4, 0, time.UTC)}
+			var k int
+			for i := 0; i < int(re.RowCount); i++ {
+				if re.Get(i).Value() == time1[i] {
+					k++
+				}
+			}
+			So(k, ShouldEqual, re.RowCount)
+		})
+		So(db.Close(), ShouldBeNil)
+	})
+}
+func Test_Table_UpLoad_DataType_timestamp(t *testing.T) {
+	Convey("Test_Table_upload_with_timestamp:", t, func() {
+		db, err := api.NewSimpleDolphinDBClient(context.TODO(), setup.Address, setup.UserName, setup.Password)
+		So(err, ShouldBeNil)
+		Convey("Test_Table_with_timestamp:", func() {
+			col, err := model.NewDataTypeListWithRaw(model.DtTimestamp, []time.Time{time.Date(2022, 12, 31, 23, 59, 59, 999999999, time.UTC), time.Date(1969, 12, 31, 23, 59, 59, 999999999, time.UTC), time.Date(2006, 1, 2, 15, 4, 4, 999999999, time.UTC)})
+			So(err, ShouldBeNil)
+			tb := model.NewTable([]string{"col"}, []*model.Vector{model.NewVector(col)})
+			_, err = db.Upload(map[string]model.DataForm{"s": tb})
+			So(err, ShouldBeNil)
+			res, _ := db.RunScript("s")
+			re := res.(*model.Table).GetColumnByIndex(0)
+			time1 := []time.Time{time.Date(2022, 12, 31, 23, 59, 59, 999000000, time.UTC), time.Date(1969, 12, 31, 23, 59, 59, 999000000, time.UTC), time.Date(2006, 1, 2, 15, 4, 4, 999000000, time.UTC)}
+			var k int
+			for i := 0; i < int(re.RowCount); i++ {
+				if re.Get(i).Value() == time1[i] {
+					k++
+				}
+			}
+			So(k, ShouldEqual, re.RowCount)
+		})
+		So(db.Close(), ShouldBeNil)
+	})
+}
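+// Upload round trips for NANOTIME, NANOTIMESTAMP and DATEHOUR.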
+func Test_Table_UpLoad_DataType_nanotime(t *testing.T) {
+	Convey("Test_Table_upload_with_nanotime:", t, func() {
+		db, err := api.NewSimpleDolphinDBClient(context.TODO(), setup.Address, setup.UserName, setup.Password)
+		So(err, ShouldBeNil)
+		Convey("Test_Table_with_nanotime:", func() {
+			col, err := model.NewDataTypeListWithRaw(model.DtNanoTime, []time.Time{time.Date(2022, 12, 31, 23, 59, 59, 999999999, time.UTC), time.Date(1969, 12, 31, 23, 59, 59, 999999999, time.UTC), time.Date(2006, 1, 2, 15, 4, 4, 999999999, time.UTC)})
+			So(err, ShouldBeNil)
+			tb := model.NewTable([]string{"col"}, []*model.Vector{model.NewVector(col)})
+			_, err = db.Upload(map[string]model.DataForm{"s": tb})
+			So(err, ShouldBeNil)
+			res, _ := db.RunScript("s")
+			re := res.(*model.Table).GetColumnByIndex(0)
+			time1 := []time.Time{time.Date(1970, 1, 1, 23, 59, 59, 999999999, time.UTC), time.Date(1970, 1, 1, 23, 59, 59, 999999999, time.UTC), time.Date(1970, 1, 1, 15, 4, 4, 999999999, time.UTC)}
+			var k int
+			for i := 0; i < int(re.RowCount); i++ {
+				if re.Get(i).Value() == time1[i] {
+					k++
+				}
+			}
+			So(k, ShouldEqual, re.RowCount)
+		})
+		So(db.Close(), ShouldBeNil)
+	})
+}
+func Test_Table_UpLoad_DataType_nanotimestamp(t *testing.T) {
+	Convey("Test_Table_upload_with_nanotimestamp:", t, func() {
+		db, err := api.NewSimpleDolphinDBClient(context.TODO(), setup.Address, setup.UserName, setup.Password)
+		So(err, ShouldBeNil)
+		Convey("Test_Table_with_nanotimestamp:", func() {
+			col, err := model.NewDataTypeListWithRaw(model.DtNanoTimestamp, []time.Time{time.Date(2022, 12, 31, 23, 59, 59, 999999999, time.UTC), time.Date(1969, 12, 31, 23, 59, 59, 999999999, time.UTC), time.Date(2006, 1, 2, 15, 4, 4, 999999999, time.UTC)})
+			So(err, ShouldBeNil)
+			tb := model.NewTable([]string{"col"}, []*model.Vector{model.NewVector(col)})
+			_, err = db.Upload(map[string]model.DataForm{"s": tb})
+			So(err, ShouldBeNil)
+			res, _ := db.RunScript("s")
+			re := res.(*model.Table).GetColumnByIndex(0)
+			time1 := []time.Time{time.Date(2022, 12, 31, 23, 59, 59, 999999999, time.UTC), time.Date(1969, 12, 31, 23, 59, 59, 999999999, time.UTC), time.Date(2006, 1, 2, 15, 4, 4, 999999999, time.UTC)}
+			var k int
+			for i := 0; i < int(re.RowCount); i++ {
+				if re.Get(i).Value() == time1[i] {
+					k++
+				}
+			}
+			So(k, ShouldEqual, re.RowCount)
+		})
+		So(db.Close(), ShouldBeNil)
+	})
+}
+func Test_Table_UpLoad_DataType_datehour(t *testing.T) {
+	Convey("Test_Table_upload_with_datehour:", t, func() {
+		db, err := api.NewSimpleDolphinDBClient(context.TODO(), setup.Address, setup.UserName, setup.Password)
+		So(err, ShouldBeNil)
+		Convey("Test_Table_with_datehour:", func() {
+			col, err := model.NewDataTypeListWithRaw(model.DtDateHour, []time.Time{time.Date(2022, 12, 31, 23, 59, 59, 999999999, time.UTC), time.Date(1969, 12, 31, 23, 59, 59, 999999999, time.UTC), time.Date(2006, 1, 2, 15, 4, 4, 999999999, time.UTC)})
+			So(err, ShouldBeNil)
+			tb := model.NewTable([]string{"col"}, []*model.Vector{model.NewVector(col)})
+			_, err = db.Upload(map[string]model.DataForm{"s": tb})
+			So(err, ShouldBeNil)
+			res, _ := db.RunScript("s")
+			re := res.(*model.Table).GetColumnByIndex(0)
+			time1 := []time.Time{time.Date(2022, 12, 31, 23, 0, 0, 0, time.UTC), time.Date(1969, 12, 31, 23, 0, 0, 0, time.UTC), time.Date(2006, 1, 2, 15, 0, 0, 0, time.UTC)}
+			var k int
+			for i := 0; i < int(re.RowCount); i++ {
+				if re.Get(i).Value() == time1[i] {
+					k++
+				}
+			}
+			So(k, ShouldEqual, re.RowCount)
+		})
+		So(db.Close(), ShouldBeNil)
+	})
+}
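+// Upload round trips for the binary-style types: BLOB, UUID, IPADDR and INT128.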
+func Test_Table_UpLoad_DataType_blob(t *testing.T) {
+	Convey("Test_Table_upload_with_blob:", t, func() {
+		db, err := api.NewSimpleDolphinDBClient(context.TODO(), setup.Address, setup.UserName, setup.Password)
+		So(err, ShouldBeNil)
+		Convey("Test_Table_with_blob:", func() {
+			col, err := model.NewDataTypeListWithRaw(model.DtBlob, [][]byte{{6}, {12}, {56}, {128}})
+			So(err, ShouldBeNil)
+			tb := model.NewTable([]string{"col"}, []*model.Vector{model.NewVector(col)})
+			_, err = db.Upload(map[string]model.DataForm{"s": tb})
+			So(err, ShouldBeNil)
+			res, _ := db.RunScript("s")
+			re := res.(*model.Table).GetColumnByIndex(0)
+			zx := [][]byte{{6}, {12}, {56}, {128}}
+			for j := 0; j < int(re.RowCount); j++ {
+				re := re.Get(j).Value()
+				So(re, ShouldResemble, zx[j])
+			}
+		})
+		So(db.Close(), ShouldBeNil)
+	})
+}
+func Test_Table_UpLoad_DataType_uuid(t *testing.T) {
+	Convey("Test_Table_upload_with_uuid:", t, func() {
+		db, err := api.NewSimpleDolphinDBClient(context.TODO(), setup.Address, setup.UserName, setup.Password)
+		So(err, ShouldBeNil)
+		Convey("Test_Table_with_uuid:", func() {
+			col, err := model.NewDataTypeListWithRaw(model.DtUUID, []string{"5d212a78-cc48-e3b1-4235-b4d91473ee87", "5d212a78-cc48-e3b1-4235-b4d91473ee88", "5d212a78-cc48-e3b1-4235-b4d91473ee89"})
+			So(err, ShouldBeNil)
+			tb := model.NewTable([]string{"col"}, []*model.Vector{model.NewVector(col)})
+			_, err = db.Upload(map[string]model.DataForm{"s": tb})
+			So(err, ShouldBeNil)
+			res, _ := db.RunScript("s")
+			re := res.(*model.Table).GetColumnByIndex(0)
+			zx := []string{"5d212a78-cc48-e3b1-4235-b4d91473ee87", "5d212a78-cc48-e3b1-4235-b4d91473ee88", "5d212a78-cc48-e3b1-4235-b4d91473ee89"}
+			var k int
+			for i := 0; i < int(re.RowCount); i++ {
+				if re.Get(i).Value() == zx[i] {
+					k++
+				}
+			}
+			So(k, ShouldEqual, re.RowCount)
+		})
+		So(db.Close(), ShouldBeNil)
+	})
+}
+func Test_Table_UpLoad_DataType_ipaddr(t *testing.T) {
+	Convey("Test_Table_upload_with_ipaddr:", t, func() {
+		db, err := api.NewSimpleDolphinDBClient(context.TODO(), setup.Address, setup.UserName, setup.Password)
+		So(err, ShouldBeNil)
+		Convey("Test_Table_with_ipaddr:", func() {
+			col, err := model.NewDataTypeListWithRaw(model.DtIP, []string{"192.163.1.12", "0.0.0.0", "127.0.0.1"})
+			So(err, ShouldBeNil)
+			tb := model.NewTable([]string{"col"}, []*model.Vector{model.NewVector(col)})
+			_, err = db.Upload(map[string]model.DataForm{"s": tb})
+			So(err, ShouldBeNil)
+			res, _ := db.RunScript("s")
+			re := res.(*model.Table).GetColumnByIndex(0)
+			zx := []string{"192.163.1.12", "0.0.0.0", "127.0.0.1"}
+			var k int
+			for i := 0; i < int(re.RowCount); i++ {
+				if re.Get(i).Value() == zx[i] {
+					k++
+				}
+			}
+			So(k, ShouldEqual, re.RowCount)
+		})
+		So(db.Close(), ShouldBeNil)
+	})
+}
+func Test_Table_UpLoad_DataType_int128(t *testing.T) {
+	Convey("Test_Table_upload_with_int128:", t, func() {
+		db, err := api.NewSimpleDolphinDBClient(context.TODO(), setup.Address, setup.UserName, setup.Password)
+		So(err, ShouldBeNil)
+		Convey("Test_Table_with_int128:", func() {
+			col, err := model.NewDataTypeListWithRaw(model.DtInt128, []string{"e1671797c52e15f763380b45e841ec32", "e1671797c52e15f763380b45e841ec33", "e1671797c52e15f763380b45e841ec34"})
+			So(err, ShouldBeNil)
+			tb := model.NewTable([]string{"col"}, []*model.Vector{model.NewVector(col)})
+			_, err = db.Upload(map[string]model.DataForm{"s": tb})
+			So(err, ShouldBeNil)
+			res, _ := db.RunScript("s")
+			re := res.(*model.Table).GetColumnByIndex(0)
+			zx := []string{"e1671797c52e15f763380b45e841ec32", "e1671797c52e15f763380b45e841ec33", "e1671797c52e15f763380b45e841ec34"}
+			var k int
+			for i := 0; i < int(re.RowCount); i++ {
+				if re.Get(i).Value() == zx[i] {
+					k++
+				}
+			}
+			So(k, ShouldEqual, re.RowCount)
+		})
+		So(db.Close(), ShouldBeNil)
+	})
+}
+func Test_Table_UpLoad_DataType_point(t *testing.T) {
+	Convey("Test_Table_upload_with_point:", t, func() {
+		db, err := api.NewSimpleDolphinDBClient(context.TODO(), setup.Address, setup.UserName, setup.Password)
+		So(err, ShouldBeNil)
+		Convey("Test_Table_with_point:", func() {
+			col, err := model.NewDataTypeListWithRaw(model.DtPoint, [][2]float64{{1, 1}, {-1, -1024.5}, {1001022.4, -30028.75}})
+			So(err, ShouldBeNil)
+			tb := model.NewTable([]string{"col"}, []*model.Vector{model.NewVector(col)})
+			_, err = db.Upload(map[string]model.DataForm{"s": tb})
+			So(err, ShouldBeNil)
+			res, _ := db.RunScript("s")
+			re := res.(*model.Table).GetColumnByIndex(0)
+			zx := []string{"(1.00000, 1.00000)", "(-1.00000, -1024.50000)", "(1001022.40000, -30028.75000)"}
+			var k int
+			for i := 0; i < int(re.RowCount); i++ {
+				if re.Get(i).Value() == zx[i] {
+					k++
+				}
+			}
+			So(k, ShouldEqual, re.RowCount)
+		})
+		So(db.Close(), ShouldBeNil)
+	})
+}
+func Test_Table_UpLoad_DataType_complex(t *testing.T) {
+	Convey("Test_Table_upload_with_complex:", t, func() {
+		db, err := api.NewSimpleDolphinDBClient(context.TODO(), setup.Address, setup.UserName, setup.Password)
+		So(err, ShouldBeNil)
+		Convey("Test_Table_with_complex:", func() {
+			col, err := model.NewDataTypeListWithRaw(model.DtComplex, [][2]float64{{1, 1}, {-1, -1024.5}, {1001022.4, -30028.75}})
+			So(err, ShouldBeNil)
+			tb := model.NewTable([]string{"complex"}, []*model.Vector{model.NewVector(col)})
+			_, err = db.Upload(map[string]model.DataForm{"s": tb})
+			So(err, ShouldBeNil)
+			res, _ := db.RunScript("s")
+			re := res.(*model.Table).GetColumnByIndex(0)
+			zx := []string{"1.00000+1.00000i", "-1.00000+-1024.50000i", "1001022.40000+-30028.75000i"}
+			var k int
+			for i := 0; i < int(re.RowCount); i++ {
+				if re.Get(i).Value() == zx[i] {
+					k++
+				}
+			}
+			So(k, ShouldEqual, re.RowCount)
+		})
+		So(db.Close(), ShouldBeNil)
+	})
+}
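+// Test_Table_UpLoad_DataType_has_all_type_part1 uploads a 24-column table covering every scalar type at once, with row 1 of each column set to that type's null value, then verifies values and null handling column by column.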
+func Test_Table_UpLoad_DataType_has_all_type_part1(t *testing.T) {
+	Convey("Test_Table_upload_with_all_datatype:", t, func() {
+		db, err := api.NewSimpleDolphinDBClient(context.TODO(), setup.Address, setup.UserName, setup.Password)
+		So(err, ShouldBeNil)
+		Convey("Test_Table_with_all_datatype:", func() {
+			complexv, err := model.NewDataTypeListWithRaw(model.DtComplex, [][2]float64{{1, 1}, model.NullComplex, {1001022.4, -30028.75}})
+			So(err, ShouldBeNil)
+			stringv, err := model.NewDataTypeListWithRaw(model.DtString, []string{"col1", "", "col3"})
+			So(err, ShouldBeNil)
+			intv, err := model.NewDataTypeListWithRaw(model.DtInt, []int32{1024, model.NullInt, 369})
+			So(err, ShouldBeNil)
+			charv, err := model.NewDataTypeListWithRaw(model.DtChar, []byte{127, model.NullChar, 13})
+			So(err, ShouldBeNil)
+			shortv, err := model.NewDataTypeListWithRaw(model.DtShort, []int16{127, model.NullShort, 1024})
+			So(err, ShouldBeNil)
+			longv, err := model.NewDataTypeListWithRaw(model.DtLong, []int64{1048576, model.NullLong, 13169})
+			So(err, ShouldBeNil)
+			floatv, err := model.NewDataTypeListWithRaw(model.DtFloat, []float32{1048576.02, model.NullFloat, 13169.14196})
+			So(err, ShouldBeNil)
+			doublev, err := model.NewDataTypeListWithRaw(model.DtDouble, []float64{1048576.02011, model.NullDouble, 13169.14196})
+			So(err, ShouldBeNil)
+			boolv, err := model.NewDataTypeListWithRaw(model.DtBool, []byte{1, model.NullBool, 1})
+			So(err, ShouldBeNil)
+			pointv, err := model.NewDataTypeListWithRaw(model.DtPoint, [][2]float64{{1, 1}, model.NullPoint, {1001022.4, -30028.75}})
+			So(err, ShouldBeNil)
+			symbolv, err := model.NewDataTypeListWithRaw(model.DtSymbol, []string{"*", "", "87"})
+			So(err, ShouldBeNil)
+			datev, err := model.NewDataTypeListWithRaw(model.DtDate, []time.Time{time.Date(2022, 12, 31, 23, 59, 59, 999999999, time.UTC), model.NullTime, time.Date(2006, 1, 2, 15, 4, 4, 999999999, time.UTC)})
+			So(err, ShouldBeNil)
+			monthv, err := model.NewDataTypeListWithRaw(model.DtMonth, []time.Time{time.Date(2022, 12, 31, 23, 59, 59, 999999999, time.UTC), model.NullTime, time.Date(2006, 1, 2, 15, 4, 4, 999999999, time.UTC)})
+			So(err, ShouldBeNil)
+			timev, err := model.NewDataTypeListWithRaw(model.DtTime, []time.Time{time.Date(2022, 12, 31, 23, 59, 59, 999999999, time.UTC), model.NullTime, time.Date(2006, 1, 2, 15, 4, 4, 999999999, time.UTC)})
+			So(err, ShouldBeNil)
+			minutev, err := model.NewDataTypeListWithRaw(model.DtMinute, []time.Time{time.Date(2022, 12, 31, 23, 59, 59, 999999999, time.UTC), model.NullTime, time.Date(2006, 1, 2, 15, 4, 4, 999999999, time.UTC)})
+			So(err, ShouldBeNil)
+			secondv, err := model.NewDataTypeListWithRaw(model.DtSecond, []time.Time{time.Date(2022, 12, 31, 23, 59, 59, 999999999, time.UTC), model.NullTime, time.Date(2006, 1, 2, 15, 4, 4, 999999999, time.UTC)})
+			So(err, ShouldBeNil)
+			datetimev, err := model.NewDataTypeListWithRaw(model.DtDatetime, []time.Time{time.Date(2022, 12, 31, 23, 59, 59, 999999999, time.UTC), model.NullTime, time.Date(2006, 1, 2, 15, 4, 4, 999999999, time.UTC)})
+			So(err, ShouldBeNil)
+			timestampv, err := model.NewDataTypeListWithRaw(model.DtTimestamp, []time.Time{time.Date(2022, 12, 31, 23, 59, 59, 999999999, time.UTC), model.NullTime, time.Date(2006, 1, 2, 15, 4, 4, 999999999, time.UTC)})
+			So(err, ShouldBeNil)
+			nanotimev, err := model.NewDataTypeListWithRaw(model.DtNanoTime, []time.Time{time.Date(2022, 12, 31, 23, 59, 59, 999999999, time.UTC), model.NullTime, time.Date(2006, 1, 2, 15, 4, 4, 999999999, time.UTC)})
+			So(err, ShouldBeNil)
+			nanotimestampv, err := model.NewDataTypeListWithRaw(model.DtNanoTimestamp, []time.Time{time.Date(2022, 12, 31, 23, 59, 59, 999999999, time.UTC), model.NullTime, time.Date(2006, 1, 2, 15, 4, 4, 999999999, time.UTC)})
+			So(err, ShouldBeNil)
+			datehourv, err := model.NewDataTypeListWithRaw(model.DtDateHour, []time.Time{time.Date(2022, 12, 31, 23, 59, 59, 999999999, time.UTC), model.NullTime, time.Date(2006, 1, 2, 15, 4, 4, 999999999, time.UTC)})
+			So(err, ShouldBeNil)
+			uuidv, err := model.NewDataTypeListWithRaw(model.DtUUID, []string{"5d212a78-cc48-e3b1-4235-b4d91473ee87", "", "5d212a78-cc48-e3b1-4235-b4d91473ee89"})
+			So(err, ShouldBeNil)
+			ipaddrv, err := model.NewDataTypeListWithRaw(model.DtIP, []string{"192.163.1.12", "", "127.0.0.1"})
+			So(err, ShouldBeNil)
+			int128v, err := model.NewDataTypeListWithRaw(model.DtInt128, []string{"e1671797c52e15f763380b45e841ec32", "", "e1671797c52e15f763380b45e841ec34"})
+			So(err, ShouldBeNil)
+			tb := model.NewTable([]string{"complex", "string", "int", "char", "short", "long", "float", "double", "point", "bool", "date", "month", "time", "minute", "second", "datetime", "timestamp", "nanotime", "nanotimestamp", "datehour", "uuid", "ipaddr", "int128", "symbol"}, []*model.Vector{model.NewVector(complexv), model.NewVector(stringv), model.NewVector(intv), model.NewVector(charv), model.NewVector(shortv), model.NewVector(longv), model.NewVector(floatv), model.NewVector(doublev), model.NewVector(pointv), model.NewVector(boolv), model.NewVector(datev), model.NewVector(monthv), model.NewVector(timev), model.NewVector(minutev), model.NewVector(secondv), model.NewVector(datetimev), model.NewVector(timestampv), model.NewVector(nanotimev), model.NewVector(nanotimestampv), model.NewVector(datehourv), model.NewVector(uuidv), model.NewVector(ipaddrv), model.NewVector(int128v), model.NewVector(symbolv)})
+			_, err = db.Upload(map[string]model.DataForm{"s": tb})
+			So(err, ShouldBeNil)
+			res, _ := db.RunScript("s")
+			re := res.(*model.Table)
+			complexvs := []string{"1.00000+1.00000i", "", "1001022.40000+-30028.75000i"}
+			intvs := []int32{1024, model.NullInt, 369}
+			stringvs := []string{"col1", "", "col3"}
+			charvs := []byte{127, model.NullChar, 13}
+			shortvs := []int16{127, model.NullShort, 1024}
+			longvs := []int64{1048576, model.NullLong, 13169}
+			floatvs := []float32{1048576.02, model.NullFloat, 13169.14196}
+			doublevs := []float64{1048576.02011, model.NullDouble, 13169.14196}
+			boolvs := []bool{true, false, true}
+			pointvs := []string{"(1.00000, 1.00000)", "(,)", "(1001022.40000, -30028.75000)"}
+			datevs := []time.Time{time.Date(2022, 12, 31, 0, 0, 0, 0, time.UTC), time.Date(1, 1, 1, 0, 0, 0, 0, time.UTC), time.Date(2006, 1, 2, 0, 0, 0, 0, time.UTC)}
+			monthvs := []time.Time{time.Date(2022, 12, 1, 0, 0, 0, 0, time.UTC), time.Date(1, 1, 1, 0, 0, 0, 0, time.UTC), time.Date(2006, 1, 1, 0, 0, 0, 0, time.UTC)}
+			timevs := []time.Time{time.Date(1970, 1, 1, 23, 59, 59, 999000000, time.UTC), time.Date(1, 1, 1, 0, 0, 0, 0, time.UTC), time.Date(1970, 1, 1, 15, 4, 4, 999000000, time.UTC)}
+			minutevs := []time.Time{time.Date(1970, 1, 1, 23, 59, 0, 0, time.UTC), time.Date(1, 1, 1, 0, 0, 0, 0, time.UTC), time.Date(1970, 1, 1, 15, 4, 0, 0, time.UTC)}
+			secondvs := []time.Time{time.Date(1970, 1, 1, 23, 59, 59, 0, time.UTC), time.Date(1, 1, 1, 0, 0, 0, 0, time.UTC), time.Date(1970, 1, 1, 15, 4, 4, 0, time.UTC)}
+			datetimevs := []time.Time{time.Date(2022, 12, 31, 23, 59, 59, 0, time.UTC), time.Date(1, 1, 1, 0, 0, 0, 0, time.UTC), time.Date(2006, 1, 2, 15, 4, 4, 0, time.UTC)}
+			symbolvs := []string{"*", "", "87"}
+			timestampvs := []time.Time{time.Date(2022, 12, 31, 23, 59, 59, 999000000, time.UTC), time.Date(1, 1, 1, 0, 0, 0, 0, time.UTC), time.Date(2006, 1, 2, 15, 4, 4, 999000000, time.UTC)}
+			nanotimevs := []time.Time{time.Date(1970, 1, 1, 23, 59, 59, 999999999, time.UTC), time.Date(1, 1, 1, 0, 0, 0, 0, time.UTC), time.Date(1970, 1, 1, 15, 4, 4, 999999999, time.UTC)}
+			nanotimestampvs := []time.Time{time.Date(2022, 12, 31, 23, 59, 59, 999999999, time.UTC), time.Date(1, 1, 1, 0, 0, 0, 0, time.UTC), time.Date(2006, 1, 2, 15, 4, 4, 999999999, time.UTC)}
+			datehourvs := []time.Time{time.Date(2022, 12, 31, 23, 0, 0, 0, time.UTC), time.Date(1, 1, 1, 0, 0, 0, 0, time.UTC), time.Date(2006, 1, 2, 15, 0, 0, 0, time.UTC)}
+			uuidvs := []string{"5d212a78-cc48-e3b1-4235-b4d91473ee87", "00000000-0000-0000-0000-000000000000", "5d212a78-cc48-e3b1-4235-b4d91473ee89"}
+			ipaddrvs := []string{"192.163.1.12", "0.0.0.0", "127.0.0.1"}
+			int128vs := []string{"e1671797c52e15f763380b45e841ec32", "00000000000000000000000000000000", "e1671797c52e15f763380b45e841ec34"}
+			re2 := re.GetColumnByIndex(3)
+			for j := 0; j < 3; j++ {
+				if j == 1 {
+					So(re2.Get(j).IsNull(), ShouldBeTrue)
+				} else {
+					So(re2.Get(j).Value(), ShouldEqual, charvs[j])
+				}
+			}
+			re1 := res.(*model.Table).GetColumnByIndex(0)
+			var k int
+			for i := 0; i < int(re1.RowCount); i++ {
+				if re1.Get(i).Value() == complexvs[i] {
+					k++
+				}
+			}
+			So(k, ShouldEqual, re1.RowCount)
+			re1 = res.(*model.Table).GetColumnByIndex(1)
+			k = 0
+			for i := 0; i < int(re1.RowCount); i++ {
+				if re1.Get(i).Value() == stringvs[i] {
+					k++
+				}
+			}
+			So(k, ShouldEqual, re1.RowCount)
k++ + } + } + So(k, ShouldEqual, re1.RowCount) + re1 = res.(*model.Table).GetColumnByIndex(4) + k = 0 + for i := 0; i < int(re1.RowCount); i++ { + if re1.Get(i).Value() == shortvs[i] { + k++ + } + } + So(k, ShouldEqual, re1.RowCount) + re1 = res.(*model.Table).GetColumnByIndex(5) + k = 0 + for i := 0; i < int(re1.RowCount); i++ { + if re1.Get(i).Value() == longvs[i] { + k++ + } + } + So(k, ShouldEqual, re1.RowCount) + re1 = res.(*model.Table).GetColumnByIndex(6) + k = 0 + for i := 0; i < int(re1.RowCount); i++ { + if re1.Get(i).Value() == floatvs[i] { + k++ + } + } + So(k, ShouldEqual, re1.RowCount) + re1 = res.(*model.Table).GetColumnByIndex(7) + k = 0 + for i := 0; i < int(re1.RowCount); i++ { + if re1.Get(i).Value() == doublevs[i] { + k++ + } + } + So(k, ShouldEqual, re1.RowCount) + boolV := re.GetColumnByIndex(9) + for j := 0; j < 3; j++ { + if j == 1 { + So(boolV.IsNull(j), ShouldBeTrue) + } else { + re := boolV.Get(j).Value() + So(re, ShouldEqual, boolvs[j]) + } + } + re1 = res.(*model.Table).GetColumnByIndex(8) + k = 0 + for i := 0; i < int(re1.RowCount); i++ { + if re1.Get(i).Value() == pointvs[i] { + k++ + } + } + So(k, ShouldEqual, re1.RowCount) + re1 = res.(*model.Table).GetColumnByIndex(10) + k = 0 + for i := 0; i < int(re1.RowCount); i++ { + if re1.Get(i).Value() == datevs[i] { + k++ + } + } + So(k, ShouldEqual, re1.RowCount) + re1 = res.(*model.Table).GetColumnByIndex(11) + k = 0 + for i := 0; i < int(re1.RowCount); i++ { + if re1.Get(i).Value() == monthvs[i] { + k++ + } + } + So(k, ShouldEqual, re1.RowCount) + re1 = res.(*model.Table).GetColumnByIndex(12) + k = 0 + for i := 0; i < int(re1.RowCount); i++ { + if re1.Get(i).Value() == timevs[i] { + k++ + } + } + So(k, ShouldEqual, re1.RowCount) + re1 = res.(*model.Table).GetColumnByIndex(13) + k = 0 + for i := 0; i < int(re1.RowCount); i++ { + if re1.Get(i).Value() == minutevs[i] { + k++ + } + } + So(k, ShouldEqual, re1.RowCount) + re1 = res.(*model.Table).GetColumnByIndex(14) + k = 0 + for i := 0; i < int(re1.RowCount); i++ { + if re1.Get(i).Value() == secondvs[i] { + k++ + } + } + So(k, ShouldEqual, re1.RowCount) + re1 = res.(*model.Table).GetColumnByIndex(15) + k = 0 + for i := 0; i < int(re1.RowCount); i++ { + if re1.Get(i).Value() == datetimevs[i] { + k++ + } + } + So(k, ShouldEqual, re1.RowCount) + re1 = res.(*model.Table).GetColumnByIndex(16) + k = 0 + for i := 0; i < int(re1.RowCount); i++ { + if re1.Get(i).Value() == timestampvs[i] { + k++ + } + } + So(k, ShouldEqual, re1.RowCount) + re1 = res.(*model.Table).GetColumnByIndex(17) + k = 0 + for i := 0; i < int(re1.RowCount); i++ { + if re1.Get(i).Value() == nanotimevs[i] { + k++ + } + } + So(k, ShouldEqual, re1.RowCount) + re1 = res.(*model.Table).GetColumnByIndex(18) + k = 0 + for i := 0; i < int(re1.RowCount); i++ { + if re1.Get(i).Value() == nanotimestampvs[i] { + k++ + } + } + So(k, ShouldEqual, re1.RowCount) + re1 = res.(*model.Table).GetColumnByIndex(19) + k = 0 + for i := 0; i < int(re1.RowCount); i++ { + if re1.Get(i).Value() == datehourvs[i] { + k++ + } + } + So(k, ShouldEqual, re1.RowCount) + re1 = res.(*model.Table).GetColumnByIndex(20) + k = 0 + for i := 0; i < int(re1.RowCount); i++ { + if re1.Get(i).Value() == uuidvs[i] { + k++ + } + } + So(k, ShouldEqual, re1.RowCount) + re1 = res.(*model.Table).GetColumnByIndex(21) + k = 0 + for i := 0; i < int(re1.RowCount); i++ { + if re1.Get(i).Value() == ipaddrvs[i] { + k++ + } + } + So(k, ShouldEqual, re1.RowCount) + re1 = res.(*model.Table).GetColumnByIndex(22) + k = 0 + for i := 0; i < int(re1.RowCount); i++ { + if 
re1.Get(i).Value() == int128vs[i] { + k++ + } + } + So(k, ShouldEqual, re1.RowCount) + re1 = res.(*model.Table).GetColumnByIndex(23) + k = 0 + for i := 0; i < int(re1.RowCount); i++ { + if re1.Get(i).Value() == symbolvs[i] { + k++ + } + } + So(k, ShouldEqual, re1.RowCount) + }) + So(db.Close(), ShouldBeNil) + }) +} +func Test_Table_UpLoad_big_array(t *testing.T) { + Convey("Test_Table_upload_with_big_array:", t, func() { + db, err := api.NewSimpleDolphinDBClient(context.TODO(), setup.Address, setup.UserName, setup.Password) + So(err, ShouldBeNil) + var i int32 + intv := []int32{} + for i = 0; i < 3000000*12; i += 12 { + intv = append(intv, i) + } + intv = append(intv, model.NullInt) + col, err := model.NewDataTypeListWithRaw(model.DtInt, intv) + So(err, ShouldBeNil) + stringv := []string{} + for i = 0; i < 3000000*12; i += 12 { + stringv = append(stringv, "hello") + } + stringv = append(stringv, model.NullString) + col1, err := model.NewDataTypeListWithRaw(model.DtString, stringv) + So(err, ShouldBeNil) + allnull := []string{} + for i = 0; i < 3000001*12; i += 12 { + allnull = append(allnull, model.NullString) + } + allnullv, err := model.NewDataTypeListWithRaw(model.DtString, allnull) + So(err, ShouldBeNil) + tb := model.NewTable([]string{"int_v", "str_v", "all_null"}, []*model.Vector{model.NewVector(col), model.NewVector(col1), model.NewVector(allnullv)}) + _, err = db.Upload(map[string]model.DataForm{"s": tb}) + So(err, ShouldBeNil) + res, _ := db.RunScript("s") + ty, _ := db.RunScript("typestr(s)") + re := res.(*model.Table).GetColumnByIndex(0) + var k int + for i := 0; i < int(re.RowCount); i++ { + if re.Get(i).Value() == intv[i] { + k++ + } + } + So(k, ShouldEqual, re.RowCount) + re = res.(*model.Table).GetColumnByIndex(1) + k = 0 + for i := 0; i < int(re.RowCount); i++ { + if re.Get(i).Value() == stringv[i] { + k++ + } + } + So(k, ShouldEqual, re.RowCount) + re = res.(*model.Table).GetColumnByIndex(2) + k = 0 + for i := 0; i < int(re.RowCount); i++ { + if re.Get(i).Value() == allnull[i] { + k++ + } + } + So(k, ShouldEqual, re.RowCount) + So(ty.String(), ShouldEqual, "string(IN-MEMORY TABLE)") + So(db.Close(), ShouldBeNil) + }) +} diff --git a/test/basicTypeTest/basicVector_test.go b/test/basicTypeTest/basicVector_test.go new file mode 100644 index 0000000..dafcd47 --- /dev/null +++ b/test/basicTypeTest/basicVector_test.go @@ -0,0 +1,2345 @@ +package test + +import ( + "context" + "testing" + "time" + + "github.com/dolphindb/api-go/api" + "github.com/dolphindb/api-go/model" + "github.com/dolphindb/api-go/test/setup" + . 
"github.com/smartystreets/goconvey/convey" +) + +func Test_Vector_Download_Datatype_string(t *testing.T) { + Convey("Test_vector_string:", t, func() { + db, err := api.NewSimpleDolphinDBClient(context.TODO(), setup.Address, setup.UserName, setup.Password) + So(err, ShouldBeNil) + Convey("Test_vector_string_not_null:", func() { + s, err := db.RunScript("string(`ibm `你好 `yhoo)") + So(err, ShouldBeNil) + result := s.(*model.Vector) + re := result.Data.Value() + reType := result.GetDataType() + So(reType, ShouldEqual, 18) + reTypeString := result.GetDataTypeString() + So(reTypeString, ShouldEqual, "string") + zx := [3]string{"ibm", "你好", "yhoo"} + var k int + for i := 0; i < len(re); i++ { + if re[i] == zx[i] { + k++ + } + } + So(k, ShouldEqual, result.Data.Len()) + form := result.GetDataForm() + So(form, ShouldEqual, 1) + So(result.HashBucket(1, 1), ShouldEqual, 0) + }) + Convey("Test_vector_string_has_null:", func() { + s, err := db.RunScript("string(`ibm ` `yhoo)") + So(err, ShouldBeNil) + result := s.(*model.Vector) + re := result.Data.Value() + reType := result.GetDataType() + So(reType, ShouldEqual, 18) + reTypeString := result.GetDataTypeString() + So(reTypeString, ShouldEqual, "string") + zx := [3]string{"ibm", "", "yhoo"} + var k int + for i := 0; i < len(re); i++ { + if re[i] == zx[i] { + k++ + } + } + So(k, ShouldEqual, result.Data.Len()) + result1 := s.(*model.Vector).SetNull + So(result1, ShouldNotBeNil) + So(result.Get(1).IsNull(), ShouldEqual, true) + hush := result.HashBucket(1, 1) + So(hush, ShouldEqual, 0) + }) + Convey("Test_vector_string_all_null:", func() { + s, err := db.RunScript("string(` ` ` )") + So(err, ShouldBeNil) + result := s.(*model.Vector) + re := result.Data.Value() + var k int + for i := 0; i < len(re); i++ { + if result.IsNull(i) == true { + k++ + } + } + So(k, ShouldEqual, result.Data.Len()) + reType := result.GetDataType() + So(reType, ShouldEqual, 18) + reTypeString := result.GetDataTypeString() + So(reTypeString, ShouldEqual, "string") + }) + So(db.Close(), ShouldBeNil) + }) +} +func Test_Vector_Download_Datatype_any(t *testing.T) { + Convey("Test_vector_any:", t, func() { + db, err := api.NewSimpleDolphinDBClient(context.TODO(), setup.Address, setup.UserName, setup.Password) + So(err, ShouldBeNil) + Convey("Test_vector_any:", func() { + s, err := db.RunScript("(1,'a',3,'','97c','2022.03.08')") + So(err, ShouldBeNil) + result := s.(*model.Vector) + reType := result.GetDataType() + So(reType, ShouldEqual, 25) + reTypeString := result.GetDataTypeString() + So(reTypeString, ShouldEqual, "any") + result1 := s.(*model.Vector).SetNull + So(result1, ShouldNotBeNil) + hush := result.HashBucket(1, 1) + So(hush, ShouldEqual, 0) + }) + So(db.Close(), ShouldBeNil) + }) +} +func Test_Vector_Download_Datatype_char(t *testing.T) { + Convey("Test_vector_char:", t, func() { + db, err := api.NewSimpleDolphinDBClient(context.TODO(), setup.Address, setup.UserName, setup.Password) + So(err, ShouldBeNil) + Convey("Test_vector_char_not_null:", func() { + s, err := db.RunScript("2c 98c 127c") + So(err, ShouldBeNil) + result := s.(*model.Vector) + re := result.Data.Value() + reType := result.GetDataType() + So(reType, ShouldEqual, 2) + reTypeString := result.GetDataTypeString() + So(reTypeString, ShouldEqual, "char") + zx := [3]uint8{2, 98, 127} + for i := 0; i < len(re); i++ { + So(re[i], ShouldEqual, zx[i]) + } + row := result.Rows() + So(row, ShouldNotBeNil) + idex := 2 + result.SetNull(idex) + So(result.IsNull(idex), ShouldBeTrue) + }) + Convey("Test_vector_char_has_null:", 
func() { + s, err := db.RunScript("a = take(char[97c,,99c],3);a") + So(err, ShouldBeNil) + result := s.(*model.Vector) + re := result.Data.Value() + reType := result.GetDataType() + So(reType, ShouldEqual, 2) + reTypeString := result.GetDataTypeString() + So(reTypeString, ShouldEqual, "char") + zx := [3]uint8{97, 0, 99} + for i := 0; i < len(re); i++ { + if i == 1 { + So(result.IsNull(i), ShouldEqual, true) + } else { + So(re[i], ShouldEqual, zx[i]) + } + } + hush := result.HashBucket(1, 1) + So(hush, ShouldEqual, -1) + }) + Convey("Test_vector_char_all_null:", func() { + s, err := db.RunScript("a= char(` ` ` );a") + So(err, ShouldBeNil) + result := s.(*model.Vector) + re := result.Data.Value() + for i := 0; i < len(re); i++ { + So(result.IsNull(i), ShouldEqual, true) + } + reType := result.GetDataType() + So(reType, ShouldEqual, 2) + reTypeString := result.GetDataTypeString() + So(reTypeString, ShouldEqual, "char") + }) + So(db.Close(), ShouldBeNil) + }) +} +func Test_Vector_Download_Datatype_bool(t *testing.T) { + Convey("Test_vector_bool:", t, func() { + db, err := api.NewSimpleDolphinDBClient(context.TODO(), setup.Address, setup.UserName, setup.Password) + So(err, ShouldBeNil) + Convey("Test_vector_bool_not_null:", func() { + s, err := db.RunScript("true false true") + So(err, ShouldBeNil) + result := s.(*model.Vector) + re := result.Data.Value() + reType := result.GetDataType() + So(reType, ShouldEqual, 1) + reTypeString := result.GetDataTypeString() + So(reTypeString, ShouldEqual, "bool") + zx := [3]bool{true, false, true} + var k int + for i := 0; i < len(re); i++ { + if re[i] == zx[i] { + k++ + } + } + So(k, ShouldEqual, result.Data.Len()) + }) + Convey("Test_vector_bool_has_null:", func() { + s, err := db.RunScript("a = take(bool[true,,false],3);a") + So(err, ShouldBeNil) + result := s.(*model.Vector) + re := result.Data.Value() + reType := result.GetDataType() + So(reType, ShouldEqual, 1) + reTypeString := result.GetDataTypeString() + So(reTypeString, ShouldEqual, "bool") + zx := [3]bool{true, false, false} + var k int + for i := 0; i < len(re); i++ { + if i == 1 { + So(result.IsNull(i), ShouldEqual, true) + k++ + } else { + if re[i] == zx[i] { + k++ + } + } + } + So(k, ShouldEqual, result.Data.Len()) + }) + Convey("Test_vector_bool_all_null:", func() { + s, err := db.RunScript("take(00b, 3)") + So(err, ShouldBeNil) + result := s.(*model.Vector) + re := result.Data.Value() + for i := 0; i < len(re); i++ { + So(result.IsNull(i), ShouldEqual, true) + } + reType := result.GetDataType() + So(reType, ShouldEqual, 1) + reTypeString := result.GetDataTypeString() + So(reTypeString, ShouldEqual, "bool") + }) + So(db.Close(), ShouldBeNil) + }) +} +func Test_Vector_Download_Datatype_symbol(t *testing.T) { + Convey("Test_vector_symbol:", t, func() { + db, err := api.NewSimpleDolphinDBClient(context.TODO(), setup.Address, setup.UserName, setup.Password) + So(err, ShouldBeNil) + Convey("Test_vector_symbol_not_null:", func() { + s, err := db.RunScript("symbol(`a `b `c)") + So(err, ShouldBeNil) + result := s.(*model.Vector) + re := result.Data.Value() + zx := [3]string{"a", "b", "c"} + var k int + for i := 0; i < len(re); i++ { + if re[i] == zx[i] { + k++ + } + } + So(k, ShouldEqual, result.Data.Len()) + reType := result.GetDataType() + So(reType, ShouldEqual, 17) + reTypeString := result.GetDataTypeString() + So(reTypeString, ShouldEqual, "symbol") + }) + Convey("Test_vector_symbol_has_null:", func() { + s, err := db.RunScript("a = take(symbol[`a ,,`c],3);a") + So(err, ShouldBeNil) + result := 
s.(*model.Vector) + re := result.Data.Value() + reType := result.GetDataType() + So(reType, ShouldEqual, 17) + reTypeString := result.GetDataTypeString() + So(reTypeString, ShouldEqual, "symbol") + zx := [3]string{"a", "", "c"} + var k int + for i := 0; i < len(re); i++ { + if re[i] == zx[i] { + k++ + } + } + So(k, ShouldEqual, result.Data.Len()) + }) + Convey("Test_vector_symbol_all_null:", func() { + s, err := db.RunScript("a=symbol(` ` ` );a") + So(err, ShouldBeNil) + result := s.(*model.Vector) + re := result.Data.Value() + var k int + for i := 0; i < len(re); i++ { + if result.IsNull(i) == true { + k++ + } + } + So(k, ShouldEqual, result.Data.Len()) + reType := result.GetDataType() + So(reType, ShouldEqual, 145) + reTypeString := result.GetDataTypeString() + So(reTypeString, ShouldEqual, "symbolExtend") + }) + So(db.Close(), ShouldBeNil) + }) +} +func Test_Vector_Download_Datatype_int(t *testing.T) { + Convey("Test_vector_int:", t, func() { + db, err := api.NewSimpleDolphinDBClient(context.TODO(), setup.Address, setup.UserName, setup.Password) + So(err, ShouldBeNil) + Convey("Test_vector_int_not_null:", func() { + s, err := db.RunScript("123 -321 1234") + So(err, ShouldBeNil) + result := s.(*model.Vector) + re := result.Data.Value() + zx := [3]int32{123, -321, 1234} + var k int + for i := 0; i < len(re); i++ { + if re[i] == zx[i] { + k++ + } + } + So(k, ShouldEqual, result.Data.Len()) + reType := result.GetDataType() + So(reType, ShouldEqual, 4) + reTypeString := result.GetDataTypeString() + So(reTypeString, ShouldEqual, "int") + hush := result.HashBucket(1, 1) + So(hush, ShouldEqual, 0) + }) + Convey("Test_vector_int_has_null:", func() { + s, err := db.RunScript("a = take(int[123 ,, 1234],3);a") + So(err, ShouldBeNil) + result := s.(*model.Vector) + re := result.Data.Value() + zx := [3]int32{123, model.NullInt, 1234} + var k int + for i := 0; i < len(re); i++ { + if re[i] == zx[i] { + k++ + } + } + So(k, ShouldEqual, result.Data.Len()) + reType := result.GetDataType() + So(reType, ShouldEqual, 4) + reTypeString := result.GetDataTypeString() + So(reTypeString, ShouldEqual, "int") + }) + Convey("Test_vector_int_all_null:", func() { + s, err := db.RunScript("take(00i, 3)") + So(err, ShouldBeNil) + result := s.(*model.Vector) + re := result.Data.Value() + var k int + for i := 0; i < len(re); i++ { + if re[i] == model.NullInt { + k++ + } + } + So(k, ShouldEqual, result.Data.Len()) + reType := result.GetDataType() + So(reType, ShouldEqual, 4) + reTypeString := result.GetDataTypeString() + So(reTypeString, ShouldEqual, "int") + }) + So(db.Close(), ShouldBeNil) + }) +} +func Test_Vector_Download_Datatype_short(t *testing.T) { + Convey("Test_vector_short:", t, func() { + db, err := api.NewSimpleDolphinDBClient(context.TODO(), setup.Address, setup.UserName, setup.Password) + So(err, ShouldBeNil) + Convey("Test_vector_short_not_null:", func() { + s, err := db.RunScript("12h -32h 123h") + So(err, ShouldBeNil) + result := s.(*model.Vector) + re := result.Data.Value() + zx := [3]int16{12, -32, 123} + var k int + for i := 0; i < len(re); i++ { + if re[i] == zx[i] { + k++ + } + } + So(k, ShouldEqual, result.Data.Len()) + reType := result.GetDataType() + So(reType, ShouldEqual, 3) + reTypeString := result.GetDataTypeString() + So(reTypeString, ShouldEqual, "short") + get := result.Get(1).Value() + // Value() returns int16 for a SHORT scalar (the same type the zx comparisons above rely on); an int assertion would never match and the check below would be skipped silently + if v, ok := get.(int16); ok { + So(v, ShouldEqual, -32) + } + hush := result.HashBucket(1, 1) + So(hush, ShouldEqual, 0) + }) + Convey("Test_vector_short_has_null:", func() { + s, err := db.RunScript("a = 
take(short[-1258 ,, 17685],3);a") + So(err, ShouldBeNil) + result := s.(*model.Vector) + re := result.Data.Value() + zx := [3]int16{-1258, 0, 17685} + var k int + for i := 0; i < len(re); i++ { + if i == 1 { + if result.IsNull(i) == true { + k++ + } + } else { + if re[i] == zx[i] { + k++ + } + } + } + So(k, ShouldEqual, result.Data.Len()) + reType := result.GetDataType() + So(reType, ShouldEqual, 3) + reTypeString := result.GetDataTypeString() + So(reTypeString, ShouldEqual, "short") + }) + Convey("Test_vector_short_all_null:", func() { + s, err := db.RunScript("take(00h,3)") + So(err, ShouldBeNil) + result := s.(*model.Vector) + re := result.Data.Value() + var k int + for i := 0; i < len(re); i++ { + if result.IsNull(i) == true { + k++ + } + } + So(k, ShouldEqual, result.Data.Len()) + reType := result.GetDataType() + So(reType, ShouldEqual, 3) + reTypeString := result.GetDataTypeString() + So(reTypeString, ShouldEqual, "short") + }) + So(db.Close(), ShouldBeNil) + }) +} +func Test_Vector_Download_Datatype_long(t *testing.T) { + Convey("Test_vector_long:", t, func() { + db, err := api.NewSimpleDolphinDBClient(context.TODO(), setup.Address, setup.UserName, setup.Password) + So(err, ShouldBeNil) + Convey("Test_vector_long_not_null:", func() { + s, err := db.RunScript("12l -32l 123l") + So(err, ShouldBeNil) + result := s.(*model.Vector) + re := result.Data.Value() + zx := [3]int64{12, -32, 123} + var k int + for i := 0; i < len(re); i++ { + if re[i] == zx[i] { + k++ + } + } + So(k, ShouldEqual, result.Data.Len()) + reType := result.GetDataType() + So(reType, ShouldEqual, 5) + reTypeString := result.GetDataTypeString() + So(reTypeString, ShouldEqual, "long") + asof := result.AsOf(result.Get(0)) + So(asof, ShouldEqual, 1) + }) + Convey("Test_vector_long_has_null:", func() { + s, err := db.RunScript("a = take(long[1048576 ,, 1048578],3);a") + So(err, ShouldBeNil) + result := s.(*model.Vector) + re := result.Data.Value() + zx := [3]int64{1048576, model.NullLong, 1048578} + var k int + for i := 0; i < len(re); i++ { + if re[i] == zx[i] { + k++ + } + } + So(k, ShouldEqual, result.Data.Len()) + reType := result.GetDataType() + So(reType, ShouldEqual, 5) + reTypeString := result.GetDataTypeString() + So(reTypeString, ShouldEqual, "long") + }) + Convey("Test_vector_long_all_null:", func() { + s, err := db.RunScript("take(00l,3)") + So(err, ShouldBeNil) + result := s.(*model.Vector) + re := result.Data.Value() + var k int + for i := 0; i < len(re); i++ { + if re[i] == model.NullLong { + k++ + } + } + So(k, ShouldEqual, result.Data.Len()) + reType := result.GetDataType() + So(reType, ShouldEqual, 5) + reTypeString := result.GetDataTypeString() + So(reTypeString, ShouldEqual, "long") + hush := result.HashBucket(1, 1) + So(hush, ShouldEqual, -1) + }) + So(db.Close(), ShouldBeNil) + }) +} +func Test_Vector_Download_Datatype_double(t *testing.T) { + Convey("Test_vector_double:", t, func() { + db, err := api.NewSimpleDolphinDBClient(context.TODO(), setup.Address, setup.UserName, setup.Password) + So(err, ShouldBeNil) + Convey("Test_vector_double:", func() { + s, err := db.RunScript("12.0 -32.0 123.0") + So(err, ShouldBeNil) + result := s.(*model.Vector) + re := result.Data.Value() + zx := [3]float64{12.0, -32.0, 123.0} + var k int + for i := 0; i < len(re); i++ { + if re[i] == zx[i] { + k++ + } + } + So(k, ShouldEqual, result.Data.Len()) + reType := result.GetDataType() + So(reType, ShouldEqual, 16) + reTypeString := result.GetDataTypeString() + So(reTypeString, ShouldEqual, "double") + }) + 
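+ // Note on the null checks used throughout this file: the driver maps DolphinDB nulls to exported sentinels (model.NullDouble in the has_null case below; model.NullInt, model.NullLong and friends elsewhere), so a plain equality sweep over Data.Value() counts null slots too. For types without a usable sentinel, the per-row check already exercised above is the fallback, e.g.: + // for i := 0; i < result.Rows(); i++ { + // if result.IsNull(i) { + // k++ + // } + // }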
Convey("Test_vector_double_has_null:", func() { + s, err := db.RunScript("a = take(double[1048576.0 ,, 1048578.0],3);a") + So(err, ShouldBeNil) + result := s.(*model.Vector) + re := result.Data.Value() + zx := [3]float64{1048576, model.NullDouble, 1048578} + var k int + for i := 0; i < len(re); i++ { + if re[i] == zx[i] { + k++ + } + } + So(k, ShouldEqual, result.Data.Len()) + reType := result.GetDataType() + So(reType, ShouldEqual, 16) + reTypeString := result.GetDataTypeString() + So(reTypeString, ShouldEqual, "double") + }) + Convey("Test_vector_double_all_null:", func() { + s, err := db.RunScript("double(['','',''])") + So(err, ShouldBeNil) + result := s.(*model.Vector) + re := result.Data.Value() + var k int + for i := 0; i < len(re); i++ { + if re[i] == model.NullDouble { + k++ + } + } + So(k, ShouldEqual, result.Data.Len()) + reType := result.GetDataType() + So(reType, ShouldEqual, 16) + reTypeString := result.GetDataTypeString() + So(reTypeString, ShouldEqual, "double") + }) + So(db.Close(), ShouldBeNil) + }) +} +func Test_Vector_Download_Datatype_float(t *testing.T) { + Convey("Test_vector_float:", t, func() { + db, err := api.NewSimpleDolphinDBClient(context.TODO(), setup.Address, setup.UserName, setup.Password) + So(err, ShouldBeNil) + Convey("Test_vector_float_not_null:", func() { + s, err := db.RunScript("12.5f -32.5f 123.5f") + So(err, ShouldBeNil) + result := s.(*model.Vector) + re := result.Data.Value() + zx := [3]float32{12.5, -32.5, 123.5} + var k int + for i := 0; i < len(re); i++ { + if re[i] == zx[i] { + k++ + } + } + So(k, ShouldEqual, result.Data.Len()) + reType := result.GetDataType() + So(reType, ShouldEqual, 15) + reTypeString := result.GetDataTypeString() + So(reTypeString, ShouldEqual, "float") + }) + Convey("Test_vector_float_has_null:", func() { + s, err := db.RunScript("a = take(float[1048576.0 ,, 1048578.0],3);a") + So(err, ShouldBeNil) + result := s.(*model.Vector) + re := result.Data.Value() + zx := [3]float32{1048576, model.NullFloat, 1048578} + var k int + for i := 0; i < len(re); i++ { + if re[i] == zx[i] { + k++ + } + } + So(k, ShouldEqual, result.Data.Len()) + reType := result.GetDataType() + So(reType, ShouldEqual, 15) + reTypeString := result.GetDataTypeString() + So(reTypeString, ShouldEqual, "float") + }) + Convey("Test_vector_float_all_null:", func() { + s, err := db.RunScript("take(00f, 3)") + So(err, ShouldBeNil) + result := s.(*model.Vector) + re := result.Data.Value() + var k int + for i := 0; i < len(re); i++ { + if re[i] == model.NullFloat { + k++ + } + } + So(k, ShouldEqual, result.Data.Len()) + reType := result.GetDataType() + So(reType, ShouldEqual, 15) + reTypeString := result.GetDataTypeString() + So(reTypeString, ShouldEqual, "float") + }) + So(db.Close(), ShouldBeNil) + }) +} +func Test_Vector_Download_Datatype_date(t *testing.T) { + Convey("Test_vector_date:", t, func() { + db, err := api.NewSimpleDolphinDBClient(context.TODO(), setup.Address, setup.UserName, setup.Password) + So(err, ShouldBeNil) + Convey("Test_vector_date_not_null:", func() { + s, err := db.RunScript("1969.12.31 1970.01.01 1970.01.02 2006.01.02 2006.01.03 2022.08.03") + So(err, ShouldBeNil) + result := s.(*model.Vector) + re := result.Data.Value() + time1 := []time.Time{time.Date(1969, 12, 31, 0, 0, 0, 0, time.UTC), time.Date(1970, 1, 1, 0, 0, 0, 0, time.UTC), time.Date(1970, 1, 2, 0, 0, 0, 0, time.UTC), time.Date(2006, 1, 2, 0, 0, 0, 0, time.UTC), time.Date(2006, 1, 3, 0, 0, 0, 0, time.UTC), time.Date(2022, 8, 3, 0, 0, 0, 0, time.UTC)} + var k int + for i := 0; i 
< len(re); i++ { + if re[i] == time1[i] { + k++ + } + } + So(k, ShouldEqual, result.Rows()) + reType := result.GetDataType() + So(reType, ShouldEqual, 6) + reTypeString := result.GetDataTypeString() + So(reTypeString, ShouldEqual, "date") + }) + Convey("Test_vector_date_has_null:", func() { + s, err := db.RunScript("a = take(date[2022.07.29,,2022.07.31],3);a") + So(err, ShouldBeNil) + result := s.(*model.Vector) + re := result.Data.Value() + time1 := [3]string{"2022-07-29 00:00:00", "", "2022-07-31 00:00:00"} + t0, _ := time.Parse("2006-01-02 15:04:05", time1[0]) + t2, _ := time.Parse("2006-01-02 15:04:05", time1[2]) + So(re[0], ShouldEqual, t0) + So(re[2], ShouldEqual, t2) + reType := result.GetDataType() + So(reType, ShouldEqual, 6) + reTypeString := result.GetDataTypeString() + So(reTypeString, ShouldEqual, "date") + }) + Convey("Test_vector_date_all_null:", func() { + s, err := db.RunScript("take(00d, 3)") + So(err, ShouldBeNil) + result := s.(*model.Vector) + re := result.Data.Value() + var k int + for i := 0; i < len(re); i++ { + if result.IsNull(i) == true { + k++ + } + } + So(k, ShouldEqual, result.Data.Len()) + reType := result.GetDataType() + So(reType, ShouldEqual, 6) + reTypeString := result.GetDataTypeString() + So(reTypeString, ShouldEqual, "date") + }) + So(db.Close(), ShouldBeNil) + }) +} +func Test_Vector_Download_Datatype_month(t *testing.T) { + Convey("Test_vector_month:", t, func() { + db, err := api.NewSimpleDolphinDBClient(context.TODO(), setup.Address, setup.UserName, setup.Password) + So(err, ShouldBeNil) + Convey("Test_vector_month_not_null:", func() { + s, err := db.RunScript("1969.12M 1970.01M 1970.02M 2006.01M 2006.02M 2022.08M") + So(err, ShouldBeNil) + result := s.(*model.Vector) + re := result.Data.Value() + time1 := []time.Time{time.Date(1969, 12, 1, 0, 0, 0, 0, time.UTC), time.Date(1970, 1, 1, 0, 0, 0, 0, time.UTC), time.Date(1970, 2, 1, 0, 0, 0, 0, time.UTC), time.Date(2006, 1, 1, 0, 0, 0, 0, time.UTC), time.Date(2006, 2, 1, 0, 0, 0, 0, time.UTC), time.Date(2022, 8, 1, 0, 0, 0, 0, time.UTC)} + var k int + for i := 0; i < len(re); i++ { + if re[i] == time1[i] { + k++ + } + } + So(k, ShouldEqual, result.Rows()) + reType := result.GetDataType() + So(reType, ShouldEqual, 7) + reTypeString := result.GetDataTypeString() + So(reTypeString, ShouldEqual, "month") + }) + Convey("Test_vector_month_has_null:", func() { + s, err := db.RunScript("a = take(month[2022.07M,,2022.09M],3);a") + So(err, ShouldBeNil) + result := s.(*model.Vector) + re := result.Data.Value() + time1 := [3]string{"2022-07", "", "2022-09"} + t0, _ := time.Parse("2006-01", time1[0]) + t2, _ := time.Parse("2006-01", time1[2]) + So(re[0], ShouldEqual, t0) + So(re[2], ShouldEqual, t2) + reType := result.GetDataType() + So(reType, ShouldEqual, 7) + reTypeString := result.GetDataTypeString() + So(reTypeString, ShouldEqual, "month") + }) + Convey("Test_vector_month_all_null:", func() { + s, err := db.RunScript("month(['','',''])") + So(err, ShouldBeNil) + result := s.(*model.Vector) + re := result.Data.Value() + var k int + for i := 0; i < len(re); i++ { + if result.IsNull(i) == true { + k++ + } + } + So(k, ShouldEqual, result.Data.Len()) + reType := result.GetDataType() + So(reType, ShouldEqual, 7) + reTypeString := result.GetDataTypeString() + So(reTypeString, ShouldEqual, "month") + }) + So(db.Close(), ShouldBeNil) + }) +} +func Test_Vector_Download_Datatype_time(t *testing.T) { + Convey("Test_vector_time:", t, func() { + db, err := api.NewSimpleDolphinDBClient(context.TODO(), setup.Address, 
setup.UserName, setup.Password) + So(err, ShouldBeNil) + Convey("Test_vector_time_not_null:", func() { + s, err := db.RunScript("23:59:59.999 00:00:00.000 00:00:01.999 15:04:04.999 15:04:05.000 15:00:15.000") + So(err, ShouldBeNil) + result := s.(*model.Vector) + re := result.Data.Value() + time1 := []time.Time{time.Date(1970, 1, 1, 23, 59, 59, 999000000, time.UTC), time.Date(1970, 1, 1, 0, 0, 0, 0, time.UTC), time.Date(1970, 1, 1, 0, 0, 1, 999000000, time.UTC), time.Date(1970, 1, 1, 15, 4, 4, 999000000, time.UTC), time.Date(1970, 1, 1, 15, 4, 5, 0, time.UTC), time.Date(1970, 1, 1, 15, 0, 15, 0, time.UTC)} + var k int + for i := 0; i < len(re); i++ { + if re[i] == time1[i] { + k++ + } + } + So(k, ShouldEqual, result.Rows()) + reType := result.GetDataType() + So(reType, ShouldEqual, 8) + reTypeString := result.GetDataTypeString() + So(reTypeString, ShouldEqual, "time") + }) + Convey("Test_vector_time_has_null:", func() { + s, err := db.RunScript("a = take(time[15:59:23.001,,15:59:23.003],3);a") + So(err, ShouldBeNil) + result := s.(*model.Vector) + re := result.Data.Value() + time1 := [3]string{"1970-01-01T15:59:23.001", "", "1970-01-01T15:59:23.003"} + t0, _ := time.Parse("2006-01-02T15:04:05.000", time1[0]) + t2, _ := time.Parse("2006-01-02T15:04:05.000", time1[2]) + So(re[0], ShouldEqual, t0) + So(re[2], ShouldEqual, t2) + reType := result.GetDataType() + So(reType, ShouldEqual, 8) + reTypeString := result.GetDataTypeString() + So(reTypeString, ShouldEqual, "time") + }) + Convey("Test_vector_time_all_null:", func() { + s, err := db.RunScript("time(['','',''])") + So(err, ShouldBeNil) + result := s.(*model.Vector) + re := result.Data.Value() + var k int + for i := 0; i < len(re); i++ { + if result.IsNull(i) == true { + k++ + } + } + So(k, ShouldEqual, result.Data.Len()) + reType := result.GetDataType() + So(reType, ShouldEqual, 8) + reTypeString := result.GetDataTypeString() + So(reTypeString, ShouldEqual, "time") + }) + So(db.Close(), ShouldBeNil) + }) +} +func Test_Vector_Download_Datatype_minute(t *testing.T) { + Convey("Test_vector_minute:", t, func() { + db, err := api.NewSimpleDolphinDBClient(context.TODO(), setup.Address, setup.UserName, setup.Password) + So(err, ShouldBeNil) + Convey("Test_vector_minute_not_null:", func() { + s, err := db.RunScript("23:59m 00:00m 00:01m 15:04m 15:05m 15:15m") + So(err, ShouldBeNil) + result := s.(*model.Vector) + re := result.Data.Value() + time1 := []time.Time{time.Date(1970, 1, 1, 23, 59, 0, 0, time.UTC), time.Date(1970, 1, 1, 0, 0, 0, 0, time.UTC), time.Date(1970, 1, 1, 0, 1, 0, 0, time.UTC), time.Date(1970, 1, 1, 15, 4, 0, 0, time.UTC), time.Date(1970, 1, 1, 15, 5, 0, 0, time.UTC), time.Date(1970, 1, 1, 15, 15, 0, 0, time.UTC)} + var k int + for i := 0; i < len(re); i++ { + if re[i] == time1[i] { + k++ + } + } + So(k, ShouldEqual, result.Rows()) + reType := result.GetDataType() + So(reType, ShouldEqual, 9) + reTypeString := result.GetDataTypeString() + So(reTypeString, ShouldEqual, "minute") + }) + Convey("Test_vector_minute_has_null:", func() { + s, err := db.RunScript("a = take(minute[15:56m,,15:58m],3);a") + So(err, ShouldBeNil) + result := s.(*model.Vector) + re := result.Data.Value() + time1 := [3]string{"1970-01-01T15:56", "", "1970-01-01T15:58"} + t0, _ := time.Parse("2006-01-02T15:04", time1[0]) + t2, _ := time.Parse("2006-01-02T15:04", time1[2]) + So(re[0], ShouldEqual, t0) + So(re[2], ShouldEqual, t2) + reType := result.GetDataType() + So(reType, ShouldEqual, 9) + reTypeString := result.GetDataTypeString() + So(reTypeString, 
ShouldEqual, "minute") + }) + Convey("Test_vector_minute_all_null:", func() { + s, err := db.RunScript("take(00m, 3)") + So(err, ShouldBeNil) + result := s.(*model.Vector) + re := result.Data.Value() + var k int + for i := 0; i < len(re); i++ { + if result.IsNull(i) == true { + k++ + } + } + So(k, ShouldEqual, result.Data.Len()) + reType := result.GetDataType() + So(reType, ShouldEqual, 9) + reTypeString := result.GetDataTypeString() + So(reTypeString, ShouldEqual, "minute") + }) + So(db.Close(), ShouldBeNil) + }) +} +func Test_Vector_Download_Datatype_second(t *testing.T) { + Convey("Test_vector_second:", t, func() { + db, err := api.NewSimpleDolphinDBClient(context.TODO(), setup.Address, setup.UserName, setup.Password) + So(err, ShouldBeNil) + Convey("Test_vector_second_not_null:", func() { + s, err := db.RunScript("23:59:59 00:00:00 00:00:01 15:04:04 15:04:05 15:00:15") + So(err, ShouldBeNil) + result := s.(*model.Vector) + re := result.Data.Value() + time1 := []time.Time{time.Date(1970, 1, 1, 23, 59, 59, 0, time.UTC), time.Date(1970, 1, 1, 0, 0, 0, 0, time.UTC), time.Date(1970, 1, 1, 0, 0, 1, 0, time.UTC), time.Date(1970, 1, 1, 15, 4, 4, 0, time.UTC), time.Date(1970, 1, 1, 15, 4, 5, 0, time.UTC), time.Date(1970, 1, 1, 15, 0, 15, 0, time.UTC)} + var k int + for i := 0; i < len(re); i++ { + if re[i] == time1[i] { + k++ + } + } + So(k, ShouldEqual, result.Rows()) + reType := result.GetDataType() + So(reType, ShouldEqual, 10) + reTypeString := result.GetDataTypeString() + So(reTypeString, ShouldEqual, "second") + }) + Convey("Test_vector_second_has_null:", func() { + s, err := db.RunScript("a = take(second[15:55:01,,15:55:03],3);a") + So(err, ShouldBeNil) + result := s.(*model.Vector) + re := result.Data.Value() + time1 := [3]string{"1970-01-01T15:55:01", "", "1970-01-01T15:55:03"} + t0, _ := time.Parse("2006-01-02T15:04:05", time1[0]) + t2, _ := time.Parse("2006-01-02T15:04:05", time1[2]) + So(re[0], ShouldEqual, t0) + So(re[2], ShouldEqual, t2) + reType := result.GetDataType() + So(reType, ShouldEqual, 10) + reTypeString := result.GetDataTypeString() + So(reTypeString, ShouldEqual, "second") + }) + Convey("Test_vector_second_all_null:", func() { + s, err := db.RunScript("take(00s, 3)") + So(err, ShouldBeNil) + result := s.(*model.Vector) + re := result.Data.Value() + var k int + for i := 0; i < len(re); i++ { + if result.IsNull(i) == true { + k++ + } + } + So(k, ShouldEqual, result.Data.Len()) + reType := result.GetDataType() + So(reType, ShouldEqual, 10) + reTypeString := result.GetDataTypeString() + So(reTypeString, ShouldEqual, "second") + }) + So(db.Close(), ShouldBeNil) + }) +} +func Test_Vector_Download_Datatype_datetime(t *testing.T) { + Convey("Test_vector_datetime:", t, func() { + db, err := api.NewSimpleDolphinDBClient(context.TODO(), setup.Address, setup.UserName, setup.Password) + So(err, ShouldBeNil) + Convey("Test_vector_datetime_not_null:", func() { + s, err := db.RunScript("1969.12.31T23:59:59 1970.01.01T00:00:00 1970.01.01T00:00:01 2006.01.02T15:04:04 2006.01.02T15:04:05 2022.08.03T15:00:15") + So(err, ShouldBeNil) + result := s.(*model.Vector) + re := result.Data.Value() + time1 := []time.Time{time.Date(1969, 12, 31, 23, 59, 59, 0, time.UTC), time.Date(1970, 1, 1, 0, 0, 0, 0, time.UTC), time.Date(1970, 1, 1, 0, 0, 1, 0, time.UTC), time.Date(2006, 1, 2, 15, 4, 4, 0, time.UTC), time.Date(2006, 1, 2, 15, 4, 5, 0, time.UTC), time.Date(2022, 8, 3, 15, 0, 15, 0, time.UTC)} + var k int + for i := 0; i < len(re); i++ { + if re[i] == time1[i] { + k++ + } + } + So(k, ShouldEqual, 
result.Rows()) + reType := result.GetDataType() + So(reType, ShouldEqual, 11) + reTypeString := result.GetDataTypeString() + So(reTypeString, ShouldEqual, "datetime") + }) + Convey("Test_vector_datetime_has_null:", func() { + s, err := db.RunScript("a = take(datetime[2022.07.29 15:33:33,,2022.07.29 15:33:35],3);a") + So(err, ShouldBeNil) + result := s.(*model.Vector) + re := result.Data.Value() + time1 := [3]string{"2022-07-29T15:33:33", "", "2022-07-29T15:33:35"} + t0, _ := time.Parse("2006-01-02T15:04:05", time1[0]) + t2, _ := time.Parse("2006-01-02T15:04:05", time1[2]) + So(re[0], ShouldEqual, t0) + So(re[2], ShouldEqual, t2) + reType := result.GetDataType() + So(reType, ShouldEqual, 11) + reTypeString := result.GetDataTypeString() + So(reTypeString, ShouldEqual, "datetime") + }) + Convey("Test_vector_datetime_all_null:", func() { + s, err := db.RunScript("datetime(['','',''])") + So(err, ShouldBeNil) + result := s.(*model.Vector) + re := result.Data.Value() + var k int + for i := 0; i < len(re); i++ { + if result.IsNull(i) == true { + k++ + } + } + So(k, ShouldEqual, result.Data.Len()) + reType := result.GetDataType() + So(reType, ShouldEqual, 11) + reTypeString := result.GetDataTypeString() + So(reTypeString, ShouldEqual, "datetime") + }) + So(db.Close(), ShouldBeNil) + }) +} +func Test_Vector_Download_Datatype_timestamp(t *testing.T) { + Convey("Test_vector_timestamp:", t, func() { + db, err := api.NewSimpleDolphinDBClient(context.TODO(), setup.Address, setup.UserName, setup.Password) + So(err, ShouldBeNil) + Convey("Test_vector_timestamp_not_null:", func() { + s, err := db.RunScript("1969.12.31T23:59:59.999 1970.01.01T00:00:00.000 1970.01.01T00:00:01.999 2006.01.02T15:04:04.999 2006.01.02T15:04:05.000 2022.08.03T15:00:15.000") + So(err, ShouldBeNil) + result := s.(*model.Vector) + re := result.Data.Value() + time1 := []time.Time{time.Date(1969, 12, 31, 23, 59, 59, 999000000, time.UTC), time.Date(1970, 1, 1, 0, 0, 0, 0, time.UTC), time.Date(1970, 1, 1, 0, 0, 1, 999000000, time.UTC), time.Date(2006, 1, 2, 15, 4, 4, 999000000, time.UTC), time.Date(2006, 1, 2, 15, 4, 5, 0, time.UTC), time.Date(2022, 8, 3, 15, 0, 15, 0, time.UTC)} + var k int + for i := 0; i < len(re); i++ { + if re[i] == time1[i] { + k++ + } + } + So(k, ShouldEqual, result.Rows()) + reType := result.GetDataType() + So(reType, ShouldEqual, 12) + reTypeString := result.GetDataTypeString() + So(reTypeString, ShouldEqual, "timestamp") + }) + Convey("Test_vector_timestamp_has_null:", func() { + s, err := db.RunScript("a = take(timestamp[2022.07.29 15:00:04.201,,2022.07.29 15:00:04.203],3);a") + So(err, ShouldBeNil) + result := s.(*model.Vector) + re := result.Data.Value() + time1 := [3]string{"2022-07-29T15:00:04.201", "", "2022-07-29T15:00:04.203"} + t0, _ := time.Parse("2006-01-02T15:04:05", time1[0]) + t2, _ := time.Parse("2006-01-02T15:04:05", time1[2]) + So(re[0], ShouldEqual, t0) + So(re[2], ShouldEqual, t2) + reType := result.GetDataType() + So(reType, ShouldEqual, 12) + reTypeString := result.GetDataTypeString() + So(reTypeString, ShouldEqual, "timestamp") + }) + Convey("Test_vector_timestamp_all_null:", func() { + s, err := db.RunScript("timestamp(['','',''])") + So(err, ShouldBeNil) + result := s.(*model.Vector) + re := result.Data.Value() + var k int + for i := 0; i < len(re); i++ { + if result.IsNull(i) == true { + k++ + } + } + So(k, ShouldEqual, result.Data.Len()) + reType := result.GetDataType() + So(reType, ShouldEqual, 12) + reTypeString := result.GetDataTypeString() + So(reTypeString, ShouldEqual, 
"timestamp") + }) + So(db.Close(), ShouldBeNil) + }) +} +func Test_Vector_Download_Datatype_nanotime(t *testing.T) { + Convey("Test_vector_nanotime:", t, func() { + db, err := api.NewSimpleDolphinDBClient(context.TODO(), setup.Address, setup.UserName, setup.Password) + So(err, ShouldBeNil) + Convey("Test_vector_nanotime_not_null:", func() { + s, err := db.RunScript("23:59:59.999999999 00:00:00.000000000 00:00:01.999999999 15:04:04.999999999 15:04:05.000000000 15:00:15.000000000") + So(err, ShouldBeNil) + result := s.(*model.Vector) + re := result.Data.Value() + time1 := []time.Time{time.Date(1970, 1, 1, 23, 59, 59, 999999999, time.UTC), time.Date(1970, 1, 1, 0, 0, 0, 0, time.UTC), time.Date(1970, 1, 1, 0, 0, 1, 999999999, time.UTC), time.Date(1970, 1, 1, 15, 4, 4, 999999999, time.UTC), time.Date(1970, 1, 1, 15, 4, 5, 0, time.UTC), time.Date(1970, 1, 1, 15, 0, 15, 0, time.UTC)} + var k int + for i := 0; i < len(re); i++ { + if re[i] == time1[i] { + k++ + } + } + So(k, ShouldEqual, result.Rows()) + reType := result.GetDataType() + So(reType, ShouldEqual, 13) + reTypeString := result.GetDataTypeString() + So(reTypeString, ShouldEqual, "nanotime") + }) + Convey("Test_vector_nanotime_has_null:", func() { + s, err := db.RunScript("a = take(nanotime[15:00:04.000000201,,15:00:04.000000203],3);a") + So(err, ShouldBeNil) + result := s.(*model.Vector) + re := result.Data.Value() + time1 := [3]string{"1970-01-01 15:00:04.000000201", "", "1970-01-01 15:00:04.000000203"} + t0, _ := time.Parse("2006-01-02 15:04:05.000000000", time1[0]) + t2, _ := time.Parse("2006-01-02 15:04:05.000000000", time1[2]) + So(re[0], ShouldEqual, t0) + So(re[2], ShouldEqual, t2) + reType := result.GetDataType() + So(reType, ShouldEqual, 13) + reTypeString := result.GetDataTypeString() + So(reTypeString, ShouldEqual, "nanotime") + }) + Convey("Test_vector_nanotime_all_null:", func() { + s, err := db.RunScript("nanotime(['','',''])") + So(err, ShouldBeNil) + result := s.(*model.Vector) + re := result.Data.Value() + var k int + for i := 0; i < len(re); i++ { + if result.IsNull(i) == true { + k++ + } + } + So(k, ShouldEqual, result.Data.Len()) + reType := result.GetDataType() + So(reType, ShouldEqual, 13) + reTypeString := result.GetDataTypeString() + So(reTypeString, ShouldEqual, "nanotime") + }) + So(db.Close(), ShouldBeNil) + }) +} +func Test_Vector_Download_Datatype_nanotimestamp(t *testing.T) { + Convey("Test_vector_nanotimestamp:", t, func() { + db, err := api.NewSimpleDolphinDBClient(context.TODO(), setup.Address, setup.UserName, setup.Password) + So(err, ShouldBeNil) + Convey("Test_vector_nanotimestamp_not_null:", func() { + s, err := db.RunScript("1969.12.31T23:59:59.999999999 1970.01.01T00:00:00.000000000 1970.01.01T00:00:01.999999999 2006.01.02T15:04:04.999999999 2006.01.02T15:04:05.000000000 2022.08.03T15:00:15.000000000") + So(err, ShouldBeNil) + result := s.(*model.Vector) + re := result.Data.Value() + time1 := []time.Time{time.Date(1969, 12, 31, 23, 59, 59, 999999999, time.UTC), time.Date(1970, 1, 1, 0, 0, 0, 0, time.UTC), time.Date(1970, 1, 1, 0, 0, 1, 999999999, time.UTC), time.Date(2006, 1, 2, 15, 4, 4, 999999999, time.UTC), time.Date(2006, 1, 2, 15, 4, 5, 0, time.UTC), time.Date(2022, 8, 3, 15, 0, 15, 0, time.UTC)} + var k int + for i := 0; i < len(re); i++ { + if re[i] == time1[i] { + k++ + } + } + So(k, ShouldEqual, result.Rows()) + reType := result.GetDataType() + So(reType, ShouldEqual, 14) + reTypeString := result.GetDataTypeString() + So(reTypeString, ShouldEqual, "nanotimestamp") + }) + 
Convey("Test_vector_nanotimestamp_has_null:", func() { + s, err := db.RunScript("a = take(nanotimestamp[2022.07.29 15:00:04.000000201,,2022.07.29 15:00:04.000000203],3);a") + So(err, ShouldBeNil) + result := s.(*model.Vector) + re := result.Data.Value() + time1 := [3]string{"2022-07-29T15:00:04.000000201", "", "2022-07-29T15:00:04.000000203"} + t0, _ := time.Parse("2006-01-02T15:04:05.000000000", time1[0]) + t2, _ := time.Parse("2006-01-02T15:04:05.000000000", time1[2]) + So(re[0], ShouldEqual, t0) + So(re[2], ShouldEqual, t2) + reType := result.GetDataType() + So(reType, ShouldEqual, 14) + reTypeString := result.GetDataTypeString() + So(reTypeString, ShouldEqual, "nanotimestamp") + }) + Convey("Test_vector_nanotimestamp_all_null:", func() { + s, err := db.RunScript("nanotimestamp(['','',''])") + So(err, ShouldBeNil) + result := s.(*model.Vector) + re := result.Data.Value() + var k int + for i := 0; i < len(re); i++ { + if result.IsNull(i) == true { + k++ + } + } + So(k, ShouldEqual, result.Data.Len()) + reType := result.GetDataType() + So(reType, ShouldEqual, 14) + reTypeString := result.GetDataTypeString() + So(reTypeString, ShouldEqual, "nanotimestamp") + }) + So(db.Close(), ShouldBeNil) + }) +} +func Test_Vector_Download_Datatype_datehour(t *testing.T) { + Convey("Test_vector_datehour:", t, func() { + db, err := api.NewSimpleDolphinDBClient(context.TODO(), setup.Address, setup.UserName, setup.Password) + So(err, ShouldBeNil) + Convey("Test_vector_datehour_not_null:", func() { + s, err := db.RunScript("datehour[1969.12.31T23:59:59.999, 1970.01.01T00:00:00.000, 2006.01.02T15:04:04.999]") + So(err, ShouldBeNil) + result := s.(*model.Vector) + re := result.Data.Value() + time1 := []time.Time{time.Date(1969, 12, 31, 23, 0, 0, 0, time.UTC), time.Date(1970, 1, 1, 0, 0, 0, 0, time.UTC), time.Date(2006, 1, 2, 15, 0, 0, 0, time.UTC)} + var k int + for i := 0; i < len(re); i++ { + if re[i] == time1[i] { + k++ + } + } + So(k, ShouldEqual, result.Rows()) + reType := result.GetDataType() + So(reType, ShouldEqual, 28) + reTypeString := result.GetDataTypeString() + So(reTypeString, ShouldEqual, "dateHour") + }) + Convey("Test_vector_datehour_has_null:", func() { + s, err := db.RunScript("a = take(datehour[2022.07.29 15:00:00.000,,2022.07.29 17:00:00.000],3);a") + So(err, ShouldBeNil) + result := s.(*model.Vector) + re := result.Data.Value() + time1 := [3]string{"2022-07-29T15", "", "2022-07-29T17"} + t0, _ := time.Parse("2006-01-02T15", time1[0]) + t2, _ := time.Parse("2006-01-02T15", time1[2]) + So(re[0], ShouldEqual, t0) + So(re[2], ShouldEqual, t2) + reType := result.GetDataType() + So(reType, ShouldEqual, 28) + reTypeString := result.GetDataTypeString() + So(reTypeString, ShouldEqual, "dateHour") + }) + Convey("Test_vector_datehour_all_null:", func() { + s, err := db.RunScript("datehour(['','',''])") + So(err, ShouldBeNil) + result := s.(*model.Vector) + re := result.Data.Value() + var k int + for i := 0; i < len(re); i++ { + if result.IsNull(i) == true { + k++ + } + } + So(k, ShouldEqual, result.Data.Len()) + reType := result.GetDataType() + So(reType, ShouldEqual, 28) + reTypeString := result.GetDataTypeString() + So(reTypeString, ShouldEqual, "dateHour") + }) + So(db.Close(), ShouldBeNil) + }) +} +func Test_Vector_Download_Datatype_uuid(t *testing.T) { + Convey("Test_vector_uuid:", t, func() { + db, err := api.NewSimpleDolphinDBClient(context.TODO(), setup.Address, setup.UserName, setup.Password) + So(err, ShouldBeNil) + Convey("Test_vector_uuid_not_null:", func() { + s, err := 
db.RunScript("uuid(['5d212a78-cc48-e3b1-4235-b4d91473ee87', '5d212a78-cc48-e3b1-4235-b4d91473ee88', '5d212a78-cc48-e3b1-4235-b4d91473ee89'])") + So(err, ShouldBeNil) + result := s.(*model.Vector) + re := result.Data.Value() + zx := [3]string{"5d212a78-cc48-e3b1-4235-b4d91473ee87", "5d212a78-cc48-e3b1-4235-b4d91473ee88", "5d212a78-cc48-e3b1-4235-b4d91473ee89"} + var k int + for i := 0; i < len(re); i++ { + if re[i] == zx[i] { + k++ + } + } + So(k, ShouldEqual, result.Data.Len()) + reType := result.GetDataType() + So(reType, ShouldEqual, 19) + reTypeString := result.GetDataTypeString() + So(reTypeString, ShouldEqual, "uuid") + }) + Convey("Test_vector_uuid_has_null:", func() { + s, err := db.RunScript("a = take(uuid['5d212a78-cc48-e3b1-4235-b4d91473ee87', '', '5d212a78-cc48-e3b1-4235-b4d91473ee89'],3);a") + So(err, ShouldBeNil) + result := s.(*model.Vector) + re := result.Data.Value() + zx := [3]string{"5d212a78-cc48-e3b1-4235-b4d91473ee87", "", "5d212a78-cc48-e3b1-4235-b4d91473ee89"} + var k int + for i := 0; i < len(re); i++ { + if i == 1 { + if result.IsNull(i) == true { + k++ + } + } else { + if re[i] == zx[i] { + k++ + } + } + } + So(k, ShouldEqual, result.Data.Len()) + reType := result.GetDataType() + So(reType, ShouldEqual, 19) + reTypeString := result.GetDataTypeString() + So(reTypeString, ShouldEqual, "uuid") + }) + Convey("Test_vector_uuid_all_null:", func() { + s, err := db.RunScript("uuid(['','',''])") + So(err, ShouldBeNil) + result := s.(*model.Vector) + re := result.Data.Value() + for i := 0; i < len(re); i++ { + So(result.IsNull(i), ShouldEqual, true) + } + var k int + for i := 0; i < len(re); i++ { + if result.IsNull(i) == true { + k++ + } + } + So(k, ShouldEqual, result.Data.Len()) + reType := result.GetDataType() + So(reType, ShouldEqual, 19) + reTypeString := result.GetDataTypeString() + So(reTypeString, ShouldEqual, "uuid") + }) + So(db.Close(), ShouldBeNil) + }) +} +func Test_Vector_Download_Datatype_ipaddr(t *testing.T) { + Convey("Test_vector_ipaddr:", t, func() { + db, err := api.NewSimpleDolphinDBClient(context.TODO(), setup.Address, setup.UserName, setup.Password) + So(err, ShouldBeNil) + Convey("Test_vector_ipaddr_not_null:", func() { + s, err := db.RunScript("ipaddr(['461c:7fa1:7f3c:7249:5278:c610:f595:d174', '3de8:13c6:df5f:bcd5:7605:3827:e37a:3a72', '127e:eeed:1b16:20a9:1694:6185:f045:fb9a'])") + So(err, ShouldBeNil) + result := s.(*model.Vector) + re := result.Data.Value() + zx := [3]string{"461c:7fa1:7f3c:7249:5278:c610:f595:d174", "3de8:13c6:df5f:bcd5:7605:3827:e37a:3a72", "127e:eeed:1b16:20a9:1694:6185:f045:fb9a"} + var k int + for i := 0; i < len(re); i++ { + if re[i] == zx[i] { + k++ + } + } + So(k, ShouldEqual, result.Data.Len()) + reType := result.GetDataType() + So(reType, ShouldEqual, 30) + reTypeString := result.GetDataTypeString() + So(reTypeString, ShouldEqual, "IP") + }) + Convey("Test_vector_ipaddr_number_has_null:", func() { + s, err := db.RunScript("a = take(ipaddr['192.168.1.135', , '192.168.1.14'],3);a") + So(err, ShouldBeNil) + result := s.(*model.Vector) + re := result.Data.Value() + zx := [3]string{"192.168.1.135", "0.0.0.0", "192.168.1.14"} + var k int + for i := 0; i < len(re); i++ { + if re[i] == zx[i] { + k++ + } + } + So(k, ShouldEqual, result.Data.Len()) + reType := result.GetDataType() + So(reType, ShouldEqual, 30) + reTypeString := result.GetDataTypeString() + So(reTypeString, ShouldEqual, "IP") + }) + Convey("Test_vector_ipaddr_all_null:", func() { + s, err := db.RunScript("ipaddr(['', '', ''])") + So(err, ShouldBeNil) + result := 
s.(*model.Vector) + re := result.Data.Value() + var k int + for i := 0; i < len(re); i++ { + if result.IsNull(i) == true { + k++ + } + } + So(k, ShouldEqual, result.Data.Len()) + reType := result.GetDataType() + So(reType, ShouldEqual, 30) + reTypeString := result.GetDataTypeString() + So(reTypeString, ShouldEqual, "IP") + }) + Convey("Test_vector_ipaddr_number_not_null:", func() { + s, err := db.RunScript("ipaddr(['192.168.1.135', '192.168.1.124', '192.168.1.14'])") + So(err, ShouldBeNil) + result := s.(*model.Vector) + re := result.Data.Value() + zx := [3]string{"192.168.1.135", "192.168.1.124", "192.168.1.14"} + var k int + for i := 0; i < len(re); i++ { + if re[i] == zx[i] { + k++ + } + } + So(k, ShouldEqual, result.Data.Len()) + reType := result.GetDataType() + So(reType, ShouldEqual, 30) + reTypeString := result.GetDataTypeString() + So(reTypeString, ShouldEqual, "IP") + }) + So(db.Close(), ShouldBeNil) + }) +} +func Test_Vector_Download_Datatype_int128(t *testing.T) { + Convey("Test_vector_int128:", t, func() { + db, err := api.NewSimpleDolphinDBClient(context.TODO(), setup.Address, setup.UserName, setup.Password) + So(err, ShouldBeNil) + Convey("Test_vector_int128_not_null:", func() { + s, err := db.RunScript("int128(['e1671797c52e15f763380b45e841ec32', 'e1671797c52e15f763380b45e841ec33', 'e1671797c52e15f763380b45e841ec34'])") + So(err, ShouldBeNil) + result := s.(*model.Vector) + re := result.Data.Value() + zx := [3]string{"e1671797c52e15f763380b45e841ec32", "e1671797c52e15f763380b45e841ec33", "e1671797c52e15f763380b45e841ec34"} + var k int + for i := 0; i < len(re); i++ { + if re[i] == zx[i] { + k++ + } + } + So(k, ShouldEqual, result.Data.Len()) + reType := result.GetDataType() + So(reType, ShouldEqual, 31) + reTypeString := result.GetDataTypeString() + So(reTypeString, ShouldEqual, "int128") + }) + Convey("Test_vector_int128_has_null:", func() { + s, err := db.RunScript("a = take(int128['e1671797c52e15f763380b45e841ec32', , 'e1671797c52e15f763380b45e841ec34'],3);a") + So(err, ShouldBeNil) + result := s.(*model.Vector) + re := result.Data.Value() + zx := [3]string{"e1671797c52e15f763380b45e841ec32", "00000000000000000000000000000000", "e1671797c52e15f763380b45e841ec34"} + var k int + for i := 0; i < len(re); i++ { + if re[i] == zx[i] { + k++ + } + } + So(k, ShouldEqual, result.Data.Len()) + reType := result.GetDataType() + So(reType, ShouldEqual, 31) + reTypeString := result.GetDataTypeString() + So(reTypeString, ShouldEqual, "int128") + }) + Convey("Test_vector_int128_all_null:", func() { + s, err := db.RunScript("int128(['', '', ''])") + So(err, ShouldBeNil) + result := s.(*model.Vector) + re := result.Data.Value() + var k int + for i := 0; i < len(re); i++ { + if result.IsNull(i) == true { + k++ + } + } + So(k, ShouldEqual, result.Data.Len()) + reType := result.GetDataType() + So(reType, ShouldEqual, 31) + reTypeString := result.GetDataTypeString() + So(reTypeString, ShouldEqual, "int128") + hush := result.HashBucket(1, 1) + So(hush, ShouldEqual, 0) + }) + So(db.Close(), ShouldBeNil) + }) +} +func Test_Vector_Download_Datatype_complex(t *testing.T) { + Convey("Test_vector_complex:", t, func() { + db, err := api.NewSimpleDolphinDBClient(context.TODO(), setup.Address, setup.UserName, setup.Password) + So(err, ShouldBeNil) + Convey("Test_vector_complex_not_null:", func() { + s, err := db.RunScript("a = take([complex(2,5),complex(-2,-5),complex(1048576,1048578)],3);a") + So(err, ShouldBeNil) + result := s.(*model.Vector) + re := result.Data.Value() + zx := 
[3]string{"2.00000+5.00000i", "-2.00000+-5.00000i", "1048576.00000+1048578.00000i"} + var k int + for i := 0; i < len(re); i++ { + if re[i] == zx[i] { + k++ + } + } + So(k, ShouldEqual, result.Data.Len()) + reType := result.GetDataType() + So(reType, ShouldEqual, 34) + reTypeString := result.GetDataTypeString() + So(reTypeString, ShouldEqual, "complex") + }) + Convey("Test_vector_complex_has_null:", func() { + s, err := db.RunScript("a = take([complex(-2,5),,complex(-1048576,-1048578)],3);a") + So(err, ShouldBeNil) + result := s.(*model.Vector) + re := result.Data.Value() + zx := [3]string{"-2.00000+5.00000i", "0.00000+0.00000i", "-1048576.00000+-1048578.00000i"} + var k int + for i := 0; i < len(re); i++ { + if re[i] == zx[i] { + k++ + } + } + So(k, ShouldEqual, result.Data.Len()) + reType := result.GetDataType() + So(reType, ShouldEqual, 34) + reTypeString := result.GetDataTypeString() + So(reTypeString, ShouldEqual, "complex") + }) + So(db.Close(), ShouldBeNil) + }) +} +func Test_Vector_Download_Datatype_point(t *testing.T) { + Convey("Test_vector_point:", t, func() { + db, err := api.NewSimpleDolphinDBClient(context.TODO(), setup.Address, setup.UserName, setup.Password) + So(err, ShouldBeNil) + Convey("Test_vector_point:", func() { + s, err := db.RunScript("a = take([point(2,5),point(-2,-5),point(1048576,1048578)],3);a") + So(err, ShouldBeNil) + result := s.(*model.Vector) + re := result.Data.Value() + zx := [3]string{"(2.00000, 5.00000)", "(-2.00000, -5.00000)", "(1048576.00000, 1048578.00000)"} + var k int + for i := 0; i < len(re); i++ { + if re[i] == zx[i] { + k++ + } + } + So(k, ShouldEqual, result.Data.Len()) + reType := result.GetDataType() + So(reType, ShouldEqual, 35) + reTypeString := result.GetDataTypeString() + So(reTypeString, ShouldEqual, "point") + }) + Convey("Test_vector_point_has_null:", func() { + s, err := db.RunScript("a = take([point(-2,5),,point(-1048576,-1048578)],3);a") + So(err, ShouldBeNil) + result := s.(*model.Vector) + re := result.Data.Value() + zx := [3]string{"(-2.00000, 5.00000)", "(0.00000, 0.00000)", "(-1048576.00000, -1048578.00000)"} + var k int + for i := 0; i < len(re); i++ { + if re[i] == zx[i] { + k++ + } + } + So(k, ShouldEqual, result.Data.Len()) + reType := result.GetDataType() + So(reType, ShouldEqual, 35) + reTypeString := result.GetDataTypeString() + So(reTypeString, ShouldEqual, "point") + So(result.HashBucket(1, 1), ShouldEqual, -1) + }) + So(db.Close(), ShouldBeNil) + }) +} +func Test_Vector_Download_Datatype_duration(t *testing.T) { + Convey("Test_vector_duration:", t, func() { + db, err := api.NewSimpleDolphinDBClient(context.TODO(), setup.Address, setup.UserName, setup.Password) + So(err, ShouldBeNil) + Convey("Test_vector_durations_not_null:", func() { + s, err := db.RunScript("a = take(duration['1H'],3);a") + So(err, ShouldBeNil) + result := s.(*model.Vector) + re := result.Data.Value() + So(re, ShouldNotBeNil) + reType := result.GetDataType() + So(reType, ShouldEqual, 25) + reTypeString := result.GetDataTypeString() + So(reTypeString, ShouldEqual, "any") + str := result.String() + So(str, ShouldEqual, "vector([duration(1H), duration(1H), duration(1H)])") + }) + }) +} +func Test_Vector_Download_Datatype_vector_big_than_1024(t *testing.T) { + Convey("Test_vector_big_than_1024:", t, func() { + db, err := api.NewSimpleDolphinDBClient(context.TODO(), setup.Address, setup.UserName, setup.Password) + So(err, ShouldBeNil) + s, err := db.RunScript("a=take(10000.0+1..16,2048);a.append!(-1024.0);a") + So(err, ShouldBeNil) + result := 
s.(*model.Vector) + re := result.Data.Value() + So(re[0], ShouldEqual, 10001.0) + So(re[2047], ShouldEqual, 10016.0) + So(re[2048], ShouldEqual, -1024.0) + reType := result.GetDataType() + So(reType, ShouldEqual, model.DtDouble) + reTypeString := result.GetDataTypeString() + So(reTypeString, ShouldEqual, "double") + So(result.ColumnCount, ShouldEqual, 1) + So(result.RowCount, ShouldEqual, 2049) + So(db.Close(), ShouldBeNil) + }) +} +func Test_Vector_Download_Datatype_vector_big_than_1048576(t *testing.T) { + Convey("Test_vector_big_than_1048576:", t, func() { + db, err := api.NewSimpleDolphinDBClient(context.TODO(), setup.Address, setup.UserName, setup.Password) + So(err, ShouldBeNil) + s, err := db.RunScript("a=take(datetime(2022.01.03T12:59:59.000)+1..1024,1048576);a.append!(datetime(2022.09.02T23:59:59.000));a") + So(err, ShouldBeNil) + result := s.(*model.Vector) + re := result.Data.Value() + ex1 := time.Date(2022, 1, 3, 13, 0, 0, 0, time.UTC) + ex2 := time.Date(2022, 1, 3, 13, 17, 3, 0, time.UTC) + ex3 := time.Date(2022, 9, 2, 23, 59, 59, 0, time.UTC) + So(re[0], ShouldEqual, ex1) + So(re[1048575], ShouldEqual, ex2) + So(re[1048576], ShouldEqual, ex3) + reType := result.GetDataType() + So(reType, ShouldEqual, model.DtDatetime) + reTypeString := result.GetDataTypeString() + So(reTypeString, ShouldEqual, "datetime") + So(result.ColumnCount, ShouldEqual, 1) + So(result.RowCount, ShouldEqual, 1048577) + So(db.Close(), ShouldBeNil) + }) +} +func Test_Vector_Download_Datatype_array_vector(t *testing.T) { + Convey("Test_vector_array_vector:", t, func() { + db, err := api.NewSimpleDolphinDBClient(context.TODO(), setup.Address, setup.UserName, setup.Password) + So(err, ShouldBeNil) + s, err := db.RunScript("a = array(INT[],0).append!([[1,1],[2,2],[3,3],[4,4],[5,5],[6]]);a") + So(err, ShouldBeNil) + result := s.(*model.Vector) + re := result.GetVectorValue(1).Data.Value() + So(re[0], ShouldEqual, 2) + reType := result.GetDataType() + So(reType, ShouldEqual, model.DtInt+64) + reTypeString := result.GetDataTypeString() + So(reTypeString, ShouldEqual, "intArray") + So(result.ColumnCount, ShouldEqual, 11) + So(result.RowCount, ShouldEqual, 6) + So(db.Close(), ShouldBeNil) + }) +} +func Test_Vector_Download_Datatype_array_vector_empty(t *testing.T) { + Convey("Test_vector_array_vector_empty:", t, func() { + db, err := api.NewSimpleDolphinDBClient(context.TODO(), setup.Address, setup.UserName, setup.Password) + So(err, ShouldBeNil) + s, err := db.RunScript("a = array(UUID[],0);a") + So(err, ShouldBeNil) + result := s.(*model.Vector) + reType := result.GetDataType() + So(reType, ShouldEqual, model.DtUUID+64) + reTypeString := result.GetDataTypeString() + So(reTypeString, ShouldEqual, "uuidArray") + So(result.RowCount, ShouldEqual, 0) + So(db.Close(), ShouldBeNil) + }) +} +func Test_Vector_Download_Datatype_array_vector_big_than_1024(t *testing.T) { + Convey("Test_vector_array_vector_big_than_1024:", t, func() { + db, err := api.NewSimpleDolphinDBClient(context.TODO(), setup.Address, setup.UserName, setup.Password) + So(err, ShouldBeNil) + s, err := db.RunScript("a = array(DOUBLE[],0).append!(take([[1.0,1.025],[2.0,2.36954],[3.32665,3.3266],[4.115,412.15],[5.215,5.545],[6.16546],[12.7,8.2,1.9,7.36,8.65,9.96],[-123.123,-258.258,-456.369]],2048)).append!(-1024.123456);a") + So(err, ShouldBeNil) + result := s.(*model.Vector) + So(result.GetVectorValue(1).Data.Value()[1], ShouldEqual, 2.36954) + So(result.GetVectorValue(2047).Data.Value()[0], ShouldEqual, -123.123) + So(result.GetVectorValue(2048).Data.Value()[0], ShouldEqual, 
-1024.123456) + reType := result.GetDataType() + So(reType, ShouldEqual, model.DtDouble+64) + reTypeString := result.GetDataTypeString() + So(reTypeString, ShouldEqual, "doubleArray") + So(result.RowCount, ShouldEqual, 2049) + }) +} +func Test_Vector_Download_Datatype_array_vector_big_than_1048576(t *testing.T) { + Convey("Test_vector_array_vector_big_than_1048576:", t, func() { + db, err := api.NewSimpleDolphinDBClient(context.TODO(), setup.Address, setup.UserName, setup.Password) + So(err, ShouldBeNil) + s, err := db.RunScript("a = array(DATETIME[],0).append!(take([[1996.12.31T23:59:59,1997.01.01T00:00:00],[2006.01.02T15:04:04,2006.01.02T15:04:05],[2022.01.02T23:59:59,2006.01.02T15:04:59],[2022.09.02T15:04:04,3002.01.02T15:04:05]],1048576)).append!(2002.02.02T12:24:36);a") + So(err, ShouldBeNil) + result := s.(*model.Vector) + So(result.GetVectorValue(1).Data.Value()[1], ShouldEqual, time.Date(2006, 1, 2, 15, 4, 5, 0, time.UTC)) + So(result.GetVectorValue(1048575).Data.Value()[0], ShouldEqual, time.Date(2022, 9, 2, 15, 4, 4, 0, time.UTC)) + So(result.GetVectorValue(1048576).Data.Value()[0], ShouldEqual, time.Date(2002, 2, 2, 12, 24, 36, 0, time.UTC)) + reType := result.GetDataType() + So(reType, ShouldEqual, model.DtDatetime+64) + reTypeString := result.GetDataTypeString() + So(reTypeString, ShouldEqual, "datetimeArray") + So(result.RowCount, ShouldEqual, 1048577) + }) +} +func Test_Vector_Download_Datatype_bigArray(t *testing.T) { + Convey("Test_vector_bigArray:", t, func() { + db, err := api.NewSimpleDolphinDBClient(context.TODO(), setup.Address, setup.UserName, setup.Password) + So(err, ShouldBeNil) + s, err := db.RunScript("n=5000000;X=bigarray(long,0, n);X.append!(1..4999999);X.append!(2000000000l);X") + So(err, ShouldBeNil) + result := s.(*model.Vector) + re := result.Data.Value() + So(re[4999999], ShouldEqual, 2000000000) + reType := result.GetDataType() + So(reType, ShouldEqual, model.DtLong) + reTypeString := result.GetDataTypeString() + So(reTypeString, ShouldEqual, "long") + So(result.ColumnCount, ShouldEqual, 1) + So(result.RowCount, ShouldEqual, 5000000) + So(db.Close(), ShouldBeNil) + }) +} +func Test_Vector_Download_Datatype_subArray(t *testing.T) { + Convey("Test_vector_subArray:", t, func() { + db, err := api.NewSimpleDolphinDBClient(context.TODO(), setup.Address, setup.UserName, setup.Password) + So(err, ShouldBeNil) + s, err := db.RunScript("n=5000000;X=bigarray(long,0, n);X.append!(1..4999999);X.append!(2000000000l);Y=subarray(X,4999998:);Y") + So(err, ShouldBeNil) + result := s.(*model.Vector) + re := result.Data.Value() + So(re[0], ShouldEqual, 4999999) + So(re[1], ShouldEqual, 2000000000) + reType := result.GetDataType() + So(reType, ShouldEqual, model.DtLong) + reTypeString := result.GetDataTypeString() + So(reTypeString, ShouldEqual, "long") + So(result.ColumnCount, ShouldEqual, 1) + So(result.RowCount, ShouldEqual, 2) + So(db.Close(), ShouldBeNil) + }) +} +func Test_Vector_UpLoad_Datatype_int(t *testing.T) { + Convey("Test_vector_int_upload:", t, func() { + db, err := api.NewSimpleDolphinDBClient(context.TODO(), setup.Address, setup.UserName, setup.Password) + So(err, ShouldBeNil) + Convey("Test_vector_int:", func() { + dls, _ := model.NewDataTypeListWithRaw(model.DtInt, []int32{1, 2, 3, 4, 5, 6, 7, 8, 9}) + s := model.NewVector(dls) + _, err := db.Upload(map[string]model.DataForm{"s": s}) + res, _ := db.RunScript("s") + ty, _ := db.RunScript("typestr(s)") + re := res.(*model.Vector).Data.Value() + zx := []int32{1, 2, 3, 4, 5, 6, 7, 8, 9} + var k int + for i := 
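
The array-vector download tests above rely on one convention: an array vector arrives as a single *model.Vector whose rows are themselves vectors, RowCount counts rows, ColumnCount counts total elements across rows, and GetVectorValue(i) returns row i. A short sketch under the same placeholder connection assumptions as the previous example:

package main

import (
	"context"
	"fmt"

	"github.com/dolphindb/api-go/api"
	"github.com/dolphindb/api-go/model"
)

func main() {
	db, err := api.NewSimpleDolphinDBClient(context.TODO(), "127.0.0.1:8848", "admin", "123456") // placeholders
	if err != nil {
		panic(err)
	}
	defer db.Close()

	df, err := db.RunScript("array(INT[],0).append!([[1,1],[2,2],[3]])")
	if err != nil {
		panic(err)
	}
	av := df.(*model.Vector)
	fmt.Println(av.RowCount, av.ColumnCount)       // 3 rows, 5 elements in total
	fmt.Println(av.GetDataTypeString())            // "intArray", type id model.DtInt+64
	fmt.Println(av.GetVectorValue(1).Data.Value()) // row 1 as its own vector: [2 2]
}
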
0; i < len(re); i++ { + if re[i] == zx[i] { + k++ + } + } + So(k, ShouldEqual, res.Rows()) + So(err, ShouldBeNil) + So(ty.String(), ShouldEqual, "string(FAST INT VECTOR)") + So(res.GetDataType(), ShouldEqual, model.DtInt) + }) + So(db.Close(), ShouldBeNil) + }) +} +func Test_Vector_UpLoad_Datatype_short(t *testing.T) { + Convey("Test_vector_short_upload:", t, func() { + db, err := api.NewSimpleDolphinDBClient(context.TODO(), setup.Address, setup.UserName, setup.Password) + So(err, ShouldBeNil) + Convey("Test_vector_short:", func() { + dls, _ := model.NewDataTypeListWithRaw(model.DtShort, []int16{1, 2, 3, 4, 5, 6, 7, 8, 9}) + s := model.NewVector(dls) + _, err := db.Upload(map[string]model.DataForm{"s": s}) + res, _ := db.RunScript("s") + ty, _ := db.RunScript("typestr(s)") + re := res.(*model.Vector).Data.Value() + zx := []int16{1, 2, 3, 4, 5, 6, 7, 8, 9} + var k int + for i := 0; i < len(re); i++ { + if re[i] == zx[i] { + k++ + } + } + So(k, ShouldEqual, res.Rows()) + So(err, ShouldBeNil) + So(ty.String(), ShouldEqual, "string(FAST SHORT VECTOR)") + So(res.GetDataType(), ShouldEqual, model.DtShort) + }) + So(db.Close(), ShouldBeNil) + }) +} +func Test_Vector_UpLoad_Datatype_char(t *testing.T) { + Convey("Test_vector_char_upload:", t, func() { + db, err := api.NewSimpleDolphinDBClient(context.TODO(), setup.Address, setup.UserName, setup.Password) + So(err, ShouldBeNil) + Convey("Test_vector_char:", func() { + dls, _ := model.NewDataTypeListWithRaw(model.DtChar, []uint8{1, 2, 3, 4, 5, 6, 7, 8, 9}) + s := model.NewVector(dls) + _, err := db.Upload(map[string]model.DataForm{"s": s}) + res, _ := db.RunScript("s") + ty, _ := db.RunScript("typestr(s)") + re := res.(*model.Vector).Data.Value() + zx := []uint8{1, 2, 3, 4, 5, 6, 7, 8, 9} + for j := 0; j < len(re); j++ { + So(re[j], ShouldEqual, zx[j]) + } + So(err, ShouldBeNil) + So(ty.String(), ShouldEqual, "string(FAST CHAR VECTOR)") + So(res.GetDataType(), ShouldEqual, model.DtChar) + }) + So(db.Close(), ShouldBeNil) + }) +} +func Test_Vector_UpLoad_Datatype_long(t *testing.T) { + Convey("Test_vector_long_upload:", t, func() { + db, err := api.NewSimpleDolphinDBClient(context.TODO(), setup.Address, setup.UserName, setup.Password) + So(err, ShouldBeNil) + Convey("Test_vector_long:", func() { + dls, _ := model.NewDataTypeListWithRaw(model.DtLong, []int64{-1, -2, -3, 4, 5, 6, 7, 8, 9}) + s := model.NewVector(dls) + _, err := db.Upload(map[string]model.DataForm{"s": s}) + res, _ := db.RunScript("s") + ty, _ := db.RunScript("typestr(s)") + re := res.(*model.Vector).Data.Value() + zx := []int64{-1, -2, -3, 4, 5, 6, 7, 8, 9} + var k int + for i := 0; i < len(re); i++ { + if re[i] == zx[i] { + k++ + } + } + So(k, ShouldEqual, res.Rows()) + So(err, ShouldBeNil) + So(ty.String(), ShouldEqual, "string(FAST LONG VECTOR)") + So(res.GetDataType(), ShouldEqual, model.DtLong) + }) + So(db.Close(), ShouldBeNil) + }) +} +func Test_Vector_UpLoad_Datatype_float(t *testing.T) { + Convey("Test_vector_float_upload:", t, func() { + db, err := api.NewSimpleDolphinDBClient(context.TODO(), setup.Address, setup.UserName, setup.Password) + So(err, ShouldBeNil) + Convey("Test_vector_float:", func() { + dls, _ := model.NewDataTypeListWithRaw(model.DtFloat, []float32{-1, -2, -3, 4, 5, 6, 7, 8, 9}) + s := model.NewVector(dls) + _, err := db.Upload(map[string]model.DataForm{"s": s}) + res, _ := db.RunScript("s") + ty, _ := db.RunScript("typestr(s)") + re := res.(*model.Vector).Data.Value() + zx := []float32{-1, -2, -3, 4, 5, 6, 7, 8, 9} + var k int + for i := 0; i < len(re); i++ 
{ + if re[i] == zx[i] { + k++ + } + } + So(k, ShouldEqual, res.Rows()) + So(err, ShouldBeNil) + So(ty.String(), ShouldEqual, "string(FAST FLOAT VECTOR)") + So(res.GetDataType(), ShouldEqual, model.DtFloat) + }) + So(db.Close(), ShouldBeNil) + }) +} +func Test_Vector_UpLoad_Datatype_double(t *testing.T) { + Convey("Test_vector_double_upload:", t, func() { + db, err := api.NewSimpleDolphinDBClient(context.TODO(), setup.Address, setup.UserName, setup.Password) + So(err, ShouldBeNil) + Convey("Test_vector_double:", func() { + dls, _ := model.NewDataTypeListWithRaw(model.DtDouble, []float64{1024.2, -2.10, 36897542.233, -5454545454, 8989.12125, 6, -10247.36985, 8, 9}) + s := model.NewVector(dls) + _, err := db.Upload(map[string]model.DataForm{"s": s}) + res, _ := db.RunScript("s") + ty, _ := db.RunScript("typestr(s)") + re := res.(*model.Vector).Data.Value() + zx := []float64{1024.2, -2.10, 36897542.233, -5454545454, 8989.12125, 6, -10247.36985, 8, 9} + var k int + for i := 0; i < len(re); i++ { + if re[i] == zx[i] { + k++ + } + } + So(k, ShouldEqual, res.Rows()) + So(err, ShouldBeNil) + So(ty.String(), ShouldEqual, "string(FAST DOUBLE VECTOR)") + So(res.GetDataType(), ShouldEqual, model.DtDouble) + }) + So(db.Close(), ShouldBeNil) + }) +} +func Test_Vector_UpLoad_Datatype_date(t *testing.T) { + Convey("Test_vector_date_upload:", t, func() { + db, err := api.NewSimpleDolphinDBClient(context.TODO(), setup.Address, setup.UserName, setup.Password) + So(err, ShouldBeNil) + Convey("Test_vector_date:", func() { + dls, _ := model.NewDataTypeListWithRaw(model.DtDate, []time.Time{time.Date(2022, 12, 31, 23, 59, 59, 999999999, time.UTC), time.Date(1969, 12, 31, 23, 59, 59, 999999999, time.UTC), time.Date(2006, 1, 2, 15, 4, 4, 999999999, time.UTC)}) + s := model.NewVector(dls) + _, err := db.Upload(map[string]model.DataForm{"s": s}) + res, _ := db.RunScript("s") + ty, _ := db.RunScript("typestr(s)") + re := res.(*model.Vector).Data.Value() + time1 := []time.Time{time.Date(2022, 12, 31, 0, 0, 0, 0, time.UTC), time.Date(1969, 12, 31, 0, 0, 0, 0, time.UTC), time.Date(2006, 1, 2, 0, 0, 0, 0, time.UTC)} + var k int + for i := 0; i < len(re); i++ { + if re[i] == time1[i] { + k++ + } + } + So(k, ShouldEqual, res.Rows()) + So(err, ShouldBeNil) + So(ty.String(), ShouldEqual, "string(FAST DATE VECTOR)") + So(res.GetDataType(), ShouldEqual, model.DtDate) + }) + So(db.Close(), ShouldBeNil) + }) +} +func Test_Vector_UpLoad_Datatype_month(t *testing.T) { + Convey("Test_vector_month_upload:", t, func() { + db, err := api.NewSimpleDolphinDBClient(context.TODO(), setup.Address, setup.UserName, setup.Password) + So(err, ShouldBeNil) + Convey("Test_vector_month:", func() { + dls, _ := model.NewDataTypeListWithRaw(model.DtMonth, []time.Time{time.Date(2022, 12, 31, 23, 59, 59, 999, time.UTC), time.Date(1969, 12, 31, 23, 59, 59, 999, time.UTC), time.Date(2006, 1, 2, 15, 4, 4, 999, time.UTC)}) + s := model.NewVector(dls) + _, err := db.Upload(map[string]model.DataForm{"s": s}) + res, _ := db.RunScript("s") + ty, _ := db.RunScript("typestr(s)") + re := res.(*model.Vector).Data.Value() + time1 := []time.Time{time.Date(2022, 12, 1, 0, 0, 0, 0, time.UTC), time.Date(1969, 12, 1, 0, 0, 0, 0, time.UTC), time.Date(2006, 1, 1, 0, 0, 0, 0, time.UTC)} + var k int + for i := 0; i < len(re); i++ { + if re[i] == time1[i] { + k++ + } + } + So(k, ShouldEqual, res.Rows()) + So(err, ShouldBeNil) + So(ty.String(), ShouldEqual, "string(FAST MONTH VECTOR)") + So(res.GetDataType(), ShouldEqual, model.DtMonth) + }) + So(db.Close(), ShouldBeNil) + }) 
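
The date and month uploads above show that the server truncates whatever the time.Time carries beyond the target type's resolution: DtDate keeps only the calendar day (it reads back as midnight), and DtMonth keeps only year and month (it reads back as the first of the month). A small sketch of the DtDate mapping, placeholder connection again:

package main

import (
	"context"
	"fmt"
	"time"

	"github.com/dolphindb/api-go/api"
	"github.com/dolphindb/api-go/model"
)

func main() {
	db, err := api.NewSimpleDolphinDBClient(context.TODO(), "127.0.0.1:8848", "admin", "123456") // placeholders
	if err != nil {
		panic(err)
	}
	defer db.Close()

	in := []time.Time{time.Date(2022, 12, 31, 23, 59, 59, 999999999, time.UTC)}
	dls, err := model.NewDataTypeListWithRaw(model.DtDate, in)
	if err != nil {
		panic(err)
	}
	if _, err = db.Upload(map[string]model.DataForm{"d": model.NewVector(dls)}); err != nil {
		panic(err)
	}
	res, err := db.RunScript("d")
	if err != nil {
		panic(err)
	}
	// The time-of-day is gone: a DATE round-trips as midnight of the same day.
	fmt.Println(res.(*model.Vector).Data.Value()[0]) // 2022-12-31 00:00:00 +0000 UTC
}
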
+} +func Test_Vector_UpLoad_Datatype_time(t *testing.T) { + Convey("Test_vector_time_upload:", t, func() { + db, err := api.NewSimpleDolphinDBClient(context.TODO(), setup.Address, setup.UserName, setup.Password) + So(err, ShouldBeNil) + Convey("Test_vector_time:", func() { + dls, _ := model.NewDataTypeListWithRaw(model.DtTime, []time.Time{time.Date(2022, 12, 31, 23, 59, 59, 999999999, time.UTC), time.Date(1969, 12, 31, 23, 59, 59, 999999999, time.UTC), time.Date(2006, 1, 2, 15, 4, 4, 999999999, time.UTC)}) + s := model.NewVector(dls) + _, err := db.Upload(map[string]model.DataForm{"s": s}) + res, _ := db.RunScript("s") + ty, _ := db.RunScript("typestr(s)") + re := res.(*model.Vector).Data.Value() + time1 := []time.Time{time.Date(1970, 1, 1, 23, 59, 59, 999000000, time.UTC), time.Date(1970, 1, 1, 23, 59, 59, 999000000, time.UTC), time.Date(1970, 1, 1, 15, 4, 4, 999000000, time.UTC)} + var k int + for i := 0; i < len(re); i++ { + if re[i] == time1[i] { + k++ + } + } + So(k, ShouldEqual, res.Rows()) + So(err, ShouldBeNil) + So(ty.String(), ShouldEqual, "string(FAST TIME VECTOR)") + So(res.GetDataType(), ShouldEqual, model.DtTime) + }) + So(db.Close(), ShouldBeNil) + }) +} +func Test_Vector_UpLoad_Datatype_minute(t *testing.T) { + Convey("Test_vector_minute_upload:", t, func() { + db, err := api.NewSimpleDolphinDBClient(context.TODO(), setup.Address, setup.UserName, setup.Password) + So(err, ShouldBeNil) + Convey("Test_vector_minute:", func() { + dls, _ := model.NewDataTypeListWithRaw(model.DtMinute, []time.Time{time.Date(2022, 12, 31, 23, 59, 59, 999999999, time.UTC), time.Date(1969, 12, 31, 23, 59, 59, 999999999, time.UTC), time.Date(2006, 1, 2, 15, 4, 4, 999999999, time.UTC)}) + s := model.NewVector(dls) + _, err := db.Upload(map[string]model.DataForm{"s": s}) + res, _ := db.RunScript("s") + ty, _ := db.RunScript("typestr(s)") + re := res.(*model.Vector).Data.Value() + time1 := []time.Time{time.Date(1970, 1, 1, 23, 59, 0, 0, time.UTC), time.Date(1970, 1, 1, 23, 59, 0, 0, time.UTC), time.Date(1970, 1, 1, 15, 4, 0, 0, time.UTC)} + var k int + for i := 0; i < len(re); i++ { + if re[i] == time1[i] { + k++ + } + } + So(k, ShouldEqual, res.Rows()) + So(err, ShouldBeNil) + So(ty.String(), ShouldEqual, "string(FAST MINUTE VECTOR)") + So(res.GetDataType(), ShouldEqual, model.DtMinute) + }) + So(db.Close(), ShouldBeNil) + }) +} +func Test_Vector_UpLoad_Datatype_second(t *testing.T) { + Convey("Test_vector_second_upload:", t, func() { + db, err := api.NewSimpleDolphinDBClient(context.TODO(), setup.Address, setup.UserName, setup.Password) + So(err, ShouldBeNil) + Convey("Test_vector_second:", func() { + dls, _ := model.NewDataTypeListWithRaw(model.DtSecond, []time.Time{time.Date(2022, 12, 31, 23, 59, 59, 999999999, time.UTC), time.Date(1969, 12, 31, 23, 59, 59, 999999999, time.UTC), time.Date(2006, 1, 2, 15, 4, 4, 999999999, time.UTC)}) + s := model.NewVector(dls) + _, err := db.Upload(map[string]model.DataForm{"s": s}) + res, _ := db.RunScript("s") + ty, _ := db.RunScript("typestr(s)") + re := res.(*model.Vector).Data.Value() + time1 := []time.Time{time.Date(1970, 1, 1, 23, 59, 59, 0, time.UTC), time.Date(1970, 1, 1, 23, 59, 59, 0, time.UTC), time.Date(1970, 1, 1, 15, 4, 4, 0, time.UTC)} + var k int + for i := 0; i < len(re); i++ { + if re[i] == time1[i] { + k++ + } + } + So(k, ShouldEqual, res.Rows()) + So(err, ShouldBeNil) + So(ty.String(), ShouldEqual, "string(FAST SECOND VECTOR)") + So(res.GetDataType(), ShouldEqual, model.DtSecond) + }) + So(db.Close(), ShouldBeNil) + }) +} +func 
Test_Vector_UpLoad_Datatype_datetime(t *testing.T) { + Convey("Test_vector_datetime_upload:", t, func() { + db, err := api.NewSimpleDolphinDBClient(context.TODO(), setup.Address, setup.UserName, setup.Password) + So(err, ShouldBeNil) + Convey("Test_vector_datetime:", func() { + dls, _ := model.NewDataTypeListWithRaw(model.DtDatetime, []time.Time{time.Date(2022, 12, 31, 23, 59, 59, 999999999, time.UTC), time.Date(1969, 12, 31, 23, 59, 59, 999999999, time.UTC), time.Date(2006, 1, 2, 15, 4, 4, 999999999, time.UTC)}) + s := model.NewVector(dls) + _, err := db.Upload(map[string]model.DataForm{"s": s}) + res, _ := db.RunScript("s") + ty, _ := db.RunScript("typestr(s)") + re := res.(*model.Vector).Data.Value() + time1 := []time.Time{time.Date(2022, 12, 31, 23, 59, 59, 0, time.UTC), time.Date(1969, 12, 31, 23, 59, 59, 0, time.UTC), time.Date(2006, 1, 2, 15, 4, 4, 0, time.UTC)} + var k int + for i := 0; i < len(re); i++ { + if re[i] == time1[i] { + k++ + } + } + So(k, ShouldEqual, res.Rows()) + So(err, ShouldBeNil) + So(ty.String(), ShouldEqual, "string(FAST DATETIME VECTOR)") + So(res.GetDataType(), ShouldEqual, model.DtDatetime) + }) + So(db.Close(), ShouldBeNil) + }) +} +func Test_Vector_UpLoad_Datatype_timestamp(t *testing.T) { + Convey("Test_vector_timestamp_upload:", t, func() { + db, err := api.NewSimpleDolphinDBClient(context.TODO(), setup.Address, setup.UserName, setup.Password) + So(err, ShouldBeNil) + Convey("Test_vector_timestamp:", func() { + dls, _ := model.NewDataTypeListWithRaw(model.DtTimestamp, []time.Time{time.Date(2022, 12, 31, 23, 59, 59, 999999999, time.UTC), time.Date(1969, 12, 31, 23, 59, 59, 999999999, time.UTC), time.Date(2006, 1, 2, 15, 4, 4, 999999999, time.UTC)}) + s := model.NewVector(dls) + _, err := db.Upload(map[string]model.DataForm{"s": s}) + res, _ := db.RunScript("s") + ty, _ := db.RunScript("typestr(s)") + re := res.(*model.Vector).Data.Value() + time1 := []time.Time{time.Date(2022, 12, 31, 23, 59, 59, 999000000, time.UTC), time.Date(1969, 12, 31, 23, 59, 59, 999000000, time.UTC), time.Date(2006, 1, 2, 15, 4, 4, 999000000, time.UTC)} + var k int + for i := 0; i < len(re); i++ { + if re[i] == time1[i] { + k++ + } + } + So(k, ShouldEqual, res.Rows()) + So(err, ShouldBeNil) + So(ty.String(), ShouldEqual, "string(FAST TIMESTAMP VECTOR)") + So(res.GetDataType(), ShouldEqual, model.DtTimestamp) + }) + So(db.Close(), ShouldBeNil) + }) +} +func Test_Vector_UpLoad_Datatype_nanotime(t *testing.T) { + Convey("Test_vector_nanotime_upload:", t, func() { + db, err := api.NewSimpleDolphinDBClient(context.TODO(), setup.Address, setup.UserName, setup.Password) + So(err, ShouldBeNil) + Convey("Test_vector_nanotime:", func() { + dls, _ := model.NewDataTypeListWithRaw(model.DtNanoTime, []time.Time{time.Date(2022, 12, 31, 23, 59, 59, 999999999, time.UTC), time.Date(1969, 12, 31, 23, 59, 59, 999999999, time.UTC), time.Date(2006, 1, 2, 15, 4, 4, 999999999, time.UTC)}) + s := model.NewVector(dls) + _, err := db.Upload(map[string]model.DataForm{"s": s}) + res, _ := db.RunScript("s") + ty, _ := db.RunScript("typestr(s)") + re := res.(*model.Vector).Data.Value() + time1 := []time.Time{time.Date(1970, 1, 1, 23, 59, 59, 999999999, time.UTC), time.Date(1970, 1, 1, 23, 59, 59, 999999999, time.UTC), time.Date(1970, 1, 1, 15, 4, 4, 999999999, time.UTC)} + var k int + for i := 0; i < len(re); i++ { + if re[i] == time1[i] { + k++ + } + } + So(k, ShouldEqual, res.Rows()) + So(err, ShouldBeNil) + So(ty.String(), ShouldEqual, "string(FAST NANOTIME VECTOR)") + So(res.GetDataType(), ShouldEqual, 
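
The same truncation rule explains the timestamp assertions: DtTimestamp stores millisecond precision, so the 999999999 ns inputs come back as 999000000 ns (999 ms), while DtNanoTimestamp round-trips the full nanosecond value. A minimal sketch under the placeholder connection:

package main

import (
	"context"
	"fmt"
	"time"

	"github.com/dolphindb/api-go/api"
	"github.com/dolphindb/api-go/model"
)

func main() {
	db, err := api.NewSimpleDolphinDBClient(context.TODO(), "127.0.0.1:8848", "admin", "123456") // placeholders
	if err != nil {
		panic(err)
	}
	defer db.Close()

	in := []time.Time{time.Date(2006, 1, 2, 15, 4, 4, 999999999, time.UTC)}
	dls, err := model.NewDataTypeListWithRaw(model.DtTimestamp, in)
	if err != nil {
		panic(err)
	}
	if _, err = db.Upload(map[string]model.DataForm{"t": model.NewVector(dls)}); err != nil {
		panic(err)
	}
	res, err := db.RunScript("t")
	if err != nil {
		panic(err)
	}
	// Prints 15:04:04.999: the nanosecond input is floored to millisecond precision.
	// Swapping model.DtTimestamp for model.DtNanoTimestamp preserves all 999999999 ns,
	// as the nanotimestamp test below asserts.
	fmt.Println(res.(*model.Vector).Data.Value()[0])
}
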
model.DtNanoTime) + }) + So(db.Close(), ShouldBeNil) + }) +} +func Test_Vector_UpLoad_Datatype_nanotimestamp(t *testing.T) { + Convey("Test_vector_nanotimestamp_upload:", t, func() { + db, err := api.NewSimpleDolphinDBClient(context.TODO(), setup.Address, setup.UserName, setup.Password) + So(err, ShouldBeNil) + Convey("Test_vector_nanotimestamp:", func() { + dls, _ := model.NewDataTypeListWithRaw(model.DtNanoTimestamp, []time.Time{time.Date(2022, 12, 31, 23, 59, 59, 999999999, time.UTC), time.Date(1969, 12, 31, 23, 59, 59, 999999999, time.UTC), time.Date(2006, 1, 2, 15, 4, 4, 999999999, time.UTC)}) + s := model.NewVector(dls) + _, err := db.Upload(map[string]model.DataForm{"s": s}) + res, _ := db.RunScript("s") + ty, _ := db.RunScript("typestr(s)") + re := res.(*model.Vector).Data.Value() + time1 := []time.Time{time.Date(2022, 12, 31, 23, 59, 59, 999999999, time.UTC), time.Date(1969, 12, 31, 23, 59, 59, 999999999, time.UTC), time.Date(2006, 1, 2, 15, 4, 4, 999999999, time.UTC)} + var k int + for i := 0; i < len(re); i++ { + if re[i] == time1[i] { + k++ + } + } + So(k, ShouldEqual, res.Rows()) + So(err, ShouldBeNil) + So(ty.String(), ShouldEqual, "string(FAST NANOTIMESTAMP VECTOR)") + So(res.GetDataType(), ShouldEqual, model.DtNanoTimestamp) + }) + So(db.Close(), ShouldBeNil) + }) +} +func Test_Vector_UpLoad_Datatype_datehour(t *testing.T) { + Convey("Test_vector_datehour_upload:", t, func() { + db, err := api.NewSimpleDolphinDBClient(context.TODO(), setup.Address, setup.UserName, setup.Password) + So(err, ShouldBeNil) + Convey("Test_vector_datehour:", func() { + dls, _ := model.NewDataTypeListWithRaw(model.DtDateHour, []time.Time{time.Date(2022, 12, 31, 23, 59, 59, 999999999, time.UTC), time.Date(1969, 12, 31, 23, 59, 59, 999999999, time.UTC), time.Date(2006, 1, 2, 15, 4, 4, 999999999, time.UTC)}) + s := model.NewVector(dls) + _, err := db.Upload(map[string]model.DataForm{"s": s}) + res, _ := db.RunScript("s") + ty, _ := db.RunScript("typestr(s)") + re := res.(*model.Vector).Data.Value() + time1 := []time.Time{time.Date(2022, 12, 31, 23, 0, 0, 0, time.UTC), time.Date(1969, 12, 31, 23, 0, 0, 0, time.UTC), time.Date(2006, 1, 2, 15, 0, 0, 0, time.UTC)} + var k int + for i := 0; i < len(re); i++ { + if re[i] == time1[i] { + k++ + } + } + So(k, ShouldEqual, res.Rows()) + So(err, ShouldBeNil) + So(ty.String(), ShouldEqual, "string(FAST DATEHOUR VECTOR)") + So(res.GetDataType(), ShouldEqual, model.DtDateHour) + }) + So(db.Close(), ShouldBeNil) + }) +} +func Test_Vector_UpLoad_Datatype_point(t *testing.T) { + Convey("Test_vector_point_upload:", t, func() { + db, err := api.NewSimpleDolphinDBClient(context.TODO(), setup.Address, setup.UserName, setup.Password) + So(err, ShouldBeNil) + Convey("Test_vector_point:", func() { + dls, _ := model.NewDataTypeListWithRaw(model.DtPoint, [][2]float64{{1, 1}, {-1, -1024.5}, {1001022.4, -30028.75}}) + s := model.NewVector(dls) + _, err := db.Upload(map[string]model.DataForm{"s": s}) + res, _ := db.RunScript("s") + ty, _ := db.RunScript("typestr(s)") + re := res.(*model.Vector).Data.Value() + zx := []string{"(1.00000, 1.00000)", "(-1.00000, -1024.50000)", "(1001022.40000, -30028.75000)"} + var k int + for i := 0; i < len(re); i++ { + if re[i] == zx[i] { + k++ + } + } + So(k, ShouldEqual, res.Rows()) + So(err, ShouldBeNil) + So(ty.String(), ShouldEqual, "string(FAST POINT VECTOR)") + So(res.GetDataType(), ShouldEqual, model.DtPoint) + }) + So(db.Close(), ShouldBeNil) + }) +} +func Test_Vector_UpLoad_Datatype_complex(t *testing.T) { + 
Convey("Test_vector_complex_upload:", t, func() { + db, err := api.NewSimpleDolphinDBClient(context.TODO(), setup.Address, setup.UserName, setup.Password) + So(err, ShouldBeNil) + Convey("Test_vector_complex:", func() { + dls, _ := model.NewDataTypeListWithRaw(model.DtComplex, [][2]float64{{1, 1}, {-1, -1024.5}, {1001022.4, -30028.75}}) + s := model.NewVector(dls) + _, err := db.Upload(map[string]model.DataForm{"s": s}) + res, _ := db.RunScript("s") + ty, _ := db.RunScript("typestr(s)") + re := res.(*model.Vector).Data.Value() + zx := []string{"1.00000+1.00000i", "-1.00000+-1024.50000i", "1001022.40000+-30028.75000i"} + var k int + for i := 0; i < len(re); i++ { + if re[i] == zx[i] { + k++ + } + } + So(k, ShouldEqual, res.Rows()) + So(err, ShouldBeNil) + So(ty.String(), ShouldEqual, "string(FAST COMPLEX VECTOR)") + So(res.GetDataType(), ShouldEqual, model.DtComplex) + }) + So(db.Close(), ShouldBeNil) + }) +} +func Test_Vector_UpLoad_Datatype_string(t *testing.T) { + Convey("Test_vector_string_upload:", t, func() { + db, err := api.NewSimpleDolphinDBClient(context.TODO(), setup.Address, setup.UserName, setup.Password) + So(err, ShouldBeNil) + Convey("Test_vector_string:", func() { + dls, _ := model.NewDataTypeListWithRaw(model.DtString, []string{"hello", "#$%", "数据类型", "what"}) + s := model.NewVector(dls) + _, err := db.Upload(map[string]model.DataForm{"s": s}) + res, _ := db.RunScript("s") + ty, _ := db.RunScript("typestr(s)") + re := res.(*model.Vector).Data.Value() + zx := []string{"hello", "#$%", "数据类型", "what"} + var k int + for i := 0; i < len(re); i++ { + if re[i] == zx[i] { + k++ + } + } + So(k, ShouldEqual, res.Rows()) + So(err, ShouldBeNil) + So(ty.String(), ShouldEqual, "string(STRING VECTOR)") + So(res.GetDataType(), ShouldEqual, model.DtString) + }) + So(db.Close(), ShouldBeNil) + }) +} +func Test_Vector_UpLoad_Datatype_any(t *testing.T) { + Convey("Test_vector_any_upload:", t, func() { + db, err := api.NewSimpleDolphinDBClient(context.TODO(), setup.Address, setup.UserName, setup.Password) + So(err, ShouldBeNil) + Convey("Test_vector_any:", func() { + dls, _ := model.NewDataTypeListWithRaw(model.DtAny, model.DfVector) + s := model.NewVector(dls) + _, err := db.Upload(map[string]model.DataForm{"s": s}) + res, _ := db.RunScript("s") + ty, _ := db.RunScript("typestr(s)") + So(err, ShouldBeNil) + So(ty.String(), ShouldEqual, "string(ANY VECTOR)") + So(res.GetDataType(), ShouldEqual, model.DtAny) + }) + So(db.Close(), ShouldBeNil) + }) +} +func Test_Vector_UpLoad_Datatype_bool(t *testing.T) { + Convey("Test_vector_bool_upload:", t, func() { + db, err := api.NewSimpleDolphinDBClient(context.TODO(), setup.Address, setup.UserName, setup.Password) + So(err, ShouldBeNil) + Convey("Test_vector_bool:", func() { + dls, _ := model.NewDataTypeListWithRaw(model.DtBool, []bool{true, true, false, false}) + s := model.NewVector(dls) + _, err := db.Upload(map[string]model.DataForm{"s": s}) + res, _ := db.RunScript("s") + ty, _ := db.RunScript("typestr(s)") + re := res.(*model.Vector).Data.Value() + zx := []bool{true, true, false, false} + var k int + for i := 0; i < len(re); i++ { + if re[i] == zx[i] { + k++ + } + } + So(k, ShouldEqual, res.Rows()) + So(err, ShouldBeNil) + So(ty.String(), ShouldEqual, "string(FAST BOOL VECTOR)") + So(res.GetDataType(), ShouldEqual, model.DtBool) + }) + So(db.Close(), ShouldBeNil) + }) +} +func Test_Vector_UpLoad_Datatype_blob(t *testing.T) { + Convey("Test_vector_blob_upload:", t, func() { + db, err := api.NewSimpleDolphinDBClient(context.TODO(), setup.Address, 
setup.UserName, setup.Password) + So(err, ShouldBeNil) + Convey("Test_vector_blob:", func() { + dls, _ := model.NewDataTypeListWithRaw(model.DtBlob, [][]byte{{6}, {12}, {56}, {128}}) + s := model.NewVector(dls) + _, err := db.Upload(map[string]model.DataForm{"s": s}) + res, _ := db.RunScript("s") + ty, _ := db.RunScript("typestr(s)") + re := res.(*model.Vector).Data.Value() + zx := [][]uint8{{6}, {12}, {56}, {128}} + for j := 0; j < len(re); j++ { + So(re[j], ShouldResemble, zx[j]) + } + So(err, ShouldBeNil) + So(ty.String(), ShouldEqual, "string(BLOB VECTOR)") + So(res.GetDataType(), ShouldEqual, model.DtBlob) + }) + So(db.Close(), ShouldBeNil) + }) +} +func Test_Vector_UpLoad_Datatype_uuid(t *testing.T) { + Convey("Test_vector_uuid_upload:", t, func() { + db, err := api.NewSimpleDolphinDBClient(context.TODO(), setup.Address, setup.UserName, setup.Password) + So(err, ShouldBeNil) + Convey("Test_vector_uuid:", func() { + dls, _ := model.NewDataTypeListWithRaw(model.DtUUID, []string{"5d212a78-cc48-e3b1-4235-b4d91473ee87", "5d212a78-cc48-e3b1-4235-b4d91473ee88", "5d212a78-cc48-e3b1-4235-b4d91473ee89"}) + s := model.NewVector(dls) + _, err := db.Upload(map[string]model.DataForm{"s": s}) + res, _ := db.RunScript("s") + ty, _ := db.RunScript("typestr(s)") + re := res.(*model.Vector).Data.Value() + zx := []string{"5d212a78-cc48-e3b1-4235-b4d91473ee87", "5d212a78-cc48-e3b1-4235-b4d91473ee88", "5d212a78-cc48-e3b1-4235-b4d91473ee89"} + var k int + for i := 0; i < len(re); i++ { + if re[i] == zx[i] { + k++ + } + } + So(k, ShouldEqual, res.Rows()) + So(err, ShouldBeNil) + So(ty.String(), ShouldEqual, "string(FAST UUID VECTOR)") + So(res.GetDataType(), ShouldEqual, model.DtUUID) + }) + So(db.Close(), ShouldBeNil) + }) +} +func Test_Vector_UpLoad_Datatype_ipaddr(t *testing.T) { + Convey("Test_vector_ipaddr_upload:", t, func() { + db, err := api.NewSimpleDolphinDBClient(context.TODO(), setup.Address, setup.UserName, setup.Password) + So(err, ShouldBeNil) + Convey("Test_vector_ipaddr:", func() { + dls, _ := model.NewDataTypeListWithRaw(model.DtIP, []string{"192.163.1.12", "0.0.0.0", "127.0.0.1"}) + s := model.NewVector(dls) + _, err := db.Upload(map[string]model.DataForm{"s": s}) + res, _ := db.RunScript("s") + ty, _ := db.RunScript("typestr(s)") + re := res.(*model.Vector).Data.Value() + zx := []string{"192.163.1.12", "0.0.0.0", "127.0.0.1"} + var k int + for i := 0; i < len(re); i++ { + if re[i] == zx[i] { + k++ + } + } + So(k, ShouldEqual, res.Rows()) + So(err, ShouldBeNil) + So(ty.String(), ShouldEqual, "string(FAST IPADDR VECTOR)") + So(res.GetDataType(), ShouldEqual, model.DtIP) + }) + So(db.Close(), ShouldBeNil) + }) +} +func Test_Vector_UpLoad_Datatype_int128(t *testing.T) { + Convey("Test_vector_int128_upload:", t, func() { + db, err := api.NewSimpleDolphinDBClient(context.TODO(), setup.Address, setup.UserName, setup.Password) + So(err, ShouldBeNil) + dls, _ := model.NewDataTypeListWithRaw(model.DtInt128, []string{"e1671797c52e15f763380b45e841ec32", "e1671797c52e15f763380b45e841ec33", "e1671797c52e15f763380b45e841ec34"}) + s := model.NewVector(dls) + _, err = db.Upload(map[string]model.DataForm{"s": s}) + res, _ := db.RunScript("s") + ty, _ := db.RunScript("typestr(s)") + re := res.(*model.Vector).Data.Value() + zx := []string{"e1671797c52e15f763380b45e841ec32", "e1671797c52e15f763380b45e841ec33", "e1671797c52e15f763380b45e841ec34"} + var k int + for i := 0; i < len(re); i++ { + if re[i] == zx[i] { + k++ + } + } + So(k, ShouldEqual, res.Rows()) + So(err, ShouldBeNil) + So(ty.String(), 
ShouldEqual, "string(FAST INT128 VECTOR)") + So(res.GetDataType(), ShouldEqual, model.DtInt128) + So(db.Close(), ShouldBeNil) + }) +} +func Test_Vector_UpLoad_int_array_vector(t *testing.T) { + Convey("Test_Vector_int_array_vector:", t, func() { + db, err := api.NewSimpleDolphinDBClient(context.TODO(), setup.Address, setup.UserName, setup.Password) + So(err, ShouldBeNil) + int1v, err := model.NewDataTypeListWithRaw(model.DtInt, []int32{-1024, 1048576, -1048579, 3000000}) + So(err, ShouldBeNil) + int2v, err := model.NewDataTypeListWithRaw(model.DtInt, []int32{0, 1048576, model.NullInt, 3000000}) + So(err, ShouldBeNil) + int3v, err := model.NewDataTypeListWithRaw(model.DtInt, []int32{model.NullInt, model.NullInt, model.NullInt, model.NullInt}) + So(err, ShouldBeNil) + av := model.NewArrayVector([]*model.Vector{model.NewVector(int1v), model.NewVector(int2v), model.NewVector(int3v)}) + s := model.NewVectorWithArrayVector(av) + _, err = db.Upload(map[string]model.DataForm{"s": s}) + So(err, ShouldBeNil) + res, err := db.RunScript("s") + So(err, ShouldBeNil) + ty, _ := db.RunScript("typestr(s)") + re := res.(*model.Vector) + So(re.Get(5).Value(), ShouldEqual, 1048576) + So(re.IsNull(6), ShouldBeTrue) + So(ty.String(), ShouldEqual, "string(FAST INT[] VECTOR)") + So(res.GetDataType(), ShouldEqual, model.DtInt+64) + So(db.Close(), ShouldBeNil) + }) +} +func Test_Vector_UpLoad_bool_array_vector(t *testing.T) { + Convey("Test_Vector_bool_array_vector:", t, func() { + db, err := api.NewSimpleDolphinDBClient(context.TODO(), setup.Address, setup.UserName, setup.Password) + So(err, ShouldBeNil) + bool1v, err := model.NewDataTypeListWithRaw(model.DtBool, []byte{1, 1, 0, 1, 1, 0, 0, 0, 1, 0, 1}) + So(err, ShouldBeNil) + bool2v, err := model.NewDataTypeListWithRaw(model.DtBool, []byte{0, model.NullBool, 1}) + So(err, ShouldBeNil) + bool3v, err := model.NewDataTypeListWithRaw(model.DtBool, []byte{model.NullBool, model.NullBool, model.NullBool}) + So(err, ShouldBeNil) + av := model.NewArrayVector([]*model.Vector{model.NewVector(bool1v), model.NewVector(bool2v), model.NewVector(bool3v)}) + s := model.NewVectorWithArrayVector(av) + _, err = db.Upload(map[string]model.DataForm{"s": s}) + So(err, ShouldBeNil) + res, err := db.RunScript("s") + So(err, ShouldBeNil) + ty, _ := db.RunScript("typestr(s)") + re := res.(*model.Vector) + So(re.Get(0).Value(), ShouldEqual, true) + So(re.Get(6).Value(), ShouldEqual, false) + So(re.IsNull(12), ShouldBeTrue) + So(ty.String(), ShouldEqual, "string(FAST BOOL[] VECTOR)") + So(res.GetDataType(), ShouldEqual, model.DtBool+64) + So(db.Close(), ShouldBeNil) + }) +} +func Test_Vector_UpLoad_big_array_vector(t *testing.T) { + Convey("Test_Vector_int_big_array_vector:", t, func() { + db, err := api.NewSimpleDolphinDBClient(context.TODO(), setup.Address, setup.UserName, setup.Password) + So(err, ShouldBeNil) + var i int32 + sz := []int32{} + for i = 0; i < 1048579*12; i += 12 { + sz = append(sz, i) + } + int1v, err := model.NewDataTypeListWithRaw(model.DtInt, sz) + So(err, ShouldBeNil) + int2v, err := model.NewDataTypeListWithRaw(model.DtInt, []int32{0, 1048576, model.NullInt, 3000000}) + So(err, ShouldBeNil) + int3v, err := model.NewDataTypeListWithRaw(model.DtInt, []int32{model.NullInt, model.NullInt, model.NullInt, model.NullInt}) + So(err, ShouldBeNil) + av := model.NewArrayVector([]*model.Vector{model.NewVector(int1v), model.NewVector(int2v), model.NewVector(int3v)}) + s := model.NewVectorWithArrayVector(av) + _, err = db.Upload(map[string]model.DataForm{"s": s}) + So(err, 
ShouldBeNil) + res, err := db.RunScript("s") + So(err, ShouldBeNil) + ty, _ := db.RunScript("typestr(s)") + re := res.(*model.Vector) + So(re.ColumnCount, ShouldEqual, 1048587) + So(re.Get(5).Value(), ShouldEqual, 5*12) + So(re.Get(995).Value(), ShouldEqual, 995*12) + So(re.Get(133546).Value(), ShouldEqual, 133546*12) + So(re.IsNull(1048579+2), ShouldBeTrue) + So(ty.String(), ShouldEqual, "string(FAST INT[] VECTOR)") + So(res.GetDataType(), ShouldEqual, model.DtInt+64) + So(db.Close(), ShouldBeNil) + }) +} diff --git a/test/connectionPool_test.go b/test/connectionPool_test.go new file mode 100644 index 0000000..7106f4d --- /dev/null +++ b/test/connectionPool_test.go @@ -0,0 +1,1643 @@ +package test + +import ( + "context" + "fmt" + "math/rand" + "strconv" + "testing" + "time" + + "github.com/dolphindb/api-go/api" + "github.com/dolphindb/api-go/model" + "github.com/dolphindb/api-go/test/setup" + . "github.com/smartystreets/goconvey/convey" +) + +var dbconnPool, _ = api.NewSimpleDolphinDBClient(context.TODO(), setup.Address, setup.UserName, setup.Password) + +func CheckConnectionPool(OriginConnectionNum []interface{}, NewConnectionNum []interface{}) bool { + for i := 0; i < len(OriginConnectionNum); i++ { + if OriginConnectionNum[i].(int32) >= NewConnectionNum[i].(int32) { + return false + } + } + return true +} + +func WaitConnectionPoolSuccess(OriginConnectionNum []interface{}) bool { + for { + NewConnectionNum := GetConnectionNum() + fmt.Println(NewConnectionNum) + res := CheckConnectionPool(OriginConnectionNum, NewConnectionNum) + if res == true { + break + } + time.Sleep(3 * time.Second) + continue + } + return true +} + +func GetConnectionNum() []interface{} { + Table, _ := dbconnPool.RunScript("select connectionNum, name from rpc(getControllerAlias(), getClusterPerf) where mode = 0") + tmpTable := Table.(*model.Table) + connectionNumList := tmpTable.GetColumnByName(tmpTable.GetColumnNames()[0]) + connectionNum := connectionNumList.Data.Value() + return connectionNum +} + +func GetOriginConnNum() []interface{} { + var OriginConnectionNum []interface{} + var i = 0 + for { + OriginConnectionNum = GetConnectionNum() + if OriginConnectionNum != nil && i == 9 { + break + } + i++ + } + time.Sleep(3 * time.Second) + OriginConnectionNum = GetConnectionNum() + return OriginConnectionNum +} + +func CheckConnectionNum(OriginConnectionNum []interface{}) bool { + Table, _ := dbconnPool.RunScript("select connectionNum, name from rpc(getControllerAlias(), getClusterPerf) where mode = 0") + tmpTable := Table.(*model.Table) + connectionNumList := tmpTable.GetColumnByName(tmpTable.GetColumnNames()[0]) + connectionNum := connectionNumList.Data.Value() + fmt.Printf("\nNewConnection:%v\n", connectionNum) + for i := 0; i < connectionNumList.Rows(); i++ { + if OriginConnectionNum[i].(int32) >= connectionNum[i].(int32) { + return false + } + for j := i; j < connectionNumList.Rows(); j++ { + if (connectionNum[j].(int32)-OriginConnectionNum[j].(int32))-(connectionNum[i].(int32)-OriginConnectionNum[i].(int32)) > 2 || (connectionNum[j].(int32)-OriginConnectionNum[j].(int32))-(connectionNum[i].(int32)-OriginConnectionNum[i].(int32)) < -2 { + return false + } + } + } + return true +} + +func TestDBConnectionPool_exception(t *testing.T) { + Convey("Test_function_DBConnectionPool_exception_test", t, func() { + Convey("Test_function_DBConnectionPool_wrong_address_exception \n", func() { + opt := &api.PoolOption{ + Address: "129.16.12.14", + UserID: setup.UserName, + Password: setup.Password, + PoolSize: 2, + 
LoadBalance: false, + } + _, err := api.NewDBConnectionPool(opt) + So(err, ShouldNotBeNil) + }) + Convey("Test_function_DBConnectionPool_address_nil_exception \n", func() { + opt := &api.PoolOption{ + Address: "", + UserID: setup.UserName, + Password: setup.Password, + PoolSize: 2, + LoadBalance: false, + } + _, err := api.NewDBConnectionPool(opt) + So(err, ShouldNotBeNil) + }) + Convey("Test_function_DBConnectionPool_wrong_userName_exception \n", func() { + opt := &api.PoolOption{ + Address: setup.Address, + UserID: "rootn1", + Password: setup.Password, + PoolSize: 2, + LoadBalance: false, + } + _, err := api.NewDBConnectionPool(opt) + So(err, ShouldNotBeNil) + }) + Convey("Test_function_DBConnectionPool_userName_null_exception \n", func() { + opt := &api.PoolOption{ + Address: setup.Address, + UserID: "", + Password: setup.Password, + PoolSize: 2, + LoadBalance: false, + } + _, err := api.NewDBConnectionPool(opt) + So(err, ShouldNotBeNil) + }) + Convey("Test_function_DBConnectionPool_wrong_Password_exception \n", func() { + opt := &api.PoolOption{ + Address: setup.Address, + UserID: setup.UserName, + Password: "rpoot120@", + PoolSize: 2, + LoadBalance: false, + } + _, err := api.NewDBConnectionPool(opt) + So(err, ShouldNotBeNil) + }) + Convey("Test_function_DBConnectionPool_wrong_Password_special_symbol_exception \n", func() { + opt := &api.PoolOption{ + Address: setup.Address, + UserID: setup.UserName, + Password: "!!!!!", + PoolSize: 2, + LoadBalance: false, + } + _, err := api.NewDBConnectionPool(opt) + So(err, ShouldNotBeNil) + }) + Convey("Test_function_DBConnectionPool_PoolSize_less_than_0_exception", func() { + opt := &api.PoolOption{ + Address: setup.Address, + UserID: setup.UserName, + Password: setup.Password, + PoolSize: -1, + LoadBalance: false, + } + _, err := api.NewDBConnectionPool(opt) + So(err, ShouldNotBeNil) + }) + Convey("Test_function_DBConnectionPool_SetLoadBalanceAddress_LoadBalance_false_exception", func() { + OriginConnectionNum := GetOriginConnNum() + fmt.Printf("\norigin connection:%v\n", OriginConnectionNum) + opt := &api.PoolOption{ + Address: setup.Address, + UserID: setup.UserName, + Password: setup.Password, + PoolSize: 5, + LoadBalance: false, + LoadBalanceAddresses: []string{setup.Address, setup.Address2, setup.Address3, setup.Address4}, + } + pool, err := api.NewDBConnectionPool(opt) + So(err, ShouldBeNil) + re := pool.GetPoolSize() + So(re, ShouldEqual, 5) + closed := pool.IsClosed() + So(closed, ShouldBeFalse) + err = pool.Close() + So(err, ShouldBeNil) + closed = pool.IsClosed() + So(closed, ShouldBeTrue) + }) + }) +} +func TestDBConnectionPool_Execute(t *testing.T) { + Convey("Test_function_DBConnectionPool_Execute", t, func() { + opt := &api.PoolOption{ + Address: setup.Address, + UserID: setup.UserName, + Password: setup.Password, + PoolSize: 2, + LoadBalance: false, + } + pool, err := api.NewDBConnectionPool(opt) + So(err, ShouldBeNil) + re := pool.GetPoolSize() + So(re, ShouldEqual, 2) + dt, err := model.NewDataType(model.DtString, "true") + So(err, ShouldBeNil) + s := model.NewScalar(dt) + task := &api.Task{ + Script: "typestr", + Args: []model.DataForm{s}, + } + err = pool.Execute([]*api.Task{task, task, task}) + So(err, ShouldBeNil) + err = task.GetError() + So(err, ShouldBeNil) + closed := pool.IsClosed() + So(closed, ShouldBeFalse) + err = pool.Close() + So(err, ShouldBeNil) + closed = pool.IsClosed() + So(closed, ShouldBeTrue) + }) +} +func TestDBConnectionPool_LoadBalance(t *testing.T) { + 
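
The pool tests exercise api.DBConnectionPool from two sides: NewDBConnectionPool validates the PoolOption up front (unreachable address, empty user, wrong password, non-positive PoolSize all fail), and Execute fans a slice of *api.Task out across the pooled connections, with per-task errors retrieved from each task afterwards. A minimal sketch of submitting a task, restricted to calls that appear in the tests and with placeholder credentials:

package main

import (
	"fmt"

	"github.com/dolphindb/api-go/api"
	"github.com/dolphindb/api-go/model"
)

func main() {
	opt := &api.PoolOption{
		Address:     "127.0.0.1:8848", // placeholder
		UserID:      "admin",          // placeholder
		Password:    "123456",         // placeholder
		PoolSize:    2,
		LoadBalance: false,
	}
	pool, err := api.NewDBConnectionPool(opt)
	if err != nil {
		panic(err) // option validation failed, e.g. bad address or PoolSize <= 0
	}
	defer pool.Close()

	dt, _ := model.NewDataType(model.DtString, "true")
	task := &api.Task{Script: "typestr", Args: []model.DataForm{model.NewScalar(dt)}}
	// Execute distributes the tasks over the pooled connections.
	if err := pool.Execute([]*api.Task{task}); err != nil {
		panic(err)
	}
	fmt.Println(task.GetError()) // nil if the individual task succeeded
}
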
Convey("Test_function_DBConnectionPool_LoadBalance_true", t, func() { + OriginConnectionNum := GetOriginConnNum() + fmt.Printf("\norigin connection:%v\n", OriginConnectionNum) + opt := &api.PoolOption{ + Address: setup.Address, + UserID: setup.UserName, + Password: setup.Password, + PoolSize: 5, + LoadBalance: true, + } + pool, err := api.NewDBConnectionPool(opt) + So(err, ShouldBeNil) + re := pool.GetPoolSize() + So(re, ShouldEqual, 5) + IsSucess := WaitConnectionPoolSuccess(OriginConnectionNum) + So(IsSucess, ShouldBeTrue) + connBalance := CheckConnectionNum(OriginConnectionNum) + So(connBalance, ShouldBeTrue) + closed := pool.IsClosed() + So(closed, ShouldBeFalse) + err = pool.Close() + So(err, ShouldBeNil) + closed = pool.IsClosed() + So(closed, ShouldBeTrue) + }) +} +func TestDBConnectionPool_SetLoadBalanceAddress(t *testing.T) { + Convey("Test_function_DBConnectionPool_SetLoadBalanceAddress", t, func() { + time.Sleep(3 * time.Second) + OriginConnectionNum := GetConnectionNum() + fmt.Printf("\norigin connection:%v\n", OriginConnectionNum) + opt := &api.PoolOption{ + Address: setup.Address, + UserID: setup.UserName, + Password: setup.Password, + PoolSize: 5, + LoadBalance: true, + LoadBalanceAddresses: []string{setup.Address, setup.Address2, setup.Address3, setup.Address4}, + } + pool, err := api.NewDBConnectionPool(opt) + So(err, ShouldBeNil) + re := pool.GetPoolSize() + So(re, ShouldEqual, 5) + IsSucess := WaitConnectionPoolSuccess(OriginConnectionNum) + So(IsSucess, ShouldBeTrue) + connBalance := CheckConnectionNum(OriginConnectionNum) + So(connBalance, ShouldBeTrue) + closed := pool.IsClosed() + So(closed, ShouldBeFalse) + err = pool.Close() + So(err, ShouldBeNil) + closed = pool.IsClosed() + So(closed, ShouldBeTrue) + }) +} + +func TestDBConnectionPool_hash_hash_string(t *testing.T) { + Convey("TestDBConnectionPool_hash_hash_string", t, func() { + _, err := dbconnPool.RunScript("t = table(timestamp(1..10) as datev,string(1..10) as sym)\n" + + "db1=database(\"\",HASH,[DATETIME,10])\n" + + "db2=database(\"\",HASH,[STRING,5])\n" + + "if(existsDatabase(\"dfs://demohash\")){\n" + + "\tdropDatabase(\"dfs://demohash\")}\n" + + "db=database(\"dfs://demohash\",COMPO,[db2,db1])\n" + + "pt=db.createPartitionedTable(t,`pt,`sym`datev)") + So(err, ShouldBeNil) + opt := &api.PoolOption{ + Address: setup.Address, + UserID: setup.UserName, + Password: setup.Password, + PoolSize: 3, + LoadBalance: true, + } + pool, err := api.NewDBConnectionPool(opt) + So(err, ShouldBeNil) + appenderOpt := &api.PartitionedTableAppenderOption{ + Pool: pool, + DBPath: "dfs://demohash", + TableName: "pt", + PartitionCol: "sym", + } + appender, err := api.NewPartitionedTableAppender(appenderOpt) + So(err, ShouldBeNil) + var symarr []string + var datetimearr []time.Time + for i := 0; i < 10000; i++ { + symarr = append(symarr, strconv.Itoa(i)) + datetimearr = append(datetimearr, time.Date(1969, time.Month(12), i, 23, i, 50, 000, time.UTC)) + } + sym, err := model.NewDataTypeListWithRaw(model.DtString, symarr) + So(err, ShouldBeNil) + datetimev, err := model.NewDataTypeListWithRaw(model.DtTimestamp, datetimearr) + So(err, ShouldBeNil) + newtable := model.NewTable([]string{"datev", "sym"}, []*model.Vector{model.NewVector(datetimev), model.NewVector(sym)}) + for i := 0; i < 100; i++ { + num, err := appender.Append(newtable) + AssertNil(err) + AssertEqual(num, 10000) + } + re, err := dbconnPool.RunScript("pt= loadTable(\"dfs://demohash\",`pt)\n" + + "exec count(*) from pt") + So(err, ShouldBeNil) + resultCount := 
re.(*model.Scalar).Value() + So(resultCount, ShouldEqual, int64(1000000)) + So(pool.IsClosed(), ShouldBeFalse) + err = pool.Close() + So(err, ShouldBeNil) + So(pool.IsClosed(), ShouldBeTrue) + }) +} + +func TestDBConnectionPool_value_hash_symbol(t *testing.T) { + Convey("TestDBConnectionPool_value_hash_symbol", t, func() { + _, err := dbconnPool.RunScript("t = table(timestamp(1..10) as datev,string(1..10) as sym)\n" + + "db1=database(\"\",VALUE,date(2022.01.01)+0..100)\n" + + "db2=database(\"\",HASH,[STRING,5])\n" + + "if(existsDatabase(\"dfs://demohash\")){\n" + + "\tdropDatabase(\"dfs://demohash\")}\n" + + "db=database(\"dfs://demohash\",COMPO,[db2,db1])\n" + + "pt=db.createPartitionedTable(t,`pt,`sym`datev)") + So(err, ShouldBeNil) + opt := &api.PoolOption{ + Address: setup.Address, + UserID: setup.UserName, + Password: setup.Password, + PoolSize: 3, + LoadBalance: true, + } + pool, err := api.NewDBConnectionPool(opt) + So(err, ShouldBeNil) + appenderOpt := &api.PartitionedTableAppenderOption{ + Pool: pool, + DBPath: "dfs://demohash", + TableName: "pt", + PartitionCol: "sym", + } + appender, err := api.NewPartitionedTableAppender(appenderOpt) + So(err, ShouldBeNil) + var symarr []string + var datetimearr []time.Time + for i := 0; i < 10000; i++ { + symarr = append(symarr, strconv.Itoa(i)) + datetimearr = append(datetimearr, time.Date(2022, time.Month(01), i, 23, 12, 50, 000, time.UTC)) + } + sym, err := model.NewDataTypeListWithRaw(model.DtString, symarr) + So(err, ShouldBeNil) + datetimev, err := model.NewDataTypeListWithRaw(model.DtTimestamp, datetimearr) + So(err, ShouldBeNil) + newtable := model.NewTable([]string{"datev", "sym"}, []*model.Vector{model.NewVector(datetimev), model.NewVector(sym)}) + for i := 0; i < 100; i++ { + num, err := appender.Append(newtable) + AssertNil(err) + AssertEqual(num, 10000) + } + re, err := dbconnPool.RunScript("pt= loadTable(\"dfs://demohash\",`pt)\n" + + "exec count(*) from pt") + So(err, ShouldBeNil) + resultCount := re.(*model.Scalar).Value() + So(resultCount, ShouldEqual, int64(1000000)) + So(pool.IsClosed(), ShouldBeFalse) + err = pool.Close() + So(err, ShouldBeNil) + So(pool.IsClosed(), ShouldBeTrue) + }) +} + +func TestDBConnectionPool_hash_hash_int(t *testing.T) { + Convey("TestDBConnectionPool_hash_hash_int", t, func() { + _, err := dbconnPool.RunScript("t = table(timestamp(1..10) as datev,1..10 as sym)\n" + + "db1=database(\"\",HASH,[DATETIME,10])\n" + + "db2=database(\"\",HASH,[INT,5])\n" + + "if(existsDatabase(\"dfs://demohash\")){\n" + + "\tdropDatabase(\"dfs://demohash\")}\n" + + "db=database(\"dfs://demohash\",COMPO,[db2,db1])\n" + + "pt=db.createPartitionedTable(t,`pt,`sym`datev)") + So(err, ShouldBeNil) + opt := &api.PoolOption{ + Address: setup.Address, + UserID: setup.UserName, + Password: setup.Password, + PoolSize: 3, + LoadBalance: true, + } + pool, err := api.NewDBConnectionPool(opt) + So(err, ShouldBeNil) + appenderOpt := &api.PartitionedTableAppenderOption{ + Pool: pool, + DBPath: "dfs://demohash", + TableName: "pt", + PartitionCol: "sym", + } + appender, err := api.NewPartitionedTableAppender(appenderOpt) + So(err, ShouldBeNil) + var symarr []int32 + var datetimearr []time.Time + for i := 0; i < 10000; i++ { + symarr = append(symarr, int32(i)) + datetimearr = append(datetimearr, time.Date(1969, time.Month(12), i, 23, i, 50, 000, time.UTC)) + } + sym, err := model.NewDataTypeListWithRaw(model.DtInt, symarr) + So(err, ShouldBeNil) + datetimev, err := model.NewDataTypeListWithRaw(model.DtTimestamp, datetimearr) + So(err, 
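
Each of the composite-partition tests in this file repeats the same appender workflow: create the partitioned table with a server script, open a pool, build a PartitionedTableAppender keyed on the partitioning column, and Append an in-memory model.Table in a loop. The shared skeleton, reduced to its moving parts, with placeholder credentials and assuming a table like the tests' dfs://demohash/pt already exists:

package main

import (
	"fmt"
	"time"

	"github.com/dolphindb/api-go/api"
	"github.com/dolphindb/api-go/model"
)

func main() {
	pool, err := api.NewDBConnectionPool(&api.PoolOption{
		Address:  "127.0.0.1:8848", // placeholder
		UserID:   "admin",          // placeholder
		Password: "123456",         // placeholder
		PoolSize: 3,
	})
	if err != nil {
		panic(err)
	}
	defer pool.Close()

	appender, err := api.NewPartitionedTableAppender(&api.PartitionedTableAppenderOption{
		Pool:         pool,
		DBPath:       "dfs://demohash", // must already exist, created as in the tests
		TableName:    "pt",
		PartitionCol: "sym", // rows are routed to partitions by this column
	})
	if err != nil {
		panic(err)
	}

	sym, _ := model.NewDataTypeListWithRaw(model.DtString, []string{"1", "2"})
	datev, _ := model.NewDataTypeListWithRaw(model.DtTimestamp, []time.Time{time.Now().UTC(), time.Now().UTC()})
	tbl := model.NewTable([]string{"datev", "sym"}, []*model.Vector{model.NewVector(datev), model.NewVector(sym)})
	n, err := appender.Append(tbl) // returns the number of rows written
	if err != nil {
		panic(err)
	}
	fmt.Println(n) // 2
}
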
ShouldBeNil) + newtable := model.NewTable([]string{"datev", "sym"}, []*model.Vector{model.NewVector(datetimev), model.NewVector(sym)}) + for i := 0; i < 100; i++ { + num, err := appender.Append(newtable) + AssertNil(err) + AssertEqual(num, 10000) + } + re, err := dbconnPool.RunScript("pt= loadTable(\"dfs://demohash\",`pt)\n" + + "exec count(*) from pt") + So(err, ShouldBeNil) + resultCount := re.(*model.Scalar).Value() + So(resultCount, ShouldEqual, int64(1000000)) + So(pool.IsClosed(), ShouldBeFalse) + err = pool.Close() + So(err, ShouldBeNil) + So(pool.IsClosed(), ShouldBeTrue) + }) +} + +func TestDBConnectionPool_value_hash_datetime(t *testing.T) { + Convey("TestDBConnectionPool_value_hash_datetime", t, func() { + _, err := dbconnPool.RunScript("\n" + + "t = table(datetime(1..10) as datev,string(1..10) as sym)\n" + + "db2=database(\"\",VALUE,string(0..10))\n" + + "db1=database(\"\",HASH,[DATETIME,10])\n" + + "if(existsDatabase(\"dfs://demohash\")){\n" + + "\tdropDatabase(\"dfs://demohash\")\n" + + "}\n" + + "db=database(\"dfs://demohash\",COMPO,[db2,db1])\n" + + "pt=db.createPartitionedTable(t,`pt,`sym`datev)\n") + So(err, ShouldBeNil) + opt := &api.PoolOption{ + Address: setup.Address, + UserID: setup.UserName, + Password: setup.Password, + PoolSize: 3, + LoadBalance: true, + } + pool, err := api.NewDBConnectionPool(opt) + So(err, ShouldBeNil) + appenderOpt := &api.PartitionedTableAppenderOption{ + Pool: pool, + DBPath: "dfs://demohash", + TableName: "pt", + PartitionCol: "sym", + } + appender, err := api.NewPartitionedTableAppender(appenderOpt) + So(err, ShouldBeNil) + var symarr []string + var datetimearr []time.Time + rand.Seed(time.Now().Unix()) + for i := 0; i < 10000; i++ { + rand.Seed(time.Now().Unix()) + symarr = append(symarr, strconv.Itoa(rand.Intn(10))) + datetimearr = append(datetimearr, time.Date(2022, time.Month(01), 1+i, 23, 12, 50, 000, time.UTC)) + } + sym, err := model.NewDataTypeListWithRaw(model.DtString, symarr) + So(err, ShouldBeNil) + datetimev, err := model.NewDataTypeListWithRaw(model.DtDatetime, datetimearr) + So(err, ShouldBeNil) + newtable := model.NewTable([]string{"datev", "sym"}, []*model.Vector{model.NewVector(datetimev), model.NewVector(sym)}) + for i := 0; i < 100; i++ { + num, err := appender.Append(newtable) + AssertNil(err) + AssertEqual(num, 10000) + } + re, err := dbconnPool.RunScript("pt= loadTable(\"dfs://demohash\",`pt)\n" + + "exec count(*) from pt") + So(err, ShouldBeNil) + resultCount := re.(*model.Scalar).Value() + So(resultCount, ShouldEqual, int64(1000000)) + So(pool.IsClosed(), ShouldBeFalse) + err = pool.Close() + So(err, ShouldBeNil) + So(pool.IsClosed(), ShouldBeTrue) + }) +} + +func TestDBConnectionPool_range_hash_date(t *testing.T) { + Convey("TestDBConnectionPool_range_hash_date", t, func() { + _, err := dbconnPool.RunScript("t = table(date(1..10) as datev,symbol(string(1..10)) as sym)\n" + + "db1=database(\"\",RANGE,date([0, 5, 11]))\n" + + "db2=database(\"\",HASH,[SYMBOL,15])\n" + + "if(existsDatabase(\"dfs://demohash\")){\n" + + "\tdropDatabase(\"dfs://demohash\")\n" + + "}\n" + + "db=database(\"dfs://demohash\",COMPO,[db1,db2])\n" + + "pt=db.createPartitionedTable(t,`pt,`datev`sym)\n") + So(err, ShouldBeNil) + opt := &api.PoolOption{ + Address: setup.Address, + UserID: setup.UserName, + Password: setup.Password, + PoolSize: 3, + LoadBalance: true, + } + pool, err := api.NewDBConnectionPool(opt) + So(err, ShouldBeNil) + appenderOpt := &api.PartitionedTableAppenderOption{ + Pool: pool, + DBPath: "dfs://demohash", + TableName: 
"pt", + PartitionCol: "sym", + } + appender, err := api.NewPartitionedTableAppender(appenderOpt) + So(err, ShouldBeNil) + var symarr []string + var datetimearr []time.Time + rand.Seed(time.Now().Unix()) + for i := 0; i < 10000; i++ { + rand.Seed(time.Now().Unix()) + symarr = append(symarr, strconv.Itoa(rand.Intn(10))) + datetimearr = append(datetimearr, time.Date(1970, time.Month(01), 3, 23, 12, 50, 000, time.UTC)) + } + sym, err := model.NewDataTypeListWithRaw(model.DtSymbol, symarr) + So(err, ShouldBeNil) + datetimev, err := model.NewDataTypeListWithRaw(model.DtDate, datetimearr) + So(err, ShouldBeNil) + newtable := model.NewTable([]string{"datev", "sym"}, []*model.Vector{model.NewVector(datetimev), model.NewVector(sym)}) + for i := 0; i < 100; i++ { + num, err := appender.Append(newtable) + AssertNil(err) + AssertEqual(num, 10000) + } + re, err := dbconnPool.RunScript("pt= loadTable(\"dfs://demohash\",`pt)\n" + + "exec count(*) from pt") + So(err, ShouldBeNil) + resultCount := re.(*model.Scalar).Value() + So(resultCount, ShouldEqual, int64(1000000)) + So(pool.IsClosed(), ShouldBeFalse) + err = pool.Close() + So(err, ShouldBeNil) + So(pool.IsClosed(), ShouldBeTrue) + }) +} + +func TestDBConnectionPool_range_range_int(t *testing.T) { + Convey("TestDBConnectionPool_range_range_int", t, func() { + _, err := dbconnPool.RunScript("\n" + + "t = table(nanotimestamp(1..10) as datev, 1..10 as sym)\n" + + "db1=database(\"\",RANGE,date(1970.01.01)+0..100*5)\n" + + "db2=database(\"\",RANGE,0 2 4 6 8 11)\n" + + "if(existsDatabase(\"dfs://demohash\")){\n" + + "\tdropDatabase(\"dfs://demohash\")\n" + + "}\n" + + "db =database(\"dfs://demohash\",COMPO,[db1,db2])\n" + + "pt = db.createPartitionedTable(t,`pt,`datev`sym)") + So(err, ShouldBeNil) + opt := &api.PoolOption{ + Address: setup.Address, + UserID: setup.UserName, + Password: setup.Password, + PoolSize: 3, + LoadBalance: true, + } + pool, err := api.NewDBConnectionPool(opt) + So(err, ShouldBeNil) + appenderOpt := &api.PartitionedTableAppenderOption{ + Pool: pool, + DBPath: "dfs://demohash", + TableName: "pt", + PartitionCol: "sym", + } + appender, err := api.NewPartitionedTableAppender(appenderOpt) + So(err, ShouldBeNil) + var symarr []int32 + var datetimearr []time.Time + rand.Seed(time.Now().Unix()) + for i := 0; i < 10000; i++ { + symarr = append(symarr, int32(rand.Intn(10))) + rand.Seed(time.Now().Unix()) + datetimearr = append(datetimearr, time.Date(1970, time.Month(01), 1+rand.Intn(300), 23, 12, 50, 789456478, time.UTC)) + } + sym, err := model.NewDataTypeListWithRaw(model.DtInt, symarr) + So(err, ShouldBeNil) + datetimev, err := model.NewDataTypeListWithRaw(model.DtNanoTimestamp, datetimearr) + So(err, ShouldBeNil) + newtable := model.NewTable([]string{"datev", "sym"}, []*model.Vector{model.NewVector(datetimev), model.NewVector(sym)}) + for i := 0; i < 100; i++ { + num, err := appender.Append(newtable) + AssertNil(err) + AssertEqual(num, 10000) + } + re, err := dbconnPool.RunScript("pt= loadTable(\"dfs://demohash\",`pt)\n" + + "exec count(*) from pt") + So(err, ShouldBeNil) + resultCount := re.(*model.Scalar).Value() + So(resultCount, ShouldEqual, int64(1000000)) + So(pool.IsClosed(), ShouldBeFalse) + err = pool.Close() + So(err, ShouldBeNil) + So(pool.IsClosed(), ShouldBeTrue) + }) +} + +func TestDBConnectionPool_value_range_int(t *testing.T) { + Convey("TestDBConnectionPool_value_range_int", t, func() { + _, err := dbconnPool.RunScript("\n" + + "t = table(timestamp(1..10) as datev,1..10 as sym)\n" + + 
"db1=database(\"\",VALUE,date(1970.01.01)+0..10)\n" + + "db2=database(\"\",RANGE,0 2 4 6 8 11)\n" + + "if(existsDatabase(\"dfs://demohash\")){\n" + + "\tdropDatabase(\"dfs://demohash\")\n" + + "}\n" + + "db =database(\"dfs://demohash\",COMPO,[db1,db2])\n" + + "pt = db.createPartitionedTable(t,`pt,`datev`sym)") + So(err, ShouldBeNil) + opt := &api.PoolOption{ + Address: setup.Address, + UserID: setup.UserName, + Password: setup.Password, + PoolSize: 3, + LoadBalance: true, + } + pool, err := api.NewDBConnectionPool(opt) + So(err, ShouldBeNil) + appenderOpt := &api.PartitionedTableAppenderOption{ + Pool: pool, + DBPath: "dfs://demohash", + TableName: "pt", + PartitionCol: "sym", + } + appender, err := api.NewPartitionedTableAppender(appenderOpt) + So(err, ShouldBeNil) + var symarr []int32 + var datetimearr []time.Time + rand.Seed(time.Now().Unix()) + for i := 0; i < 10000; i++ { + symarr = append(symarr, int32(rand.Intn(10))) + rand.Seed(time.Now().Unix()) + datetimearr = append(datetimearr, time.Date(1970, time.Month(01), 1+rand.Intn(10), 23, 12, 50, 789456478, time.UTC)) + } + sym, err := model.NewDataTypeListWithRaw(model.DtInt, symarr) + So(err, ShouldBeNil) + datetimev, err := model.NewDataTypeListWithRaw(model.DtTimestamp, datetimearr) + So(err, ShouldBeNil) + newtable := model.NewTable([]string{"datev", "sym"}, []*model.Vector{model.NewVector(datetimev), model.NewVector(sym)}) + for i := 0; i < 100; i++ { + num, err := appender.Append(newtable) + AssertNil(err) + AssertEqual(num, 10000) + } + re, err := dbconnPool.RunScript("pt= loadTable(\"dfs://demohash\",`pt)\n" + + "exec count(*) from pt") + So(err, ShouldBeNil) + resultCount := re.(*model.Scalar).Value() + So(resultCount, ShouldEqual, int64(1000000)) + So(pool.IsClosed(), ShouldBeFalse) + err = pool.Close() + So(err, ShouldBeNil) + So(pool.IsClosed(), ShouldBeTrue) + }) +} + +func TestDBConnectionPool_range_range_month(t *testing.T) { + Convey("TestDBConnectionPool_range_range_month", t, func() { + _, err := dbconnPool.RunScript("\n" + + "t = table(nanotimestamp(1..10) as datev,1..10 as sym)\n" + + "db2=database(\"\",RANGE,0 2 4 6 8 11)\n" + + "db1=database(\"\",RANGE,month(1970.01M)+0..100*5)\n" + + "if(existsDatabase(\"dfs://demohash\")){\n" + + "\tdropDatabase(\"dfs://demohash\")\n" + + "}\n" + + "db =database(\"dfs://demohash\",COMPO,[db2,db1])\n" + + "pt = db.createPartitionedTable(t,`pt,`sym`datev)") + So(err, ShouldBeNil) + opt := &api.PoolOption{ + Address: setup.Address, + UserID: setup.UserName, + Password: setup.Password, + PoolSize: 3, + LoadBalance: true, + } + pool, err := api.NewDBConnectionPool(opt) + So(err, ShouldBeNil) + appenderOpt := &api.PartitionedTableAppenderOption{ + Pool: pool, + DBPath: "dfs://demohash", + TableName: "pt", + PartitionCol: "sym", + } + appender, err := api.NewPartitionedTableAppender(appenderOpt) + So(err, ShouldBeNil) + var symarr []int32 + var datetimearr []time.Time + rand.Seed(time.Now().Unix()) + for i := 0; i < 10000; i++ { + symarr = append(symarr, int32(rand.Intn(10))) + rand.Seed(time.Now().Unix()) + datetimearr = append(datetimearr, time.Date(1970, time.Month(01), 1+rand.Intn(10), 23, 12, 50, 789456478, time.UTC)) + } + sym, err := model.NewDataTypeListWithRaw(model.DtInt, symarr) + So(err, ShouldBeNil) + datetimev, err := model.NewDataTypeListWithRaw(model.DtNanoTimestamp, datetimearr) + So(err, ShouldBeNil) + newtable := model.NewTable([]string{"datev", "sym"}, []*model.Vector{model.NewVector(datetimev), model.NewVector(sym)}) + for i := 0; i < 100; i++ { + num, err := 
appender.Append(newtable) + AssertNil(err) + AssertEqual(num, 10000) + } + re, err := dbconnPool.RunScript("pt= loadTable(\"dfs://demohash\",`pt)\n" + + "exec count(*) from pt") + So(err, ShouldBeNil) + resultCount := re.(*model.Scalar).Value() + So(resultCount, ShouldEqual, int64(1000000)) + So(pool.IsClosed(), ShouldBeFalse) + err = pool.Close() + So(err, ShouldBeNil) + So(pool.IsClosed(), ShouldBeTrue) + }) +} + +func TestDBConnectionPool_hash_range_date(t *testing.T) { + Convey("TestDBConnectionPool_hash_range_date", t, func() { + _, err := dbconnPool.RunScript("\n" + + "t = table(nanotimestamp(1..10) as datev, symbol(string(1..10)) as sym)\n" + + "db2=database(\"\",HASH,[SYMBOL,5])\n" + + "db1=database(\"\",RANGE,date(1970.01.01)+0..100)\n" + + "if(existsDatabase(\"dfs://demohash\")){\n" + + "\tdropDatabase(\"dfs://demohash\")\n" + + "}\n" + + "db =database(\"dfs://demohash\",COMPO,[db2,db1])\n" + + "pt = db.createPartitionedTable(t,`pt,`sym`datev)") + So(err, ShouldBeNil) + opt := &api.PoolOption{ + Address: setup.Address, + UserID: setup.UserName, + Password: setup.Password, + PoolSize: 3, + LoadBalance: true, + } + pool, err := api.NewDBConnectionPool(opt) + So(err, ShouldBeNil) + appenderOpt := &api.PartitionedTableAppenderOption{ + Pool: pool, + DBPath: "dfs://demohash", + TableName: "pt", + PartitionCol: "sym", + } + appender, err := api.NewPartitionedTableAppender(appenderOpt) + So(err, ShouldBeNil) + var symarr []string + var datetimearr []time.Time + rand.Seed(time.Now().Unix()) + for i := 0; i < 10000; i++ { + symarr = append(symarr, strconv.Itoa(rand.Intn(10))) + rand.Seed(time.Now().Unix()) + datetimearr = append(datetimearr, time.Date(1970, time.Month(01), 1+rand.Intn(10), 23, 12, 50, 789456478, time.UTC)) + } + sym, err := model.NewDataTypeListWithRaw(model.DtSymbol, symarr) + So(err, ShouldBeNil) + datetimev, err := model.NewDataTypeListWithRaw(model.DtNanoTimestamp, datetimearr) + So(err, ShouldBeNil) + newtable := model.NewTable([]string{"datev", "sym"}, []*model.Vector{model.NewVector(datetimev), model.NewVector(sym)}) + for i := 0; i < 100; i++ { + num, err := appender.Append(newtable) + AssertNil(err) + AssertEqual(num, 10000) + } + re, err := dbconnPool.RunScript("pt= loadTable(\"dfs://demohash\",`pt)\n" + + "exec count(*) from pt") + So(err, ShouldBeNil) + resultCount := re.(*model.Scalar).Value() + So(resultCount, ShouldEqual, int64(1000000)) + So(pool.IsClosed(), ShouldBeFalse) + err = pool.Close() + So(err, ShouldBeNil) + So(pool.IsClosed(), ShouldBeTrue) + }) +} + +func TestDBConnectionPool_hash_range_datetime(t *testing.T) { + Convey("TestDBConnectionPool_hash_range_datetime", t, func() { + _, err := dbconnPool.RunScript("\n" + + "t = table(datetime(1..10) as datev, symbol(string(1..10)) as sym)\n" + + "db2=database(\"\",HASH,[SYMBOL,5])\n" + + "db1=database(\"\",RANGE,datetime(1970.01.01T01:01:01)+0..10000*2)\n" + + "if(existsDatabase(\"dfs://demohash\")){\n" + + "\tdropDatabase(\"dfs://demohash\")\n" + + "}\n" + + "db =database(\"dfs://demohash\",COMPO,[db2,db1])\n" + + "pt = db.createPartitionedTable(t,`pt,`sym`datev)") + So(err, ShouldBeNil) + opt := &api.PoolOption{ + Address: setup.Address, + UserID: setup.UserName, + Password: setup.Password, + PoolSize: 3, + LoadBalance: true, + } + pool, err := api.NewDBConnectionPool(opt) + So(err, ShouldBeNil) + appenderOpt := &api.PartitionedTableAppenderOption{ + Pool: pool, + DBPath: "dfs://demohash", + TableName: "pt", + PartitionCol: "sym", + } + appender, err := api.NewPartitionedTableAppender(appenderOpt) + 
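
One caveat in the data-generation loops above: rand.Seed(time.Now().Unix()) is called inside the loop, so every iteration that falls within the same second reseeds the global generator to the same state and rand.Intn(10) keeps returning the same value. Seeding once, or using a dedicated rand.Rand, yields genuinely varied test data; a minimal sketch:

package main

import (
	"fmt"
	"math/rand"
	"time"
)

func main() {
	// Seed a private generator once instead of reseeding the global one per iteration.
	r := rand.New(rand.NewSource(time.Now().UnixNano()))
	syms := make([]string, 0, 10)
	for i := 0; i < 10; i++ {
		syms = append(syms, fmt.Sprint(r.Intn(10))) // varied values, even within one second
	}
	fmt.Println(syms)
}
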
+func TestDBConnectionPool_hash_range_datetime(t *testing.T) {
+	Convey("TestDBConnectionPool_hash_range_datetime", t, func() {
+		_, err := dbconnPool.RunScript("\n" +
+			"t = table(datetime(1..10) as datev, symbol(string(1..10)) as sym)\n" +
+			"db2=database(\"\",HASH,[SYMBOL,5])\n" +
+			"db1=database(\"\",RANGE,datetime(1970.01.01T01:01:01)+0..10000*2)\n" +
+			"if(existsDatabase(\"dfs://demohash\")){\n" +
+			"\tdropDatabase(\"dfs://demohash\")\n" +
+			"}\n" +
+			"db =database(\"dfs://demohash\",COMPO,[db2,db1])\n" +
+			"pt = db.createPartitionedTable(t,`pt,`sym`datev)")
+		So(err, ShouldBeNil)
+		opt := &api.PoolOption{
+			Address: setup.Address,
+			UserID: setup.UserName,
+			Password: setup.Password,
+			PoolSize: 3,
+			LoadBalance: true,
+		}
+		pool, err := api.NewDBConnectionPool(opt)
+		So(err, ShouldBeNil)
+		appenderOpt := &api.PartitionedTableAppenderOption{
+			Pool: pool,
+			DBPath: "dfs://demohash",
+			TableName: "pt",
+			PartitionCol: "sym",
+		}
+		appender, err := api.NewPartitionedTableAppender(appenderOpt)
+		So(err, ShouldBeNil)
+		var symarr []string
+		var datetimearr []time.Time
+		rand.Seed(time.Now().Unix())
+		for i := 0; i < 10000; i++ {
+			symarr = append(symarr, strconv.Itoa(rand.Intn(10)))
+			datetimearr = append(datetimearr, time.Date(1970, time.Month(01), 01, 01, 01, 01+rand.Intn(10), 000, time.UTC))
+		}
+		sym, err := model.NewDataTypeListWithRaw(model.DtSymbol, symarr)
+		So(err, ShouldBeNil)
+		datetimev, err := model.NewDataTypeListWithRaw(model.DtDatetime, datetimearr)
+		So(err, ShouldBeNil)
+		newtable := model.NewTable([]string{"datev", "sym"}, []*model.Vector{model.NewVector(datetimev), model.NewVector(sym)})
+		for i := 0; i < 100; i++ {
+			num, err := appender.Append(newtable)
+			AssertNil(err)
+			AssertEqual(num, 10000)
+		}
+		re, err := dbconnPool.RunScript("pt= loadTable(\"dfs://demohash\",`pt)\n" +
+			"exec count(*) from pt")
+		So(err, ShouldBeNil)
+		resultCount := re.(*model.Scalar).Value()
+		So(resultCount, ShouldEqual, int64(1000000))
+		So(pool.IsClosed(), ShouldBeFalse)
+		err = pool.Close()
+		So(err, ShouldBeNil)
+		So(pool.IsClosed(), ShouldBeTrue)
+	})
+}
+
+func TestDBConnectionPool_hash_value_symbol(t *testing.T) {
+	Convey("TestDBConnectionPool_hash_value_symbol", t, func() {
+		_, err := dbconnPool.RunScript("\n" +
+			"t = table(datetime(1..10) as datev, symbol(string(1..10)) as sym)\n" +
+			"db1=database(\"\",HASH,[DATETIME,10])\n" +
+			"db2=database(\"\",VALUE,string(1..10))\n" +
+			"if(existsDatabase(\"dfs://demohash\")){\n" +
+			"\tdropDatabase(\"dfs://demohash\")\n" +
+			"}\n" +
+			"db =database(\"dfs://demohash\",COMPO,[db1,db2])\n" +
+			"pt = db.createPartitionedTable(t,`pt,`datev`sym)")
+		So(err, ShouldBeNil)
+		opt := &api.PoolOption{
+			Address: setup.Address,
+			UserID: setup.UserName,
+			Password: setup.Password,
+			PoolSize: 3,
+			LoadBalance: true,
+		}
+		pool, err := api.NewDBConnectionPool(opt)
+		So(err, ShouldBeNil)
+		appenderOpt := &api.PartitionedTableAppenderOption{
+			Pool: pool,
+			DBPath: "dfs://demohash",
+			TableName: "pt",
+			PartitionCol: "sym",
+		}
+		appender, err := api.NewPartitionedTableAppender(appenderOpt)
+		So(err, ShouldBeNil)
+		var symarr []string
+		var datetimearr []time.Time
+		rand.Seed(time.Now().Unix())
+		for i := 0; i < 10000; i++ {
+			symarr = append(symarr, strconv.Itoa(rand.Intn(10)))
+			datetimearr = append(datetimearr, time.Date(2020, time.Month(02), 02, 01, 01, 01+rand.Intn(10), 000, time.UTC))
+		}
+		sym, err := model.NewDataTypeListWithRaw(model.DtSymbol, symarr)
+		So(err, ShouldBeNil)
+		datetimev, err := model.NewDataTypeListWithRaw(model.DtDatetime, datetimearr)
+		So(err, ShouldBeNil)
+		newtable := model.NewTable([]string{"datev", "sym"}, []*model.Vector{model.NewVector(datetimev), model.NewVector(sym)})
+		for i := 0; i < 100; i++ {
+			num, err := appender.Append(newtable)
+			AssertNil(err)
+			AssertEqual(num, 10000)
+		}
+		re, err := dbconnPool.RunScript("pt= loadTable(\"dfs://demohash\",`pt)\n" +
+			"exec count(*) from pt")
+		So(err, ShouldBeNil)
+		resultCount := re.(*model.Scalar).Value()
+		So(resultCount, ShouldEqual, int64(1000000))
+		So(pool.IsClosed(), ShouldBeFalse)
+		err = pool.Close()
+		So(err, ShouldBeNil)
+		So(pool.IsClosed(), ShouldBeTrue)
+	})
+}
+
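The only thing that really changes between these cases is the temporal type handed to model.NewDataTypeListWithRaw: the same []time.Time becomes DATETIME (second precision), TIMESTAMP (millisecond), or NANOTIMESTAMP (nanosecond) depending on the model.Dt* constant. A small sketch of that mapping, assuming the Dt* constants share the model.DataTypeByte type that NewDataTypeListWithRaw accepts:

import (
	"time"

	"github.com/dolphindb/api-go/model"
)

// Sketch: one []time.Time rendered as three DolphinDB temporal columns.
// Precision comments follow DolphinDB's type definitions.
func temporalColumns(ts []time.Time) ([]*model.Vector, error) {
	cols := make([]*model.Vector, 0, 3)
	for _, dt := range []model.DataTypeByte{
		model.DtDatetime,      // seconds
		model.DtTimestamp,     // milliseconds
		model.DtNanoTimestamp, // nanoseconds
	} {
		l, err := model.NewDataTypeListWithRaw(dt, ts)
		if err != nil {
			return nil, err
		}
		cols = append(cols, model.NewVector(l))
	}
	return cols, nil
}
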
"if(existsDatabase(\"dfs://demohash\")){\n" + + "\tdropDatabase(\"dfs://demohash\")\n" + + "}\n" + + "db =database(\"dfs://demohash\",COMPO,[db2,db1])\n" + + "pt = db.createPartitionedTable(t,`pt,`sym`datev)\n") + So(err, ShouldBeNil) + opt := &api.PoolOption{ + Address: setup.Address, + UserID: setup.UserName, + Password: setup.Password, + PoolSize: 3, + LoadBalance: true, + } + pool, err := api.NewDBConnectionPool(opt) + So(err, ShouldBeNil) + appenderOpt := &api.PartitionedTableAppenderOption{ + Pool: pool, + DBPath: "dfs://demohash", + TableName: "pt", + PartitionCol: "sym", + } + appender, err := api.NewPartitionedTableAppender(appenderOpt) + So(err, ShouldBeNil) + var symarr []string + var datetimearr []time.Time + rand.Seed(time.Now().Unix()) + for i := 0; i < 10000; i++ { + symarr = append(symarr, strconv.Itoa(rand.Intn(10))) + rand.Seed(time.Now().Unix()) + datetimearr = append(datetimearr, time.Date(2020, time.Month(02), 02, 01, 01, 01+rand.Intn(10), 000, time.UTC)) + } + sym, err := model.NewDataTypeListWithRaw(model.DtSymbol, symarr) + So(err, ShouldBeNil) + datetimev, err := model.NewDataTypeListWithRaw(model.DtTimestamp, datetimearr) + So(err, ShouldBeNil) + newtable := model.NewTable([]string{"datev", "sym"}, []*model.Vector{model.NewVector(datetimev), model.NewVector(sym)}) + for i := 0; i < 100; i++ { + num, err := appender.Append(newtable) + AssertNil(err) + AssertEqual(num, 10000) + } + re, err := dbconnPool.RunScript("pt= loadTable(\"dfs://demohash\",`pt)\n" + + "exec count(*) from pt") + So(err, ShouldBeNil) + resultCount := re.(*model.Scalar).Value() + So(resultCount, ShouldEqual, int64(1000000)) + So(pool.IsClosed(), ShouldBeFalse) + err = pool.Close() + So(err, ShouldBeNil) + So(pool.IsClosed(), ShouldBeTrue) + }) +} + +func TestDBConnectionPool_value_value_month(t *testing.T) { + Convey("TestDBConnectionPool_value_value_month", t, func() { + _, err := dbconnPool.RunScript("\n" + + "t = table(timestamp(1..10) as datev,string(1..10) as sym)\n" + + "db2=database(\"\",VALUE,string(1..10))\n" + + "db1=database(\"\",VALUE,month(2020.02M)+0..100)\n" + + "if(existsDatabase(\"dfs://demohash\")){\n" + + "\tdropDatabase(\"dfs://demohash\")\n" + + "}\n" + + "db =database(\"dfs://demohash\",COMPO,[db2,db1])\n" + + "pt = db.createPartitionedTable(t,`pt,`sym`datev)\n") + So(err, ShouldBeNil) + opt := &api.PoolOption{ + Address: setup.Address, + UserID: setup.UserName, + Password: setup.Password, + PoolSize: 3, + LoadBalance: true, + } + pool, err := api.NewDBConnectionPool(opt) + So(err, ShouldBeNil) + appenderOpt := &api.PartitionedTableAppenderOption{ + Pool: pool, + DBPath: "dfs://demohash", + TableName: "pt", + PartitionCol: "sym", + } + appender, err := api.NewPartitionedTableAppender(appenderOpt) + So(err, ShouldBeNil) + var symarr []string + var datetimearr []time.Time + rand.Seed(time.Now().Unix()) + for i := 0; i < 10000; i++ { + symarr = append(symarr, strconv.Itoa(rand.Intn(10))) + rand.Seed(time.Now().Unix()) + datetimearr = append(datetimearr, time.Date(2020, time.Month(02), 02, 01, 01, 01+rand.Intn(10), 000, time.UTC)) + } + sym, err := model.NewDataTypeListWithRaw(model.DtSymbol, symarr) + So(err, ShouldBeNil) + datetimev, err := model.NewDataTypeListWithRaw(model.DtTimestamp, datetimearr) + So(err, ShouldBeNil) + newtable := model.NewTable([]string{"datev", "sym"}, []*model.Vector{model.NewVector(datetimev), model.NewVector(sym)}) + for i := 0; i < 100; i++ { + num, err := appender.Append(newtable) + AssertNil(err) + AssertEqual(num, 10000) + } + re, err := 
+func TestDBConnectionPool_value_value_month(t *testing.T) {
+	Convey("TestDBConnectionPool_value_value_month", t, func() {
+		_, err := dbconnPool.RunScript("\n" +
+			"t = table(timestamp(1..10) as datev,string(1..10) as sym)\n" +
+			"db2=database(\"\",VALUE,string(1..10))\n" +
+			"db1=database(\"\",VALUE,month(2020.02M)+0..100)\n" +
+			"if(existsDatabase(\"dfs://demohash\")){\n" +
+			"\tdropDatabase(\"dfs://demohash\")\n" +
+			"}\n" +
+			"db =database(\"dfs://demohash\",COMPO,[db2,db1])\n" +
+			"pt = db.createPartitionedTable(t,`pt,`sym`datev)\n")
+		So(err, ShouldBeNil)
+		opt := &api.PoolOption{
+			Address: setup.Address,
+			UserID: setup.UserName,
+			Password: setup.Password,
+			PoolSize: 3,
+			LoadBalance: true,
+		}
+		pool, err := api.NewDBConnectionPool(opt)
+		So(err, ShouldBeNil)
+		appenderOpt := &api.PartitionedTableAppenderOption{
+			Pool: pool,
+			DBPath: "dfs://demohash",
+			TableName: "pt",
+			PartitionCol: "sym",
+		}
+		appender, err := api.NewPartitionedTableAppender(appenderOpt)
+		So(err, ShouldBeNil)
+		var symarr []string
+		var datetimearr []time.Time
+		rand.Seed(time.Now().Unix())
+		for i := 0; i < 10000; i++ {
+			symarr = append(symarr, strconv.Itoa(rand.Intn(10)))
+			datetimearr = append(datetimearr, time.Date(2020, time.Month(02), 02, 01, 01, 01+rand.Intn(10), 000, time.UTC))
+		}
+		sym, err := model.NewDataTypeListWithRaw(model.DtSymbol, symarr)
+		So(err, ShouldBeNil)
+		datetimev, err := model.NewDataTypeListWithRaw(model.DtTimestamp, datetimearr)
+		So(err, ShouldBeNil)
+		newtable := model.NewTable([]string{"datev", "sym"}, []*model.Vector{model.NewVector(datetimev), model.NewVector(sym)})
+		for i := 0; i < 100; i++ {
+			num, err := appender.Append(newtable)
+			AssertNil(err)
+			AssertEqual(num, 10000)
+		}
+		re, err := dbconnPool.RunScript("pt= loadTable(\"dfs://demohash\",`pt)\n" +
+			"exec count(*) from pt")
+		So(err, ShouldBeNil)
+		resultCount := re.(*model.Scalar).Value()
+		So(resultCount, ShouldEqual, int64(1000000))
+		So(pool.IsClosed(), ShouldBeFalse)
+		err = pool.Close()
+		So(err, ShouldBeNil)
+		So(pool.IsClosed(), ShouldBeTrue)
+	})
+}
+
+func TestDBConnectionPool_range_value_int(t *testing.T) {
+	Convey("TestDBConnectionPool_range_value_int", t, func() {
+		_, err := dbconnPool.RunScript("\n" +
+			"t = table(timestamp(1..10) as datev,int(1..10) as sym)\n" +
+			"db1=database(\"\",VALUE,date(now())+0..100)\n" +
+			"db2=database(\"\",RANGE,int(0..11))\n" +
+			"if(existsDatabase(\"dfs://demohash\")){\n" +
+			"\tdropDatabase(\"dfs://demohash\")\n" +
+			"}\n" +
+			"db =database(\"dfs://demohash\",COMPO,[db1,db2])\n" +
+			"pt = db.createPartitionedTable(t,`pt,`datev`sym)\n")
+		So(err, ShouldBeNil)
+		opt := &api.PoolOption{
+			Address: setup.Address,
+			UserID: setup.UserName,
+			Password: setup.Password,
+			PoolSize: 3,
+			LoadBalance: true,
+		}
+		pool, err := api.NewDBConnectionPool(opt)
+		So(err, ShouldBeNil)
+		appenderOpt := &api.PartitionedTableAppenderOption{
+			Pool: pool,
+			DBPath: "dfs://demohash",
+			TableName: "pt",
+			PartitionCol: "sym",
+		}
+		appender, err := api.NewPartitionedTableAppender(appenderOpt)
+		So(err, ShouldBeNil)
+		var symarr []int32
+		var datetimearr []time.Time
+		rand.Seed(time.Now().Unix())
+		for i := 0; i < 10000; i++ {
+			symarr = append(symarr, int32(rand.Intn(10)))
+			datetimearr = append(datetimearr, time.Date(2020, time.Month(02), 02, 01, 01, 01+rand.Intn(10), 000, time.UTC))
+		}
+		sym, err := model.NewDataTypeListWithRaw(model.DtInt, symarr)
+		So(err, ShouldBeNil)
+		datetimev, err := model.NewDataTypeListWithRaw(model.DtTimestamp, datetimearr)
+		So(err, ShouldBeNil)
+		newtable := model.NewTable([]string{"datev", "sym"}, []*model.Vector{model.NewVector(datetimev), model.NewVector(sym)})
+		for i := 0; i < 100; i++ {
+			num, err := appender.Append(newtable)
+			AssertNil(err)
+			AssertEqual(num, 10000)
+		}
+		re, err := dbconnPool.RunScript("pt= loadTable(\"dfs://demohash\",`pt)\n" +
+			"exec count(*) from pt")
+		So(err, ShouldBeNil)
+		resultCount := re.(*model.Scalar).Value()
+		So(resultCount, ShouldEqual, int64(1000000))
+		So(pool.IsClosed(), ShouldBeFalse)
+		err = pool.Close()
+		So(err, ShouldBeNil)
+		So(pool.IsClosed(), ShouldBeTrue)
+	})
+}
+
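Each of these cases appends a 10,000-row table 100 times, so the closing check expects exec count(*) to return 100 × 10,000 = 1,000,000, delivered as a *model.Scalar wrapping an int64. The verification step, distilled into a helper (assuming the api.DolphinDB client interface returned by api.NewSimpleDolphinDBClient, which is what these tests hold in dbconnPool):

import (
	"fmt"

	"github.com/dolphindb/api-go/api"
	"github.com/dolphindb/api-go/model"
)

// Sketch: read back a row count and compare it to appends * rowsPerAppend.
func verifyCount(conn api.DolphinDB, want int64) error {
	re, err := conn.RunScript("exec count(*) from loadTable(\"dfs://demohash\", `pt)")
	if err != nil {
		return err
	}
	got, ok := re.(*model.Scalar).Value().(int64)
	if !ok || got != want {
		return fmt.Errorf("count(*) = %v, want %d", re, want)
	}
	return nil
}
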
+func TestDBConnectionPool_loadBalance_false(t *testing.T) {
+	Convey("TestDBConnectionPool_loadBalance_false", t, func() {
+		_, err := dbconnPool.RunScript("\n" +
+			"t = table(timestamp(1..10) as datev,int(1..10) as sym)\n" +
+			"db1=database(\"\",VALUE,date(now())+0..100)\n" +
+			"db2=database(\"\",RANGE,int(0..11))\n" +
+			"if(existsDatabase(\"dfs://demohash\")){\n" +
+			"\tdropDatabase(\"dfs://demohash\")\n" +
+			"}\n" +
+			"db =database(\"dfs://demohash\",COMPO,[db1,db2])\n" +
+			"pt = db.createPartitionedTable(t,`pt,`datev`sym)\n")
+		So(err, ShouldBeNil)
+		opt := &api.PoolOption{
+			Address: setup.Address,
+			UserID: setup.UserName,
+			Password: setup.Password,
+			PoolSize: 3,
+			LoadBalance: false,
+		}
+		pool, err := api.NewDBConnectionPool(opt)
+		So(err, ShouldBeNil)
+		appenderOpt := &api.PartitionedTableAppenderOption{
+			Pool: pool,
+			DBPath: "dfs://demohash",
+			TableName: "pt",
+			PartitionCol: "sym",
+		}
+		appender, err := api.NewPartitionedTableAppender(appenderOpt)
+		So(err, ShouldBeNil)
+		var symarr []int32
+		var datetimearr []time.Time
+		rand.Seed(time.Now().Unix())
+		for i := 0; i < 10000; i++ {
+			symarr = append(symarr, int32(rand.Intn(10)))
+			datetimearr = append(datetimearr, time.Date(2020, time.Month(02), 02, 01, 01, 01+rand.Intn(10), 000, time.UTC))
+		}
+		sym, err := model.NewDataTypeListWithRaw(model.DtInt, symarr)
+		So(err, ShouldBeNil)
+		datetimev, err := model.NewDataTypeListWithRaw(model.DtTimestamp, datetimearr)
+		So(err, ShouldBeNil)
+		newtable := model.NewTable([]string{"datev", "sym"}, []*model.Vector{model.NewVector(datetimev), model.NewVector(sym)})
+		for i := 0; i < 100; i++ {
+			num, err := appender.Append(newtable)
+			AssertNil(err)
+			AssertEqual(num, 10000)
+		}
+		re, err := dbconnPool.RunScript("pt= loadTable(\"dfs://demohash\",`pt)\n" +
+			"exec count(*) from pt")
+		So(err, ShouldBeNil)
+		resultCount := re.(*model.Scalar).Value()
+		So(resultCount, ShouldEqual, int64(1000000))
+		So(pool.IsClosed(), ShouldBeFalse)
+		err = pool.Close()
+		So(err, ShouldBeNil)
+		So(pool.IsClosed(), ShouldBeTrue)
+	})
+}
+
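The closing assertions are identical in every test: IsClosed is false while the pool is live, Close returns nil, and IsClosed flips to true afterwards. A sketch of a shared teardown helper that would fold those three checks into one call (assuming the *api.DBConnectionPool type returned by api.NewDBConnectionPool):

import (
	"testing"

	"github.com/dolphindb/api-go/api"
)

// Sketch: one teardown enforcing the IsClosed/Close contract asserted above.
func closePool(t *testing.T, pool *api.DBConnectionPool) {
	t.Helper()
	if pool.IsClosed() {
		t.Fatal("pool reported closed before Close")
	}
	if err := pool.Close(); err != nil {
		t.Fatalf("Close: %v", err)
	}
	if !pool.IsClosed() {
		t.Fatal("pool still reported open after Close")
	}
}
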
+func TestPartitionedTableAppender(t *testing.T) {
+	Convey("Test_function_PartitionedTableAppender_prepare", t, func() {
+		Convey("Test_function_PartitionedTableAppender_range_int", func() {
+			_, err := dbconnPool.RunScript(`
+				dbPath = "dfs://PTA_test"
+				if(existsDatabase(dbPath))
+					dropDatabase(dbPath)
+				t = table(100:100, ["sym", "id", "datev", "price"],[SYMBOL, INT, DATE, DOUBLE])
+				db=database(dbPath, RANGE, [0, 11, 21, 31])
+				pt = db.createPartitionedTable(t, "pt", "id")
+			`)
+			So(err, ShouldBeNil)
+			pool := CreateDBConnectionPool(10, false)
+			appenderOpt := &api.PartitionedTableAppenderOption{
+				Pool: pool,
+				DBPath: "dfs://PTA_test",
+				TableName: "pt",
+				PartitionCol: "id",
+			}
+			appender, err := api.NewPartitionedTableAppender(appenderOpt)
+			So(err, ShouldBeNil)
+			sym, err := model.NewDataTypeListWithRaw(model.DtString, []string{"AAPL", "BLS", "DBKS", "NDLN", "DBKS"})
+			So(err, ShouldBeNil)
+			id, err := model.NewDataTypeListWithRaw(model.DtInt, []int32{2, 10, 12, 22, 23})
+			So(err, ShouldBeNil)
+			datev, err := model.NewDataTypeListWithRaw(model.DtDate, []time.Time{time.Date(1970, time.Month(1), 1, 1, 1, 0, 0, time.UTC), time.Date(1969, time.Month(12), 1, 1, 1, 0, 0, time.UTC), time.Date(1970, time.Month(3), 1, 1, 1, 0, 0, time.UTC), time.Date(1969, time.Month(10), 1, 1, 1, 0, 0, time.UTC), time.Date(1970, time.Month(5), 1, 1, 1, 0, 0, time.UTC)})
+			So(err, ShouldBeNil)
+			price, err := model.NewDataTypeListWithRaw(model.DtDouble, []float64{21.2, 4.4, 5.5, 2.3, 6.6})
+			So(err, ShouldBeNil)
+			newtable := model.NewTable([]string{"sym", "id", "datev", "price"}, []*model.Vector{model.NewVector(sym), model.NewVector(id), model.NewVector(datev), model.NewVector(price)})
+			num, err := appender.Append(newtable)
+			So(err, ShouldBeNil)
+			So(num, ShouldEqual, 5)
+			re, err := dbconnPool.RunScript("select * from loadTable('dfs://PTA_test', 'pt')")
+			So(err, ShouldBeNil)
+			resultTable := re.(*model.Table)
+			resultSym := resultTable.GetColumnByName("sym").Data.Value()
+			tmp := []string{"AAPL", "BLS", "DBKS", "NDLN", "DBKS"}
+			for i := 0; i < resultTable.Rows(); i++ {
+				So(resultSym[i], ShouldEqual, tmp[i])
+			}
+			resultID := resultTable.GetColumnByName("id")
+			So(resultID, ShouldResemble, model.NewVector(id))
+			resultDatev := resultTable.GetColumnByName("datev")
+			So(resultDatev, ShouldResemble, model.NewVector(datev))
+			resultPrice := resultTable.GetColumnByName("price")
+			So(resultPrice, ShouldResemble, model.NewVector(price))
+			err = pool.Close()
+			So(err, ShouldBeNil)
+		})
+		Convey("Test_function_PartitionedTableAppender_value_symbol", func() {
+			_, err := dbconnPool.RunScript(`
+				dbPath = "dfs://PTA_test"
+				if(existsDatabase(dbPath))
+					dropDatabase(dbPath)
+				t = table(100:100, ["sym", "id", "datev", "price"],[SYMBOL, INT, DATE, DOUBLE])
+				db=database(dbPath, VALUE, symbol("A"+string(1..6)))
+				pt = db.createPartitionedTable(t, "pt", "sym")
+			`)
+			So(err, ShouldBeNil)
+			pool := CreateDBConnectionPool(10, false)
+			appenderOpt := &api.PartitionedTableAppenderOption{
+				Pool: pool,
+				DBPath: "dfs://PTA_test",
+				TableName: "pt",
+				PartitionCol: "sym",
+			}
+			appender, err := api.NewPartitionedTableAppender(appenderOpt)
+			So(err, ShouldBeNil)
+			sym, err := model.NewDataTypeListWithRaw(model.DtString, []string{"A1", "A2", "A3", "A4", "A5"})
+			So(err, ShouldBeNil)
+			id, err := model.NewDataTypeListWithRaw(model.DtInt, []int32{2, 7, 12, 22, 24})
+			So(err, ShouldBeNil)
+			datev, err := model.NewDataTypeListWithRaw(model.DtDate, []time.Time{time.Date(1970, time.Month(1), 1, 1, 1, 0, 0, time.UTC), time.Date(1969, time.Month(12), 1, 1, 1, 0, 0, time.UTC), time.Date(1970, time.Month(3), 1, 1, 1, 0, 0, time.UTC), time.Date(1969, time.Month(10), 1, 1, 1, 0, 0, time.UTC), time.Date(1970, time.Month(5), 1, 1, 1, 0, 0, time.UTC)})
+			So(err, ShouldBeNil)
+			price, err := model.NewDataTypeListWithRaw(model.DtDouble, []float64{21.2, 4.4, 5.5, 2.3, 6.6})
+			So(err, ShouldBeNil)
+			newtable := model.NewTable([]string{"sym", "id", "datev", "price"}, []*model.Vector{model.NewVector(sym), model.NewVector(id), model.NewVector(datev), model.NewVector(price)})
+			num, err := appender.Append(newtable)
+			So(err, ShouldBeNil)
+			So(num, ShouldEqual, 5)
+			re, err := dbconnPool.RunScript("select * from loadTable('dfs://PTA_test', 'pt') order by id, sym, datev, price")
+			So(err, ShouldBeNil)
+			resultTable := re.(*model.Table)
+			resultSym := resultTable.GetColumnByName("sym").Data.Value()
+			tmp := []string{"A1", "A2", "A3", "A4", "A5"}
+			for i := 0; i < resultTable.Rows(); i++ {
+				So(resultSym[i], ShouldEqual, tmp[i])
+			}
+			resultID := resultTable.GetColumnByName("id")
+			So(resultID, ShouldResemble, model.NewVector(id))
+			resultDatev := resultTable.GetColumnByName("datev")
+			So(resultDatev, ShouldResemble, model.NewVector(datev))
+			resultPrice := resultTable.GetColumnByName("price")
+			So(resultPrice, ShouldResemble, model.NewVector(price))
+			err = pool.Close()
+			So(err, ShouldBeNil)
+		})
+		Convey("Test_function_PartitionedTableAppender_hash_symbol", func() {
+			_, err := dbconnPool.RunScript(`
+				dbPath = "dfs://PTA_test"
+				if(existsDatabase(dbPath))
+					dropDatabase(dbPath)
+				t = table(100:100, ["sym", "id", "datev", "price"],[SYMBOL, INT, DATE, DOUBLE])
+				db=database(dbPath, HASH, [SYMBOL, 5])
+				pt = db.createPartitionedTable(t, "pt", "sym")
+			`)
+			So(err, ShouldBeNil)
+			pool := CreateDBConnectionPool(10, false)
+			appenderOpt := &api.PartitionedTableAppenderOption{
+				Pool: pool,
+				DBPath: "dfs://PTA_test",
+				TableName: "pt",
+				PartitionCol: "sym",
+			}
+			appender, err := api.NewPartitionedTableAppender(appenderOpt)
+			So(err, ShouldBeNil)
+			sym, err := model.NewDataTypeListWithRaw(model.DtString, []string{"A1", "A2", "A3", "A4", "A5"})
+			So(err, ShouldBeNil)
+			id, err := model.NewDataTypeListWithRaw(model.DtInt, []int32{2, 7, 12, 22, 24})
+			So(err, ShouldBeNil)
+			datev, err := model.NewDataTypeListWithRaw(model.DtDate, []time.Time{time.Date(1970, time.Month(1), 1, 1, 1, 0, 0, time.UTC), time.Date(1969, time.Month(12), 1, 1, 1, 0, 0, time.UTC), time.Date(1970, time.Month(3), 1, 1, 1, 0, 0, time.UTC), time.Date(1969, time.Month(10), 1, 1, 1, 0, 0, time.UTC), time.Date(1970, time.Month(5), 1, 1, 1, 0, 0, time.UTC)})
+			So(err, ShouldBeNil)
+			price, err := model.NewDataTypeListWithRaw(model.DtDouble, []float64{21.2, 4.4, 5.5, 2.3, 6.6})
+			So(err, ShouldBeNil)
+			newtable := model.NewTable([]string{"sym", "id", "datev", "price"}, []*model.Vector{model.NewVector(sym), model.NewVector(id), model.NewVector(datev), model.NewVector(price)})
+			num, err := appender.Append(newtable)
+			So(err, ShouldBeNil)
+			So(num, ShouldEqual, 5)
+			re, err := dbconnPool.RunScript("select * from loadTable('dfs://PTA_test', 'pt') order by id, sym, datev, price")
+			So(err, ShouldBeNil)
+			resultTable := re.(*model.Table)
+			resultSym := resultTable.GetColumnByName("sym").Data.Value()
+			tmp := []string{"A1", "A2", "A3", "A4", "A5"}
+			for i := 0; i < resultTable.Rows(); i++ {
+				So(resultSym[i], ShouldEqual, tmp[i])
+			}
+			resultID := resultTable.GetColumnByName("id")
+			So(resultID, ShouldResemble, model.NewVector(id))
+			resultDatev := resultTable.GetColumnByName("datev")
+			So(resultDatev, ShouldResemble, model.NewVector(datev))
+			resultPrice := resultTable.GetColumnByName("price")
+			So(resultPrice, ShouldResemble, model.NewVector(price))
+			err = pool.Close()
+			So(err, ShouldBeNil)
+		})
+		Convey("Test_function_PartitionedTableAppender_list_symbol", func() {
+			_, err := dbconnPool.RunScript(`
+				dbPath = "dfs://PTA_test"
+				if(existsDatabase(dbPath))
+					dropDatabase(dbPath)
+				t = table(100:100, ["sym", "id", "datev", "price"],[SYMBOL, INT, DATE, DOUBLE])
+				db=database(dbPath, LIST, [["A1", "A2"], ["A3", "A4", "A5"]])
+				pt = db.createPartitionedTable(t, "pt", "sym")
+			`)
+			So(err, ShouldBeNil)
+			pool := CreateDBConnectionPool(10, false)
+			appenderOpt := &api.PartitionedTableAppenderOption{
+				Pool: pool,
+				DBPath: "dfs://PTA_test",
+				TableName: "pt",
+				PartitionCol: "sym",
+			}
+			appender, err := api.NewPartitionedTableAppender(appenderOpt)
+			So(err, ShouldBeNil)
+			sym, err := model.NewDataTypeListWithRaw(model.DtString, []string{"A1", "A2", "A3", "A4", "A5"})
+			So(err, ShouldBeNil)
+			id, err := model.NewDataTypeListWithRaw(model.DtInt, []int32{2, 7, 12, 22, 24})
+			So(err, ShouldBeNil)
+			datev, err := model.NewDataTypeListWithRaw(model.DtDate, []time.Time{time.Date(1970, time.Month(1), 1, 1, 1, 0, 0, time.UTC), time.Date(1969, time.Month(12), 1, 1, 1, 0, 0, time.UTC), time.Date(1970, time.Month(3), 1, 1, 1, 0, 0, time.UTC), time.Date(1969, time.Month(10), 1, 1, 1, 0, 0, time.UTC), time.Date(1970, time.Month(5), 1, 1, 1, 0, 0, time.UTC)})
+			So(err, ShouldBeNil)
+			price, err := model.NewDataTypeListWithRaw(model.DtDouble, []float64{21.2, 4.4, 5.5, 2.3, 6.6})
+			So(err, ShouldBeNil)
+			newtable := model.NewTable([]string{"sym", "id", "datev", "price"}, []*model.Vector{model.NewVector(sym), model.NewVector(id), model.NewVector(datev), model.NewVector(price)})
+			num, err := appender.Append(newtable)
+			So(err, ShouldBeNil)
+			So(num, ShouldEqual, 5)
+			re, err := dbconnPool.RunScript("select * from loadTable('dfs://PTA_test', 'pt') order by id, sym, datev, price")
+			So(err, ShouldBeNil)
+			resultTable := re.(*model.Table)
+			resultSym := resultTable.GetColumnByName("sym").Data.Value()
+			tmp := []string{"A1", "A2", "A3", "A4", "A5"}
+			for i := 0; i < resultTable.Rows(); i++ {
+				So(resultSym[i], ShouldEqual, tmp[i])
+			}
+			resultID := resultTable.GetColumnByName("id")
+			So(resultID, ShouldResemble, model.NewVector(id))
+			resultDatev := resultTable.GetColumnByName("datev")
+			So(resultDatev, ShouldResemble, model.NewVector(datev))
+			resultPrice := resultTable.GetColumnByName("price")
+			So(resultPrice, ShouldResemble, model.NewVector(price))
+			err = pool.Close()
+			So(err, ShouldBeNil)
+		})
+		Convey("Test_function_PartitionedTableAppender_compo_value_list_symbol", func() {
+			_, err := dbconnPool.RunScript(`
+				dbPath = "dfs://PTA_test"
+				if(existsDatabase(dbPath)){dropDatabase(dbPath)}
+				t=table(100:100, ["sym", "id", "datev", "price"], [SYMBOL, INT, DATE, DOUBLE])
+				db1=database(, VALUE, 1969.12.30..1970.01.03)
+				db=database(dbPath, LIST, [["A1", "A2"], ["A3", "A4", "A5"]])
+				pt=db.createPartitionedTable(t, "pt", "sym")
+			`)
+			So(err, ShouldBeNil)
+			pool := CreateDBConnectionPool(10, false)
+			appenderOpt := &api.PartitionedTableAppenderOption{
+				Pool: pool,
+				DBPath: "dfs://PTA_test",
+				TableName: "pt",
+				PartitionCol: "sym",
+			}
+			appender, err := api.NewPartitionedTableAppender(appenderOpt)
+			So(err, ShouldBeNil)
+			sym, err := model.NewDataTypeListWithRaw(model.DtString, []string{"A1", "A2", "A3", "A4", "A5"})
+			So(err, ShouldBeNil)
+			id, err := model.NewDataTypeListWithRaw(model.DtInt, []int32{2, 7, 12, 22, 24})
+			So(err, ShouldBeNil)
+			datev, err := model.NewDataTypeListWithRaw(model.DtDate, []time.Time{time.Date(1970, time.Month(1), 1, 1, 1, 0, 0, time.UTC), time.Date(1969, time.Month(12), 1, 1, 1, 0, 0, time.UTC), time.Date(1970, time.Month(3), 1, 1, 1, 0, 0, time.UTC), time.Date(1969, time.Month(10), 1, 1, 1, 0, 0, time.UTC), time.Date(1970, time.Month(5), 1, 1, 1, 0, 0, time.UTC)})
+			So(err, ShouldBeNil)
+			price, err := model.NewDataTypeListWithRaw(model.DtDouble, []float64{21.2, 4.4, 5.5, 2.3, 6.6})
+			So(err, ShouldBeNil)
+			newtable := model.NewTable([]string{"sym", "id", "datev", "price"}, []*model.Vector{model.NewVector(sym), model.NewVector(id), model.NewVector(datev), model.NewVector(price)})
+			num, err := appender.Append(newtable)
+			So(err, ShouldBeNil)
+			So(num, ShouldEqual, 5)
+			re, err := dbconnPool.RunScript("select * from loadTable('dfs://PTA_test', 'pt') order by id, sym, datev, price")
+			So(err, ShouldBeNil)
+			resultTable := re.(*model.Table)
+			resultSym := resultTable.GetColumnByName("sym").Data.Value()
+			tmp := []string{"A1", "A2", "A3", "A4", "A5"}
+			for i := 0; i < resultTable.Rows(); i++ {
+				So(resultSym[i], ShouldEqual, tmp[i])
+			}
+			resultID := resultTable.GetColumnByName("id")
+			So(resultID, ShouldResemble, model.NewVector(id))
+			resultDatev := resultTable.GetColumnByName("datev")
+			So(resultDatev, ShouldResemble, model.NewVector(datev))
+			resultPrice := resultTable.GetColumnByName("price")
+			So(resultPrice, ShouldResemble, model.NewVector(price))
+			err = pool.Close()
+			So(err, ShouldBeNil)
+		})
+	})
+}
+
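The two task cases below drive the pool directly with api.Task: build a slice of scripts, hand it to pool.Execute, and the pool spreads the tasks over its PoolSize connections. Execute appears to be synchronous here, since the tests read back counts immediately after it returns. A stripped-down sketch of that dispatch with trivial placeholder scripts:

import (
	"strconv"

	"github.com/dolphindb/api-go/api"
)

// Sketch: fan N independent scripts out over an existing pool.
func runTasks(pool *api.DBConnectionPool) error {
	tasks := make([]*api.Task, 0, 100)
	for i := 0; i < 100; i++ {
		tasks = append(tasks, &api.Task{Script: "sum(1.." + strconv.Itoa(i+1) + ")"})
	}
	// The tests below verify side effects afterwards via RunScript
	// rather than reading task results here.
	return pool.Execute(tasks)
}
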
node);" + + "pt = loadTable(\"dfs://test_DBConnectionPool\",`pt1);" + + "pt.append!(t)", + } + taskList = append(taskList, task) + } + err = pool.Execute(taskList) + So(err, ShouldBeNil) + resultData, err := dbconnPool.RunScript("int(exec count(*) from loadTable(\"dfs://test_DBConnectionPool\",`pt1))") + So(err, ShouldBeNil) + resultCount := resultData.(*model.Scalar) + So(resultCount.Value(), ShouldEqual, 10000) + reNodesPort, err := dbconnPool.RunScript("exec nodePort from loadTable(\"dfs://test_DBConnectionPool\",`pt1) group by nodePort order by nodePort") + So(err, ShouldBeNil) + exNodesPort, err := dbconnPool.RunScript("exec value from pnodeRun(getNodePort) order by value") + So(err, ShouldBeNil) + So(reNodesPort.String(), ShouldEqual, exNodesPort.String()) + closed := pool.IsClosed() + So(closed, ShouldBeFalse) + err = pool.Close() + So(err, ShouldBeNil) + closed = pool.IsClosed() + So(closed, ShouldBeTrue) + }) + Convey("TestDBConnectionPool_task_large_than_PoolSize", t, func() { + _, err := dbconnPool.RunScript("db_path = \"dfs://test_DBConnectionPool\";\n" + + "if(existsDatabase(db_path)){\n" + + " dropDatabase(db_path)\n" + + "}\n" + + "db = database(db_path, VALUE, 1..100);\n" + + "t = table(10:0,`id`sym`price`nodePort,[INT,SYMBOL,DOUBLE,INT])\n" + + "pt1 = db.createPartitionedTable(t,`pt1,`id)") + So(err, ShouldBeNil) + opt := &api.PoolOption{ + Address: setup.Address, + UserID: setup.UserName, + Password: setup.Password, + PoolSize: 10, + LoadBalance: true, + } + pool, err := api.NewDBConnectionPool(opt) + So(err, ShouldBeNil) + re := pool.GetPoolSize() + So(re, ShouldEqual, 10) + taskList := []*api.Task{} + for i := 0; i < 100; i++ { + task := &api.Task{ + Script: "t = table(int(take(" + strconv.Itoa(i) + ",100)) as id,rand(`a`b`c`d,100) as sym,int(rand(100,100)) as price,take(getNodePort(),100) as node);" + + "pt = loadTable(\"dfs://test_DBConnectionPool\",`pt1);" + + "pt.append!(t)", + } + taskList = append(taskList, task) + } + err = pool.Execute(taskList) + So(err, ShouldBeNil) + resultData, err := dbconnPool.RunScript("int(exec count(*) from loadTable(\"dfs://test_DBConnectionPool\",`pt1))") + So(err, ShouldBeNil) + resultCount := resultData.(*model.Scalar) + So(resultCount.Value(), ShouldEqual, 10000) + reNodesPort, err := dbconnPool.RunScript("exec nodePort from loadTable(\"dfs://test_DBConnectionPool\",`pt1) group by nodePort order by nodePort") + So(err, ShouldBeNil) + exNodesPort, err := dbconnPool.RunScript("exec value from pnodeRun(getNodePort) order by value") + So(err, ShouldBeNil) + So(reNodesPort.String(), ShouldEqual, exNodesPort.String()) + closed := pool.IsClosed() + So(closed, ShouldBeFalse) + err = pool.Close() + So(err, ShouldBeNil) + closed = pool.IsClosed() + So(closed, ShouldBeTrue) + }) +} + +func TestTableAppender(t *testing.T) { + Convey("Test_function_TableAppender_prepare", t, func() { + Convey("Test_function_TableAppender_range_int", func() { + _, err := dbconnPool.RunScript(` + t = table(100:0, ["sym", "id", "datev", "price"],[SYMBOL, INT, DATE, DOUBLE]) + `) + So(err, ShouldBeNil) + appenderOpt := &api.TableAppenderOption{ + TableName: "t", + Conn: dbconnPool, + } + appender := api.NewTableAppender(appenderOpt) + sym, err := model.NewDataTypeListWithRaw(model.DtString, []string{"AAPL", "BLS", "DBKS", "NDLN", "DBKS"}) + So(err, ShouldBeNil) + id, err := model.NewDataTypeListWithRaw(model.DtInt, []int32{2, 10, 12, 22, 23}) + So(err, ShouldBeNil) + datev, err := model.NewDataTypeListWithRaw(model.DtDate, []time.Time{time.Date(1970, time.Month(1), 
+func TestTableAppender(t *testing.T) {
+	Convey("Test_function_TableAppender_prepare", t, func() {
+		Convey("Test_function_TableAppender_range_int", func() {
+			_, err := dbconnPool.RunScript(`
+				t = table(100:0, ["sym", "id", "datev", "price"],[SYMBOL, INT, DATE, DOUBLE])
+			`)
+			So(err, ShouldBeNil)
+			appenderOpt := &api.TableAppenderOption{
+				TableName: "t",
+				Conn: dbconnPool,
+			}
+			appender := api.NewTableAppender(appenderOpt)
+			sym, err := model.NewDataTypeListWithRaw(model.DtString, []string{"AAPL", "BLS", "DBKS", "NDLN", "DBKS"})
+			So(err, ShouldBeNil)
+			id, err := model.NewDataTypeListWithRaw(model.DtInt, []int32{2, 10, 12, 22, 23})
+			So(err, ShouldBeNil)
+			datev, err := model.NewDataTypeListWithRaw(model.DtDate, []time.Time{time.Date(1970, time.Month(1), 1, 1, 1, 0, 0, time.UTC), time.Date(1969, time.Month(12), 1, 1, 1, 0, 0, time.UTC), time.Date(1970, time.Month(3), 1, 1, 1, 0, 0, time.UTC), time.Date(1969, time.Month(10), 1, 1, 1, 0, 0, time.UTC), time.Date(1970, time.Month(5), 1, 1, 1, 0, 0, time.UTC)})
+			So(err, ShouldBeNil)
+			price, err := model.NewDataTypeListWithRaw(model.DtDouble, []float64{21.2, 4.4, 5.5, 2.3, 6.6})
+			So(err, ShouldBeNil)
+			newtable := model.NewTable([]string{"sym", "id", "datev", "price"}, []*model.Vector{model.NewVector(sym), model.NewVector(id), model.NewVector(datev), model.NewVector(price)})
+			_, err = appender.Append(newtable)
+			So(err, ShouldBeNil)
+			re, err := dbconnPool.RunScript("t")
+			So(err, ShouldBeNil)
+			resultTable := re.(*model.Table)
+			resultSym := resultTable.GetColumnByName("sym").Data.Value()
+			tmp := []string{"AAPL", "BLS", "DBKS", "NDLN", "DBKS"}
+			for i := 0; i < resultTable.Rows(); i++ {
+				So(resultSym[i], ShouldEqual, tmp[i])
+			}
+			resultID := resultTable.GetColumnByName("id")
+			So(resultID, ShouldResemble, model.NewVector(id))
+			resultDatev := resultTable.GetColumnByName("datev")
+			So(resultDatev, ShouldResemble, model.NewVector(datev))
+			resultPrice := resultTable.GetColumnByName("price")
+			So(resultPrice, ShouldResemble, model.NewVector(price))
+			IsClose := appender.IsClosed()
+			So(IsClose, ShouldBeFalse)
+			err = appender.Close()
+			So(err, ShouldBeNil)
+			IsClose = appender.IsClosed()
+			So(IsClose, ShouldBeTrue)
+		})
+		Convey("Test_function_TableAppender_disk", func() {
+			dbconnPoolx, err := api.NewSimpleDolphinDBClient(context.TODO(), setup.Address, setup.UserName, setup.Password)
+			So(err, ShouldBeNil)
+			_, err = dbconnPoolx.RunScript(`
+				dbPath = "` + DiskDBPath + `"
+				if(exists(dbPath))
+					rmdir(dbPath, true)
+				t = table(100:100, ["sym", "id", "datev", "price"],[SYMBOL, INT, DATE, DOUBLE])
+				db=database(dbPath, RANGE, symbol("A"+string(1..7)))
+				pt = db.createPartitionedTable(t, "pt", "sym")
+			`)
+			So(err, ShouldBeNil)
+			appenderOpt := &api.TableAppenderOption{
+				DBPath: DiskDBPath,
+				TableName: "pt",
+				Conn: dbconnPoolx,
+			}
+			appender := api.NewTableAppender(appenderOpt)
+			So(err, ShouldBeNil)
+			sym, err := model.NewDataTypeListWithRaw(model.DtString, []string{"A1", "A2", "A3", "A4", "A5"})
+			So(err, ShouldBeNil)
+			id, err := model.NewDataTypeListWithRaw(model.DtInt, []int32{2, 7, 12, 22, 24})
+			So(err, ShouldBeNil)
+			datev, err := model.NewDataTypeListWithRaw(model.DtDate, []time.Time{time.Date(1970, time.Month(1), 1, 1, 1, 0, 0, time.UTC), time.Date(1969, time.Month(12), 1, 1, 1, 0, 0, time.UTC), time.Date(1970, time.Month(3), 1, 1, 1, 0, 0, time.UTC), time.Date(1969, time.Month(10), 1, 1, 1, 0, 0, time.UTC), time.Date(1970, time.Month(5), 1, 1, 1, 0, 0, time.UTC)})
+			So(err, ShouldBeNil)
+			price, err := model.NewDataTypeListWithRaw(model.DtDouble, []float64{21.2, 4.4, 5.5, 2.3, 6.6})
+			So(err, ShouldBeNil)
+			newtable := model.NewTable([]string{"sym", "id", "datev", "price"}, []*model.Vector{model.NewVector(sym), model.NewVector(id), model.NewVector(datev), model.NewVector(price)})
+			_, err = appender.Append(newtable)
+			So(err, ShouldBeNil)
+			re, err := dbconnPoolx.RunScript("select * from loadTable(\"" + DiskDBPath + "\", 'pt') order by id, sym, datev, price")
+			So(err, ShouldBeNil)
+			resultTable := re.(*model.Table)
+			resultSym := resultTable.GetColumnByName("sym").Data.Value()
+			tmp := []string{"A1", "A2", "A3", "A4", "A5"}
+			for i := 0; i < resultTable.Rows(); i++ {
+				So(resultSym[i], ShouldEqual, tmp[i])
+			}
+			resultID :=
resultTable.GetColumnByName("id") + So(resultID, ShouldResemble, model.NewVector(id)) + resultDatev := resultTable.GetColumnByName("datev") + So(resultDatev, ShouldResemble, model.NewVector(datev)) + resultPrice := resultTable.GetColumnByName("price") + So(resultPrice, ShouldResemble, model.NewVector(price)) + IsClose := appender.IsClosed() + So(IsClose, ShouldBeFalse) + err = appender.Close() + So(err, ShouldBeNil) + IsClose = appender.IsClosed() + So(IsClose, ShouldBeTrue) + dbconnPoolx.Close() + }) + Convey("Test_function_TableAppender_dfsTable", func() { + dbconnPoolx, err := api.NewSimpleDolphinDBClient(context.TODO(), setup.Address, setup.UserName, setup.Password) + So(err, ShouldBeNil) + _, err = dbconnPoolx.RunScript(` + dbPath = "` + DfsDBPath + `" + if(existsDatabase(dbPath)) + dropDatabase(dbPath) + t = table(100:100, ["sym", "id", "datev", "price"],[SYMBOL, INT, DATE, DOUBLE]) + db=database(dbPath, VALUE, symbol("A"+string(1..6))) + pt = db.createPartitionedTable(t, "pt", "sym") + `) + So(err, ShouldBeNil) + pool := CreateDBConnectionPool(10, false) + appenderOpt := &api.TableAppenderOption{ + DBPath: DfsDBPath, + TableName: "pt", + Conn: dbconnPoolx, + } + appender := api.NewTableAppender(appenderOpt) + So(err, ShouldBeNil) + sym, err := model.NewDataTypeListWithRaw(model.DtString, []string{"A1", "A2", "A3", "A4", "A5"}) + So(err, ShouldBeNil) + id, err := model.NewDataTypeListWithRaw(model.DtInt, []int32{2, 7, 12, 22, 24}) + So(err, ShouldBeNil) + datev, err := model.NewDataTypeListWithRaw(model.DtDate, []time.Time{time.Date(1970, time.Month(1), 1, 1, 1, 0, 0, time.UTC), time.Date(1969, time.Month(12), 1, 1, 1, 0, 0, time.UTC), time.Date(1970, time.Month(3), 1, 1, 1, 0, 0, time.UTC), time.Date(1969, time.Month(10), 1, 1, 1, 0, 0, time.UTC), time.Date(1970, time.Month(5), 1, 1, 1, 0, 0, time.UTC)}) + So(err, ShouldBeNil) + price, err := model.NewDataTypeListWithRaw(model.DtDouble, []float64{21.2, 4.4, 5.5, 2.3, 6.6}) + So(err, ShouldBeNil) + newtable := model.NewTable([]string{"sym", "id", "datev", "price"}, []*model.Vector{model.NewVector(sym), model.NewVector(id), model.NewVector(datev), model.NewVector(price)}) + _, err = appender.Append(newtable) + So(err, ShouldBeNil) + re, err := dbconnPoolx.RunScript("select * from loadTable('" + DfsDBPath + "', 'pt') order by id, sym, datev, price") + So(err, ShouldBeNil) + resultTable := re.(*model.Table) + resultSym := resultTable.GetColumnByName("sym").Data.Value() + tmp := []string{"A1", "A2", "A3", "A4", "A5"} + for i := 0; i < resultTable.Rows(); i++ { + So(resultSym[i], ShouldEqual, tmp[i]) + } + resultID := resultTable.GetColumnByName("id") + So(resultID, ShouldResemble, model.NewVector(id)) + resultDatev := resultTable.GetColumnByName("datev") + So(resultDatev, ShouldResemble, model.NewVector(datev)) + resultPrice := resultTable.GetColumnByName("price") + So(resultPrice, ShouldResemble, model.NewVector(price)) + err = pool.Close() + So(err, ShouldBeNil) + err = dbconnPoolx.Close() + So(err, ShouldBeNil) + }) + }) +} + +func TestDBconnPoolClose(t *testing.T) { + dbconnPool.Close() +} diff --git a/test/createDatabase_test.go b/test/createDatabase_test.go new file mode 100644 index 0000000..080f45e --- /dev/null +++ b/test/createDatabase_test.go @@ -0,0 +1,624 @@ +package test + +import ( + "context" + "testing" + "time" + + "github.com/dolphindb/api-go/api" + "github.com/dolphindb/api-go/model" + "github.com/dolphindb/api-go/test/setup" + . 
"github.com/smartystreets/goconvey/convey" + "github.com/stretchr/testify/assert" +) + +func TestCreateDatabase(t *testing.T) { + Convey("Test_CreateDatabase_prepare", t, func() { + ddb, err := api.NewSimpleDolphinDBClient(context.TODO(), setup.Address, setup.UserName, setup.Password) + So(err, ShouldBeNil) + Convey("Test_CreateDatabase_dropDatabase", func() { + dbPaths := []string{DfsDBPath, DiskDBPath} + for _, dbPath := range dbPaths { + script := ` + if(existsDatabase("` + dbPath + `")){ + dropDatabase("` + dbPath + `") + } + if(exists("` + dbPath + `")){ + rmdir("` + dbPath + `", true) + } + ` + _, err = ddb.RunScript(script) + So(err, ShouldBeNil) + } + }) + Convey("Test_CreateDatabase_olap_value_partition", func() { + re1, err := ExistsDatabase(ddb, DfsDBPath) + So(err, ShouldBeNil) + So(re1, ShouldBeFalse) + database, err := CreateDatabase(ddb, DfsDBPath, DBhandler, "VALUE", "2010.01.01..2010.01.30", "", "", "") + So(err, ShouldBeNil) + re2, err := ExistsDatabase(ddb, DfsDBPath) + So(err, ShouldBeNil) + So(re2, ShouldBeTrue) + re3, err := ddb.RunScript("schema(db)") + So(err, ShouldBeNil) + re4 := re3.(*model.Dictionary) + rePartitionType, _ := re4.Get("partitionType") + So(rePartitionType.Value().(*model.Scalar).Value(), ShouldEqual, 1) + reChunkGranularity, _ := re4.Get("chunkGranularity") + So(reChunkGranularity.Value().(*model.Scalar).Value(), ShouldEqual, "TABLE") + reAtomic, _ := re4.Get("atomic") + So(reAtomic.Value().(*model.Scalar).Value(), ShouldEqual, "TRANS") + rePartitionSites, _ := re4.Get("partitionSites") + So(rePartitionSites.Value().(*model.Scalar).IsNull(), ShouldBeTrue) + rePartitionTypeName, _ := re4.Get("partitionTypeName") + So(rePartitionTypeName.Value().(*model.Scalar).Value(), ShouldEqual, "VALUE") + rePartitionSchema, _ := re4.Get("partitionSchema") + j := 0 + for i := 30; i > 0; i-- { + datev := time.Date(2010, time.January, i, 0, 0, 0, 0, time.UTC) + tmpPartitionSchema := append([]time.Time{}, datev) + So(rePartitionSchema.Value().(*model.Vector).Data.Value()[j], ShouldEqual, tmpPartitionSchema[0]) + j++ + } + reDatabaseDir, _ := re4.Get("databaseDir") + So(reDatabaseDir.Value().(*model.Scalar).Value(), ShouldEqual, DfsDBPath) + _, err = ddb.RunScript("n=10") + So(err, ShouldBeNil) + _, err = CreateMemTable(ddb, "t", "datev", "id", "sym", "val", "sort(take(2010.01.01..2010.12.31, n))", "1..n", `take(["AMD", "QWE", "CES", "DOP", "ASZ", "FSD", "BBVC", "AWQ", "DS"], n)`, "take([39, 50, 5, 24, 79, 39, 8, 67, 29, 55], n)") + So(err, ShouldBeNil) + // create dfsTable + dfsTable, err := CreateDefPartitionedTable(database, "t", DfsTBName1, []string{"datev"}) + So(err, ShouldBeNil) + resultDatev := dfsTable.Data.GetColumnByName(dfsTable.Data.GetColumnNames()[0]) + So(resultDatev.Data.IsNull(0), ShouldBeTrue) + resultInt := dfsTable.Data.GetColumnByName(dfsTable.Data.GetColumnNames()[1]) + So(resultInt.Data.IsNull(0), ShouldBeTrue) + resultSym := dfsTable.Data.GetColumnByName(dfsTable.Data.GetColumnNames()[2]) + So(resultSym.Data.IsNull(0), ShouldBeTrue) + resultVal := dfsTable.Data.GetColumnByName(dfsTable.Data.GetColumnNames()[3]) + So(resultVal.Data.IsNull(0), ShouldBeTrue) + _, err = ddb.RunScript(`select * from loadTable("` + DfsDBPath + `", "` + DfsTBName1 + `").append!(t)`) + So(err, ShouldBeNil) + newdfstable, err := LoadPartitionedTable(ddb, DfsTBName1, DfsDBPath) + So(err, ShouldBeNil) + for i := 1; i <= 10; i++ { + resultDatev = newdfstable.Data.GetColumnByName(dfsTable.Data.GetColumnNames()[0]) + re := resultDatev.Data.Value() + datev := 
time.Date(2010, time.January, i, 0, 0, 0, 0, time.UTC) + tmp := []time.Time{datev} + assert.Equal(t, re[i-1], tmp[0]) + } + resultInt = newdfstable.Data.GetColumnByName(dfsTable.Data.GetColumnNames()[1]) + re := resultInt.Data.Value() + tmpInt := []int32{1, 2, 3, 4, 5, 6, 7, 8, 9, 10} + for i := 0; i < resultInt.Rows(); i++ { + So(re[i], ShouldEqual, tmpInt[i]) + } + resultSym = newdfstable.Data.GetColumnByName(dfsTable.Data.GetColumnNames()[2]) + re = resultSym.Data.Value() + tmpSym := []string{"AMD", "QWE", "CES", "DOP", "ASZ", "FSD", "BBVC", "AWQ", "DS", "AMD"} + for i := 0; i < resultSym.Rows(); i++ { + So(re[i], ShouldEqual, tmpSym[i]) + } + resultVal = newdfstable.Data.GetColumnByName(dfsTable.Data.GetColumnNames()[3]) + re = resultVal.Data.Value() + tmpVal := []int32{39, 50, 5, 24, 79, 39, 8, 67, 29, 55} + for i := 0; i < resultVal.Rows(); i++ { + assert.Equal(t, re[i], tmpVal[i]) + } + // create dimensionTable + _, err = CreateTable(database, "t", TbName1) + So(err, ShouldBeNil) + _, err = ddb.RunScript(`select * from loadTable("` + DfsDBPath + `", "` + TbName1 + `").append!(t)`) + So(err, ShouldBeNil) + dimensionTable, err := LoadTable(ddb, TbName1, DfsDBPath) + So(err, ShouldBeNil) + for i := 1; i <= 10; i++ { + resultDatev = dimensionTable.Data.GetColumnByName(dfsTable.Data.GetColumnNames()[0]) + re := resultDatev.Data.Value() + datev := time.Date(2010, time.January, i, 0, 0, 0, 0, time.UTC) + tmp := []time.Time{datev} + assert.Equal(t, re[i-1], tmp[0]) + } + resultInt = dimensionTable.Data.GetColumnByName(dfsTable.Data.GetColumnNames()[1]) + re = resultInt.Data.Value() + tmpInt = []int32{1, 2, 3, 4, 5, 6, 7, 8, 9, 10} + for i := 0; i < resultInt.Rows(); i++ { + So(re[i], ShouldEqual, tmpInt[i]) + } + resultSym = dimensionTable.Data.GetColumnByName(dfsTable.Data.GetColumnNames()[2]) + re = resultSym.Data.Value() + tmpSym = []string{"AMD", "QWE", "CES", "DOP", "ASZ", "FSD", "BBVC", "AWQ", "DS", "AMD"} + for i := 0; i < resultSym.Rows(); i++ { + So(re[i], ShouldEqual, tmpSym[i]) + } + resultVal = dimensionTable.Data.GetColumnByName(dfsTable.Data.GetColumnNames()[3]) + re = resultVal.Data.Value() + tmpVal = []int32{39, 50, 5, 24, 79, 39, 8, 67, 29, 55} + for i := 0; i < resultVal.Rows(); i++ { + assert.Equal(t, re[i], tmpVal[i]) + } + err = DropDatabase(ddb, DfsDBPath) + So(err, ShouldBeNil) + re6, err := ExistsDatabase(ddb, DfsDBPath) + So(err, ShouldBeNil) + So(re6, ShouldBeFalse) + }) + Convey("Test_CreateDatabase_olap_range_partition", func() { + re1, err := ExistsDatabase(ddb, DfsDBPath) + So(err, ShouldBeNil) + So(re1, ShouldBeFalse) + database, err := CreateDatabase(ddb, DfsDBPath, DBhandler, "RANGE", "0 3 5 10", "", "", "") + So(err, ShouldBeNil) + re2, err := ExistsDatabase(ddb, DfsDBPath) + So(err, ShouldBeNil) + So(re2, ShouldBeTrue) + re3, err := ddb.RunScript("schema(db)") + So(err, ShouldBeNil) + re4 := re3.(*model.Dictionary) + rePartitionType, _ := re4.Get("partitionType") + So(rePartitionType.Value().(*model.Scalar).Value(), ShouldEqual, 2) + reChunkGranularity, _ := re4.Get("chunkGranularity") + So(reChunkGranularity.Value().(*model.Scalar).Value(), ShouldEqual, "TABLE") + reAtomic, _ := re4.Get("atomic") + So(reAtomic.Value().(*model.Scalar).Value(), ShouldEqual, "TRANS") + rePartitionSites, _ := re4.Get("partitionSites") + So(rePartitionSites.Value().(*model.Scalar).IsNull(), ShouldBeTrue) + rePartitionTypeName, _ := re4.Get("partitionTypeName") + So(rePartitionTypeName.Value().(*model.Scalar).Value(), ShouldEqual, "RANGE") + rePartitionSchema, _ := 
re4.Get("partitionSchema") + tmpPartitionSchema := []int{0, 3, 5, 10} + for i := 0; i < len(rePartitionSchema.Value().(*model.Vector).Data.Value()); i++ { + So(rePartitionSchema.Value().(*model.Vector).Data.Value()[i], ShouldEqual, tmpPartitionSchema[i]) + } + reDatabaseDir, _ := re4.Get("databaseDir") + So(reDatabaseDir.Value().(*model.Scalar).Value(), ShouldEqual, DfsDBPath) + _, err = ddb.RunScript("n=10") + So(err, ShouldBeNil) + _, err = CreateMemTable(ddb, "t", "datev", "id", "sym", "val", "take(2010.01.01..2010.01.31, n)", "[1,4,5,5,6,6,6,6,8,8]", `take(["AMD", "QWE", "CES", "DOP", "ASZ", "FSD", "BBVC", "AWQ", "DS"], n)`, "take([39, 50, 5, 24, 79, 39, 8, 67, 29, 55], n)") + So(err, ShouldBeNil) + // create dfsTable + dfsTable, err := CreateDefPartitionedTable(database, "t", DfsTBName1, []string{"id"}) + So(err, ShouldBeNil) + resultDatev := dfsTable.Data.GetColumnByName(dfsTable.Data.GetColumnNames()[0]) + So(resultDatev.Data.IsNull(0), ShouldBeTrue) + resultInt := dfsTable.Data.GetColumnByName(dfsTable.Data.GetColumnNames()[1]) + So(resultInt.Data.IsNull(0), ShouldBeTrue) + resultSym := dfsTable.Data.GetColumnByName(dfsTable.Data.GetColumnNames()[2]) + So(resultSym.Data.IsNull(0), ShouldBeTrue) + resultVal := dfsTable.Data.GetColumnByName(dfsTable.Data.GetColumnNames()[3]) + So(resultVal.Data.IsNull(0), ShouldBeTrue) + _, err = ddb.RunScript(`select * from loadTable("` + DfsDBPath + `", "` + DfsTBName1 + `").append!(t)`) + So(err, ShouldBeNil) + newdfstable, err := LoadPartitionedTable(ddb, DfsTBName1, DfsDBPath) + So(err, ShouldBeNil) + for i := 1; i <= 10; i++ { + resultDatev = newdfstable.Data.GetColumnByName(dfsTable.Data.GetColumnNames()[0]) + re := resultDatev.Data.Value() + datev := time.Date(2010, time.January, i, 0, 0, 0, 0, time.UTC) + tmp := []time.Time{datev} + assert.Equal(t, re[i-1], tmp[0]) + } + resultInt = newdfstable.Data.GetColumnByName(dfsTable.Data.GetColumnNames()[1]) + re := resultInt.Data.Value() + tmpInt := []int32{1, 4, 5, 5, 6, 6, 6, 6, 8, 8} + for i := 0; i < resultInt.Rows(); i++ { + So(re[i], ShouldEqual, tmpInt[i]) + } + resultSym = newdfstable.Data.GetColumnByName(dfsTable.Data.GetColumnNames()[2]) + re = resultSym.Data.Value() + tmpSym := []string{"AMD", "QWE", "CES", "DOP", "ASZ", "FSD", "BBVC", "AWQ", "DS", "AMD"} + for i := 0; i < resultSym.Rows(); i++ { + So(re[i], ShouldEqual, tmpSym[i]) + } + resultVal = newdfstable.Data.GetColumnByName(dfsTable.Data.GetColumnNames()[3]) + re = resultVal.Data.Value() + tmpVal := []int32{39, 50, 5, 24, 79, 39, 8, 67, 29, 55} + for i := 0; i < resultVal.Rows(); i++ { + assert.Equal(t, re[i], tmpVal[i]) + } + // create dimensionTable + _, err = CreateTable(database, "t", TbName1) + So(err, ShouldBeNil) + _, err = ddb.RunScript(`select * from loadTable("` + DfsDBPath + `", "` + TbName1 + `").append!(t)`) + So(err, ShouldBeNil) + dimensionTable, err := LoadTable(ddb, TbName1, DfsDBPath) + So(err, ShouldBeNil) + for i := 1; i <= 10; i++ { + resultDatev = dimensionTable.Data.GetColumnByName(dfsTable.Data.GetColumnNames()[0]) + re := resultDatev.Data.Value() + datev := time.Date(2010, time.January, i, 0, 0, 0, 0, time.UTC) + tmp := []time.Time{datev} + assert.Equal(t, re[i-1], tmp[0]) + } + resultInt = dimensionTable.Data.GetColumnByName(dfsTable.Data.GetColumnNames()[1]) + re = resultInt.Data.Value() + tmpInt = []int32{1, 4, 5, 5, 6, 6, 6, 6, 8, 8} + for i := 0; i < resultInt.Rows(); i++ { + So(re[i], ShouldEqual, tmpInt[i]) + } + resultSym = dimensionTable.Data.GetColumnByName(dfsTable.Data.GetColumnNames()[2]) + re 
= resultSym.Data.Value() + tmpSym = []string{"AMD", "QWE", "CES", "DOP", "ASZ", "FSD", "BBVC", "AWQ", "DS", "AMD"} + for i := 0; i < resultSym.Rows(); i++ { + So(re[i], ShouldEqual, tmpSym[i]) + } + resultVal = dimensionTable.Data.GetColumnByName(dfsTable.Data.GetColumnNames()[3]) + re = resultVal.Data.Value() + tmpVal = []int32{39, 50, 5, 24, 79, 39, 8, 67, 29, 55} + for i := 0; i < resultVal.Rows(); i++ { + assert.Equal(t, re[i], tmpVal[i]) + } + err = DropDatabase(ddb, DfsDBPath) + So(err, ShouldBeNil) + re6, err := ExistsDatabase(ddb, DfsDBPath) + So(err, ShouldBeNil) + So(re6, ShouldBeFalse) + }) + Convey("Test_CreateDatabase_olap_hash_partition", func() { + re1, err := ExistsDatabase(ddb, DfsDBPath) + So(err, ShouldBeNil) + So(re1, ShouldBeFalse) + database, err := CreateDatabase(ddb, DfsDBPath, DBhandler, "HASH", "[INT, 3]", "", "", "") + So(err, ShouldBeNil) + re2, err := ExistsDatabase(ddb, DfsDBPath) + So(err, ShouldBeNil) + So(re2, ShouldBeTrue) + re3, err := ddb.RunScript("schema(db)") + So(err, ShouldBeNil) + re4 := re3.(*model.Dictionary) + rePartitionType, _ := re4.Get("partitionType") + So(rePartitionType.Value().(*model.Scalar).Value(), ShouldEqual, 5) + reChunkGranularity, _ := re4.Get("chunkGranularity") + So(reChunkGranularity.Value().(*model.Scalar).Value(), ShouldEqual, "TABLE") + reAtomic, _ := re4.Get("atomic") + So(reAtomic.Value().(*model.Scalar).Value(), ShouldEqual, "TRANS") + rePartitionSites, _ := re4.Get("partitionSites") + So(rePartitionSites.Value().(*model.Scalar).IsNull(), ShouldBeTrue) + rePartitionTypeName, _ := re4.Get("partitionTypeName") + So(rePartitionTypeName.Value().(*model.Scalar).Value(), ShouldEqual, "HASH") + rePartitionSchema, _ := re4.Get("partitionSchema") + So(rePartitionSchema.Value().(*model.Scalar).Value(), ShouldEqual, 3) + reDatabaseDir, _ := re4.Get("databaseDir") + So(reDatabaseDir.Value().(*model.Scalar).Value(), ShouldEqual, DfsDBPath) + _, err = ddb.RunScript("n=10") + So(err, ShouldBeNil) + _, err = CreateMemTable(ddb, "t", "datev", "id", "sym", "val", "take(2010.01.01..2010.01.31, n)", "[1,4,5,5,6,6,6,6,8,8]", `take(["AMD", "QWE", "CES", "DOP", "ASZ", "FSD", "BBVC", "AWQ", "DS"], n)`, "take([39, 50, 5, 24, 79, 39, 8, 67, 29, 55], n)") + So(err, ShouldBeNil) + // create dfsTable + dfsTable, err := CreateDefPartitionedTable(database, "t", DfsTBName1, []string{"id"}) + So(err, ShouldBeNil) + resultDatev := dfsTable.Data.GetColumnByName(dfsTable.Data.GetColumnNames()[0]) + So(resultDatev.Data.IsNull(0), ShouldBeTrue) + resultInt := dfsTable.Data.GetColumnByName(dfsTable.Data.GetColumnNames()[1]) + So(resultInt.Data.IsNull(0), ShouldBeTrue) + resultSym := dfsTable.Data.GetColumnByName(dfsTable.Data.GetColumnNames()[2]) + So(resultSym.Data.IsNull(0), ShouldBeTrue) + resultVal := dfsTable.Data.GetColumnByName(dfsTable.Data.GetColumnNames()[3]) + So(resultVal.Data.IsNull(0), ShouldBeTrue) + _, err = ddb.RunScript(`select * from loadTable("` + DfsDBPath + `", "` + DfsTBName1 + `").append!(t)`) + So(err, ShouldBeNil) + newdfstable, err := LoadPartitionedTable(ddb, DfsTBName1, DfsDBPath) + So(err, ShouldBeNil) + + resultDatev = newdfstable.Data.GetColumnByName(dfsTable.Data.GetColumnNames()[0]) + re := resultDatev.String() + So(re, ShouldEqual, "vector([2010.01.05, 2010.01.06, 2010.01.07, 2010.01.08, 2010.01.01, 2010.01.02, 2010.01.03, 2010.01.04, 2010.01.09, 2010.01.10])") + + resultInt = newdfstable.Data.GetColumnByName(dfsTable.Data.GetColumnNames()[1]) + reInt := resultInt.Data.Value() + tmpInt := []int32{6, 6, 6, 6, 1, 4, 5, 5, 8, 
8} + for i := 0; i < resultInt.Rows(); i++ { + So(reInt[i], ShouldEqual, tmpInt[i]) + } + resultSym = newdfstable.Data.GetColumnByName(dfsTable.Data.GetColumnNames()[2]) + resym := resultSym.Data.Value() + tmpSym := []string{"ASZ", "FSD", "BBVC", "AWQ", "AMD", "QWE", "CES", "DOP", "DS", "AMD"} + for i := 0; i < resultSym.Rows(); i++ { + So(resym[i], ShouldEqual, tmpSym[i]) + } + resultVal = newdfstable.Data.GetColumnByName(dfsTable.Data.GetColumnNames()[3]) + reVal := resultVal.Data.Value() + tmpVal := []int32{79, 39, 8, 67, 39, 50, 5, 24, 29, 55} + for i := 0; i < resultVal.Rows(); i++ { + So(reVal[i], ShouldEqual, tmpVal[i]) + } + // create dimensionTable + _, err = CreateTable(database, "t", TbName1) + So(err, ShouldBeNil) + _, err = ddb.RunScript(`select * from loadTable("` + DfsDBPath + `", "` + TbName1 + `").append!(t)`) + So(err, ShouldBeNil) + dimensionTable, err := LoadTable(ddb, TbName1, DfsDBPath) + So(err, ShouldBeNil) + for i := 1; i <= 10; i++ { + resultDatev = dimensionTable.Data.GetColumnByName(dfsTable.Data.GetColumnNames()[0]) + re := resultDatev.Data.Value() + datev := time.Date(2010, time.January, i, 0, 0, 0, 0, time.UTC) + tmp := []time.Time{datev} + So(re[i-1], ShouldEqual, tmp[0]) + } + resultInt = dimensionTable.Data.GetColumnByName(dfsTable.Data.GetColumnNames()[1]) + reInt = resultInt.Data.Value() + tmpInt = []int32{1, 4, 5, 5, 6, 6, 6, 6, 8, 8} + for i := 0; i < resultInt.Rows(); i++ { + So(reInt[i], ShouldEqual, tmpInt[i]) + } + resultSym = dimensionTable.Data.GetColumnByName(dfsTable.Data.GetColumnNames()[2]) + reSym := resultSym.Data.Value() + tmpSym = []string{"AMD", "QWE", "CES", "DOP", "ASZ", "FSD", "BBVC", "AWQ", "DS", "AMD"} + for i := 0; i < resultSym.Rows(); i++ { + So(reSym[i], ShouldEqual, tmpSym[i]) + } + resultVal = dimensionTable.Data.GetColumnByName(dfsTable.Data.GetColumnNames()[3]) + reVal = resultVal.Data.Value() + tmpVal = []int32{39, 50, 5, 24, 79, 39, 8, 67, 29, 55} + for i := 0; i < resultVal.Rows(); i++ { + So(reVal[i], ShouldEqual, tmpVal[i]) + } + err = DropDatabase(ddb, DfsDBPath) + So(err, ShouldBeNil) + re6, err := ExistsDatabase(ddb, DfsDBPath) + So(err, ShouldBeNil) + So(re6, ShouldBeFalse) + }) + Convey("Test_CreateDatabase_olap_list_partition", func() { + re1, err := ExistsDatabase(ddb, DfsDBPath) + So(err, ShouldBeNil) + So(re1, ShouldBeFalse) + database, err := CreateDatabase(ddb, DfsDBPath, DBhandler, "LIST", "[`AMD`QWE`CES,`DOP`ASZ,`FSD`BBVC,`AWQ`DS]", "", "", "") + So(err, ShouldBeNil) + re2, err := ExistsDatabase(ddb, DfsDBPath) + So(err, ShouldBeNil) + So(re2, ShouldBeTrue) + re3, err := ddb.RunScript("schema(db)") + So(err, ShouldBeNil) + re4 := re3.(*model.Dictionary) + rePartitionType, _ := re4.Get("partitionType") + So(rePartitionType.Value().(*model.Scalar).Value(), ShouldEqual, 3) + reChunkGranularity, _ := re4.Get("chunkGranularity") + So(reChunkGranularity.Value().(*model.Scalar).Value(), ShouldEqual, "TABLE") + reAtomic, _ := re4.Get("atomic") + So(reAtomic.Value().(*model.Scalar).Value(), ShouldEqual, "TRANS") + rePartitionSites, _ := re4.Get("partitionSites") + So(rePartitionSites.Value().(*model.Scalar).IsNull(), ShouldBeTrue) + rePartitionTypeName, _ := re4.Get("partitionTypeName") + So(rePartitionTypeName.Value().(*model.Scalar).Value(), ShouldEqual, "LIST") + rePartitionSchema, _ := re4.Get("partitionSchema") + So(rePartitionSchema.Value().(*model.Vector).String(), ShouldEqual, "vector([vector([AMD, QWE, CES]), vector([DOP, ASZ]), vector([FSD, BBVC]), vector([AWQ, DS])])") + reDatabaseDir, _ := 
re4.Get("databaseDir") + So(reDatabaseDir.Value().(*model.Scalar).Value(), ShouldEqual, DfsDBPath) + _, err = ddb.RunScript("n=10") + So(err, ShouldBeNil) + _, err = CreateMemTable(ddb, "t", "datev", "id", "sym", "val", "take(2010.01.01..2010.01.31, n)", "[1,4,5,5,6,6,6,6,8,8]", `take(["AMD", "QWE", "CES", "DOP", "ASZ", "FSD", "BBVC", "AWQ", "DS"], n)`, "take([39, 50, 5, 24, 79, 39, 8, 67, 29, 55], n)") + So(err, ShouldBeNil) + // create dfsTable + dfsTable, err := CreateDefPartitionedTable(database, "t", DfsTBName1, []string{"sym"}) + So(err, ShouldBeNil) + resultDatev := dfsTable.Data.GetColumnByName(dfsTable.Data.GetColumnNames()[0]) + So(resultDatev.Data.IsNull(0), ShouldBeTrue) + resultInt := dfsTable.Data.GetColumnByName(dfsTable.Data.GetColumnNames()[1]) + So(resultInt.Data.IsNull(0), ShouldBeTrue) + resultSym := dfsTable.Data.GetColumnByName(dfsTable.Data.GetColumnNames()[2]) + So(resultSym.Data.IsNull(0), ShouldBeTrue) + resultVal := dfsTable.Data.GetColumnByName(dfsTable.Data.GetColumnNames()[3]) + So(resultVal.Data.IsNull(0), ShouldBeTrue) + _, err = ddb.RunScript(`select * from loadTable("` + DfsDBPath + `", "` + DfsTBName1 + `").append!(t)`) + So(err, ShouldBeNil) + newdfstable, err := LoadPartitionedTable(ddb, DfsTBName1, DfsDBPath) + So(err, ShouldBeNil) + + resultDatev = newdfstable.Data.GetColumnByName(dfsTable.Data.GetColumnNames()[0]) + re := resultDatev.String() + So(re, ShouldEqual, "vector([2010.01.01, 2010.01.02, 2010.01.03, 2010.01.10, 2010.01.04, 2010.01.05, 2010.01.06, 2010.01.07, 2010.01.08, 2010.01.09])") + + resultInt = newdfstable.Data.GetColumnByName(dfsTable.Data.GetColumnNames()[1]) + reInt := resultInt.Data.Value() + tmpInt := []int32{1, 4, 5, 8, 5, 6, 6, 6, 6, 8} + for i := 0; i < resultInt.Rows(); i++ { + So(reInt[i], ShouldEqual, tmpInt[i]) + } + resultSym = newdfstable.Data.GetColumnByName(dfsTable.Data.GetColumnNames()[2]) + resym := resultSym.Data.Value() + tmpSym := []string{"AMD", "QWE", "CES", "AMD", "DOP", "ASZ", "FSD", "BBVC", "AWQ", "DS"} + for i := 0; i < resultSym.Rows(); i++ { + So(resym[i], ShouldEqual, tmpSym[i]) + } + resultVal = newdfstable.Data.GetColumnByName(dfsTable.Data.GetColumnNames()[3]) + reVal := resultVal.Data.Value() + tmpVal := []int32{39, 50, 5, 55, 24, 79, 39, 8, 67, 29} + for i := 0; i < resultVal.Rows(); i++ { + So(reVal[i], ShouldEqual, tmpVal[i]) + } + // create dimensionTable + _, err = CreateTable(database, "t", TbName1) + So(err, ShouldBeNil) + _, err = ddb.RunScript(`select * from loadTable("` + DfsDBPath + `", "` + TbName1 + `").append!(t)`) + So(err, ShouldBeNil) + dimensionTable, err := LoadTable(ddb, TbName1, DfsDBPath) + So(err, ShouldBeNil) + for i := 1; i <= 10; i++ { + resultDatev = dimensionTable.Data.GetColumnByName(dfsTable.Data.GetColumnNames()[0]) + re := resultDatev.Data.Value() + datev := time.Date(2010, time.January, i, 0, 0, 0, 0, time.UTC) + tmp := []time.Time{datev} + So(re[i-1], ShouldEqual, tmp[0]) + } + resultInt = dimensionTable.Data.GetColumnByName(dfsTable.Data.GetColumnNames()[1]) + reInt = resultInt.Data.Value() + tmpInt = []int32{1, 4, 5, 5, 6, 6, 6, 6, 8, 8} + for i := 0; i < resultInt.Rows(); i++ { + So(reInt[i], ShouldEqual, tmpInt[i]) + } + resultSym = dimensionTable.Data.GetColumnByName(dfsTable.Data.GetColumnNames()[2]) + reSym := resultSym.Data.Value() + tmpSym = []string{"AMD", "QWE", "CES", "DOP", "ASZ", "FSD", "BBVC", "AWQ", "DS", "AMD"} + for i := 0; i < resultSym.Rows(); i++ { + So(reSym[i], ShouldEqual, tmpSym[i]) + } + resultVal = 
dimensionTable.Data.GetColumnByName(dfsTable.Data.GetColumnNames()[3]) + reVal = resultVal.Data.Value() + tmpVal = []int32{39, 50, 5, 24, 79, 39, 8, 67, 29, 55} + for i := 0; i < resultVal.Rows(); i++ { + So(reVal[i], ShouldEqual, tmpVal[i]) + } + err = DropDatabase(ddb, DfsDBPath) + So(err, ShouldBeNil) + re6, err := ExistsDatabase(ddb, DfsDBPath) + So(err, ShouldBeNil) + So(re6, ShouldBeFalse) + }) + Convey("Test_CreateDatabase_olap_compo_partition", func() { + re1, err := ExistsDatabase(ddb, DfsDBPath) + So(err, ShouldBeNil) + So(re1, ShouldBeFalse) + _, err = CreateDatabase(ddb, "", "db1", "VALUE", "2010.01.01..2010.01.30", "", "", "") + So(err, ShouldBeNil) + _, err = CreateDatabase(ddb, "", "db2", "RANGE", "1 3 5 7 9 10", "", "", "") + So(err, ShouldBeNil) + database, err := CreateDatabase(ddb, DfsDBPath, DBhandler, "COMPO", "[db1, db2]", "", "", "") + So(err, ShouldBeNil) + re2, err := ExistsDatabase(ddb, DfsDBPath) + So(err, ShouldBeNil) + So(re2, ShouldBeTrue) + re3, err := ddb.RunScript("schema(db)") + So(err, ShouldBeNil) + re4 := re3.(*model.Dictionary) + rePartitionType, _ := re4.Get("partitionType") + So(rePartitionType.Value().(*model.Vector).String(), ShouldEqual, "vector([1, 2])") + reChunkGranularity, _ := re4.Get("chunkGranularity") + So(reChunkGranularity.Value().(*model.Scalar).Value(), ShouldEqual, "TABLE") + reAtomic, _ := re4.Get("atomic") + So(reAtomic.Value().(*model.Scalar).Value(), ShouldEqual, "TRANS") + rePartitionSites, _ := re4.Get("partitionSites") + So(rePartitionSites.Value().(*model.Scalar).IsNull(), ShouldBeTrue) + rePartitionTypeName, _ := re4.Get("partitionTypeName") + So(rePartitionTypeName.Value().(*model.Vector).String(), ShouldEqual, "vector([VALUE, RANGE])") + rePartitionSchema, _ := re4.Get("partitionSchema") + re := rePartitionSchema.Value().(*model.Vector).Get(0).Value().(*model.Vector).Data.Value() + j := 0 + for i := 30; i >= 1; i-- { + datev := time.Date(2010, time.January, i, 0, 0, 0, 0, time.UTC) + tmp := []time.Time{datev} + So(re[j], ShouldEqual, tmp[0]) + j++ + } + So(rePartitionSchema.Value().(*model.Vector).Get(1).Value().(*model.Vector).String(), ShouldEqual, "vector([1, 3, 5, 7, 9, 10])") + reDatabaseDir, _ := re4.Get("databaseDir") + So(reDatabaseDir.Value().(*model.Scalar).Value(), ShouldEqual, DfsDBPath) + _, err = ddb.RunScript("n=10") + So(err, ShouldBeNil) + _, err = CreateMemTable(ddb, "t", "datev", "id", "sym", "val", "take(2010.01.01..2010.01.31, n)", "[1,4,5,5,6,6,6,6,8,8]", `take(["AMD", "QWE", "CES", "DOP", "ASZ", "FSD", "BBVC", "AWQ", "DS"], n)`, "take([39, 50, 5, 24, 79, 39, 8, 67, 29, 55], n)") + So(err, ShouldBeNil) + // create dfsTable + dfsTable, err := CreateDefPartitionedTable(database, "t", DfsTBName1, []string{"datev", "id"}) + So(err, ShouldBeNil) + resultDatev := dfsTable.Data.GetColumnByName(dfsTable.Data.GetColumnNames()[0]) + So(resultDatev.Data.IsNull(0), ShouldBeTrue) + resultInt := dfsTable.Data.GetColumnByName(dfsTable.Data.GetColumnNames()[1]) + So(resultInt.Data.IsNull(0), ShouldBeTrue) + resultSym := dfsTable.Data.GetColumnByName(dfsTable.Data.GetColumnNames()[2]) + So(resultSym.Data.IsNull(0), ShouldBeTrue) + resultVal := dfsTable.Data.GetColumnByName(dfsTable.Data.GetColumnNames()[3]) + So(resultVal.Data.IsNull(0), ShouldBeTrue) + _, err = ddb.RunScript(`select * from loadTable("` + DfsDBPath + `", "` + DfsTBName1 + `").append!(t)`) + So(err, ShouldBeNil) + newdfstable, err := LoadPartitionedTable(ddb, DfsTBName1, DfsDBPath) + So(err, ShouldBeNil) + + resultDatev = 
newdfstable.Data.GetColumnByName(dfsTable.Data.GetColumnNames()[0]) + reDate := resultDatev.Data.Value() + for i := 1; i < resultDatev.Rows(); i++ { + datev := time.Date(2010, time.January, i, 0, 0, 0, 0, time.UTC) + tmp := []time.Time{datev} + So(reDate[i-1], ShouldEqual, tmp[0]) + } + resultInt = newdfstable.Data.GetColumnByName(dfsTable.Data.GetColumnNames()[1]) + reInt := resultInt.Data.Value() + tmpInt := []int32{1, 4, 5, 5, 6, 6, 6, 6, 8, 8} + for i := 0; i < resultInt.Rows(); i++ { + So(reInt[i], ShouldEqual, tmpInt[i]) + } + resultSym = newdfstable.Data.GetColumnByName(dfsTable.Data.GetColumnNames()[2]) + resym := resultSym.Data.Value() + tmpSym := []string{"AMD", "QWE", "CES", "DOP", "ASZ", "FSD", "BBVC", "AWQ", "DS", "AMD"} + for i := 0; i < resultSym.Rows(); i++ { + So(resym[i], ShouldEqual, tmpSym[i]) + } + resultVal = newdfstable.Data.GetColumnByName(dfsTable.Data.GetColumnNames()[3]) + reVal := resultVal.Data.Value() + tmpVal := []int32{39, 50, 5, 24, 79, 39, 8, 67, 29, 55} + for i := 0; i < resultVal.Rows(); i++ { + So(reVal[i], ShouldEqual, tmpVal[i]) + } + // create dimensionTable + _, err = CreateTable(database, "t", TbName1) + So(err, ShouldBeNil) + _, err = ddb.RunScript(`select * from loadTable("` + DfsDBPath + `", "` + TbName1 + `").append!(t)`) + So(err, ShouldBeNil) + dimensionTable, err := LoadTable(ddb, TbName1, DfsDBPath) + So(err, ShouldBeNil) + for i := 1; i <= 10; i++ { + resultDatev = dimensionTable.Data.GetColumnByName(dfsTable.Data.GetColumnNames()[0]) + re := resultDatev.Data.Value() + datev := time.Date(2010, time.January, i, 0, 0, 0, 0, time.UTC) + tmp := []time.Time{datev} + So(re[i-1], ShouldEqual, tmp[0]) + } + resultInt = dimensionTable.Data.GetColumnByName(dfsTable.Data.GetColumnNames()[1]) + reInt = resultInt.Data.Value() + tmpInt = []int32{1, 4, 5, 5, 6, 6, 6, 6, 8, 8} + for i := 0; i < resultInt.Rows(); i++ { + So(reInt[i], ShouldEqual, tmpInt[i]) + } + resultSym = dimensionTable.Data.GetColumnByName(dfsTable.Data.GetColumnNames()[2]) + reSym := resultSym.Data.Value() + tmpSym = []string{"AMD", "QWE", "CES", "DOP", "ASZ", "FSD", "BBVC", "AWQ", "DS", "AMD"} + for i := 0; i < resultSym.Rows(); i++ { + So(reSym[i], ShouldEqual, tmpSym[i]) + } + resultVal = dimensionTable.Data.GetColumnByName(dfsTable.Data.GetColumnNames()[3]) + reVal = resultVal.Data.Value() + tmpVal = []int32{39, 50, 5, 24, 79, 39, 8, 67, 29, 55} + for i := 0; i < resultVal.Rows(); i++ { + So(reVal[i], ShouldEqual, tmpVal[i]) + } + err = DropDatabase(ddb, DfsDBPath) + So(err, ShouldBeNil) + re6, err := ExistsDatabase(ddb, DfsDBPath) + So(err, ShouldBeNil) + So(re6, ShouldBeFalse) + }) + }) +} + +func TestDataBaseGetSession(t *testing.T) { + Convey("Test_CreateDatabase_prepare", t, func() { + ddb, err := api.NewSimpleDolphinDBClient(context.TODO(), setup.Address, setup.UserName, setup.Password) + So(err, ShouldBeNil) + Convey("TestCreateDatabase_dropDatabase", func() { + dbPaths := []string{DfsDBPath, DiskDBPath} + for _, dbPath := range dbPaths { + script := ` + if(existsDatabase("` + dbPath + `")){ + dropDatabase("` + dbPath + `") + } + if(exists("` + dbPath + `")){ + rmdir("` + dbPath + `", true) + } + ` + _, err = ddb.RunScript(script) + So(err, ShouldBeNil) + } + }) + Convey("Test_CreateDatabase_olap_value_partition", func() { + re1, err := ExistsDatabase(ddb, DfsDBPath) + So(err, ShouldBeNil) + So(re1, ShouldBeFalse) + database, err := CreateDatabase(ddb, DfsDBPath, DBhandler, "VALUE", "2010.01.01..2010.01.30", "", "", "") + So(err, ShouldBeNil) + res := database.GetSession() 
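+			// GetSession returns the session ID the server assigned when the
+			// connection was established; it should be non-empty on a live
+			// connection and only becomes "" after Close (see TestGetSession
+			// in dbConnection_test.go).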
+			So(res, ShouldNotBeNil)
+		})
+	})
+}
diff --git a/test/dbConnection_test.go b/test/dbConnection_test.go
new file mode 100644
index 0000000..29794e2
--- /dev/null
+++ b/test/dbConnection_test.go
@@ -0,0 +1,367 @@
+package test
+
+import (
+	"context"
+	"fmt"
+	"testing"
+
+	"github.com/dolphindb/api-go/api"
+	"github.com/dolphindb/api-go/dialer"
+	"github.com/dolphindb/api-go/model"
+	"github.com/dolphindb/api-go/test/setup"
+	. "github.com/smartystreets/goconvey/convey"
+)
+
+func TestNewDolphinDBClient(t *testing.T) {
+	Convey("func NewDolphinDB exception test", t, func() {
+		Convey("Test NewDolphinDB wrong address exception", func() {
+			_, err := api.NewDolphinDBClient(context.TODO(), "123456", nil)
+			result := fmt.Errorf("\n exception error is %w", err)
+			fmt.Println(result.Error())
+			// fmt.Errorf never returns nil, so assert on err itself
+			So(err, ShouldNotBeNil)
+		})
+
+		Convey("Test NewDolphinDB login wrong userName exception", func() {
+			db, _ := api.NewDolphinDBClient(context.TODO(), setup.Address, nil)
+			err := db.Connect()
+			So(err, ShouldBeNil)
+			defer db.Close()
+			loginReq := new(api.LoginRequest).
+				SetUserID("wrongName").
+				SetPassword(setup.Password)
+			err = db.Login(loginReq)
+			result := fmt.Errorf("\n exception error is %w", err)
+			fmt.Println(result.Error())
+			So(err, ShouldNotBeNil)
+		})
+
+		Convey("Test NewDolphinDB login wrong password exception", func() {
+			db, _ := api.NewDolphinDBClient(context.TODO(), setup.Address, nil)
+			err := db.Connect()
+			So(err, ShouldBeNil)
+			defer db.Close()
+			loginReq := new(api.LoginRequest).
+				SetUserID(setup.UserName).
+				SetPassword("wrong password")
+			err = db.Login(loginReq)
+			result := fmt.Errorf("\n exception error is %w", err)
+			fmt.Println(result.Error())
+			So(err, ShouldNotBeNil)
+		})
+	})
+
+	Convey("Test NewDolphinDB login and logout", t, func() {
+		Convey("Test NewDolphinDB login", func() {
+			db, err := api.NewDolphinDBClient(context.TODO(), setup.Address, nil)
+			So(err, ShouldBeNil)
+			err = db.Connect()
+			So(err, ShouldBeNil)
+			defer db.Close()
+			loginReq := new(api.LoginRequest).
+				SetUserID(setup.UserName).
+				SetPassword(setup.Password)
+			err = db.Login(loginReq)
+			So(err, ShouldBeNil)
+		})
+
+		Convey("Test NewDolphinDB logout", func() {
+			db, _ := api.NewDolphinDBClient(context.TODO(), setup.Address, nil)
+			err := db.Connect()
+			So(err, ShouldBeNil)
+			defer db.Close()
+			loginReq := new(api.LoginRequest).
+				SetUserID(setup.UserName).
+				SetPassword(setup.Password)
+			err = db.Login(loginReq)
+			So(err, ShouldBeNil)
+			err = db.Logout()
+			So(err, ShouldBeNil)
+		})
+	})
+}
+
+func TestNewSimpleDolphinDBClient(t *testing.T) {
+	Convey("func NewSimpleDolphinDB exception test", t, func() {
+		Convey("Test NewSimpleDolphinDB wrong address exception", func() {
+			_, err := api.NewSimpleDolphinDBClient(context.TODO(), "wrongAddress", setup.UserName, setup.Password)
+			result := fmt.Errorf("\n exception error is %w", err)
+			fmt.Println(result.Error())
+			So(err, ShouldNotBeNil)
+		})
+
+		Convey("Test NewSimpleDolphinDB wrong userName int exception", func() {
+			_, err := api.NewSimpleDolphinDBClient(context.TODO(), setup.Address, "1234", setup.Password)
+			result := fmt.Errorf("\n exception error is %w", err)
+			fmt.Println(result.Error())
+			So(err, ShouldNotBeNil)
+		})
+
+		Convey("Test NewSimpleDolphinDB wrong password exception", func() {
+			_, err := api.NewSimpleDolphinDBClient(context.TODO(), setup.Address, setup.UserName, "12")
+			result := fmt.Errorf("\n exception error is %w", err)
+			fmt.Println(result.Error())
+			So(err, ShouldNotBeNil)
+		})
+	})
+
+	Convey("Test NewSimpleDolphinDB login and logout", t, func() {
+		Convey("Test NewSimpleDolphinDB login", func() {
+			db, err := api.NewSimpleDolphinDBClient(context.TODO(), setup.Address, setup.UserName, setup.Password)
+			So(err, ShouldBeNil)
+			dbName := `"dfs://test"`
+			re, err := db.RunScript(
+				`dbName=` + dbName + `
+				if(existsDatabase(dbName)){
+					dropDatabase(dbName)
+				}
+				db=database(dbName, VALUE, 1..10)
+				db`)
+			So(err, ShouldBeNil)
+			s := re.(*model.Scalar)
+			result := s.DataType.Value()
+			ex := "DB[dfs://test]"
+			So(result, ShouldEqual, ex)
+		})
+
+		Convey("Test NewSimpleDolphinDB logout", func() {
+			db, _ := api.NewSimpleDolphinDBClient(context.TODO(), setup.Address, setup.UserName, setup.Password)
+			err := db.Logout()
+			So(err, ShouldBeNil)
+			re, err := db.RunScript(`
+				dbName="dfs://test"
+				if(existsDatabase(dbName)){
+					dropDatabase(dbName)
+				}
+				db=database(dbName, VALUE, 1..10)`)
+			So(re, ShouldBeNil)
+			// running a script after logout must fail
+			So(err, ShouldNotBeNil)
+		})
+	})
+}
+
+func TestClose(t *testing.T) {
+	Convey("Test connection Close", t, func() {
+		Convey("Test NewDolphinDB Close", func() {
+			db, err := api.NewDolphinDBClient(context.TODO(), setup.Address, nil)
+			So(err, ShouldBeNil)
+			err = db.Connect()
+			So(err, ShouldBeNil)
+			db.Close()
+			connections, err := db.RunScript("getConnections()")
+			So(connections, ShouldBeNil)
+			result := fmt.Errorf("\n exception error is %w", err)
+			fmt.Println(result.Error())
+			So(err, ShouldNotBeNil)
+		})
+
+		Convey("Test NewSimpleDolphinDB Close", func() {
+			db, err := api.NewSimpleDolphinDBClient(context.TODO(), setup.Address, setup.UserName, setup.Password)
+			So(err, ShouldBeNil)
+			db.Close()
+			connections, err := db.RunScript("getConnections()")
+			So(connections, ShouldBeNil)
+			result := fmt.Errorf("\n exception error is %w", err)
+			fmt.Println(result.Error())
+			So(err, ShouldNotBeNil)
+		})
+	})
+}
+
+func TestIsClosed(t *testing.T) {
+	Convey("Test connection IsClosed", t, func() {
+		Convey("Test NewDolphinDB IsClosed", func() {
+			db, err := api.NewDolphinDBClient(context.TODO(), setup.Address, nil)
+			So(err, ShouldBeNil)
+			err = db.Connect()
+			So(err, ShouldBeNil)
+			isClosed := db.IsClosed()
+			So(isClosed, ShouldEqual, false)
+			err = db.Close()
+			isClosed = db.IsClosed()
+			So(err, ShouldBeNil)
+			So(isClosed, ShouldEqual, true)
+		})
+
+		Convey("Test NewSimpleDolphinDB IsClosed", func() {
+			db, err := api.NewSimpleDolphinDBClient(context.TODO(), setup.Address, setup.UserName, setup.Password)
+			So(err, ShouldBeNil)
+			isClosed := db.IsClosed()
+			So(isClosed, ShouldEqual, false)
+			err = db.Close()
+			isClosed = db.IsClosed()
+			So(err, ShouldBeNil)
+			So(isClosed, ShouldEqual, true)
+		})
+	})
+}
+
+func TestRefreshTimeout(t *testing.T) {
+	Convey("Test RefreshTimeout NewSimpleConn", t, func() {
+		db, err := dialer.NewSimpleConn(context.TODO(), setup.Address, setup.UserName, setup.Password)
+		So(err, ShouldBeNil)
+		SessionID1 := db.GetSession()
+		So(SessionID1, ShouldNotBeNil)
+		db.RefreshTimeout(100)
+		SessionID2 := db.GetSession()
+		So(SessionID1, ShouldEqual, SessionID2)
+		db.Close()
+	})
+	Convey("Test RefreshTimeout NewConn", t, func() {
+		db, err := dialer.NewConn(context.TODO(), setup.Address, nil)
+		So(err, ShouldBeNil)
+		err = db.Connect()
+		So(err, ShouldBeNil)
+		SessionID1 := db.GetSession()
+		So(SessionID1, ShouldNotBeNil)
+		db.RefreshTimeout(100)
+		SessionID2 := db.GetSession()
+		So(SessionID1, ShouldEqual, SessionID2)
+		db.Close()
+	})
+}
+
+func TestGetSession(t *testing.T) {
+	Convey("Test connection GetSession", t, func() {
+		Convey("Test NewDolphinDB GetSession", func() {
+			db, err := api.NewDolphinDBClient(context.TODO(), setup.Address, nil)
+			So(err, ShouldBeNil)
+			err = db.Connect()
+			So(err, ShouldBeNil)
+			SessionID := db.GetSession()
+			So(SessionID, ShouldNotBeNil)
+			err = db.Close()
+			SessionID = db.GetSession()
+			So(err, ShouldBeNil)
+			So(SessionID, ShouldEqual, "")
+		})
+
+		Convey("Test NewSimpleDolphinDB GetSession", func() {
+			db, err := api.NewSimpleDolphinDBClient(context.TODO(), setup.Address, setup.UserName, setup.Password)
+			So(err, ShouldBeNil)
+			SessionID := db.GetSession()
+			So(SessionID, ShouldNotBeNil)
+			err = db.Close()
+			SessionID = db.GetSession()
+			So(err, ShouldBeNil)
+			So(SessionID, ShouldEqual, "")
+		})
+	})
+}
+
+func TestNewConn(t *testing.T) {
+	Convey("func NewConn exception test", t, func() {
+		Convey("Test NewConn wrong address exception", func() {
+			_, err := dialer.NewConn(context.TODO(), "123456", nil)
+			result := fmt.Errorf("\n exception error is %w", err)
+			fmt.Println(result.Error())
+			So(err, ShouldNotBeNil)
+		})
+	})
+	Convey("Test NewConn connection", t, func() {
+		db, err := dialer.NewConn(context.TODO(), setup.Address, nil)
+		So(err, ShouldBeNil)
+		err = db.Connect()
+		So(err, ShouldBeNil)
+		SessionID := db.GetSession()
+		So(SessionID, ShouldNotBeNil)
+		err = db.Close()
+		So(err, ShouldBeNil)
+	})
+}
+
+func TestNewSimpleConn(t *testing.T) {
+	Convey("func NewSimpleConn exception test", t, func() {
+		Convey("Test NewSimpleConn wrong address exception", func() {
+			_, err := dialer.NewSimpleConn(context.TODO(), "wrongAddress", setup.UserName, setup.Password)
+			result := fmt.Errorf("\n exception error is %w", err)
+			fmt.Println(result.Error())
+			So(err, ShouldNotBeNil)
+		})
+
+		Convey("Test NewSimpleConn wrong userName int exception", func() {
+			_, err := dialer.NewSimpleConn(context.TODO(), setup.Address, "1234", setup.Password)
+			result := fmt.Errorf("\n exception error is %w", err)
+			fmt.Println(result.Error())
+			So(err, ShouldNotBeNil)
+		})
+
+		Convey("Test NewSimpleConn wrong password exception", func() {
+			_, err := dialer.NewSimpleConn(context.TODO(), setup.Address, setup.UserName, "12")
+			result := fmt.Errorf("\n exception error is %w", err)
+			fmt.Println(result.Error())
+			So(err, ShouldNotBeNil)
+		})
+	})
+
+	Convey("Test NewSimpleConn login and logout", t, func() {
+		Convey("Test NewSimpleConn login", func()
{ + db, err := dialer.NewSimpleConn(context.TODO(), setup.Address, setup.UserName, setup.Password) + So(err, ShouldBeNil) + dbName := `"dfs://test"` + re, err := db.RunScript( + `dbName=` + dbName + ` + if(existsDatabase(dbName)){ + dropDatabase(dbName) + } + db=database(dbName, VALUE, 1..10) + db`) + So(err, ShouldBeNil) + s := re.(*model.Scalar) + result := s.DataType.Value() + ex := "DB[dfs://test]" + So(result, ShouldEqual, ex) + db.Close() + }) + + Convey("Test NewSimpleConn getSessionId", func() { + db, _ := dialer.NewSimpleConn(context.TODO(), setup.Address, setup.UserName, setup.Password) + re, err := db.RunScript(` + dbName="dfs://test" + if(existsDatabase(dbName)){ + dropDatabase(dbName) + } + db=database(dbName, VALUE, 1..10)`) + result := fmt.Errorf("\n error is %w", err) + So(re, ShouldBeNil) + So(result, ShouldNotBeNil) + SessionID := db.GetSession() + So(SessionID, ShouldNotBeNil) + err = db.Close() + So(err, ShouldBeNil) + add := db.GetLocalAddress() + So(add, ShouldEqual, setup.IP) + db.Close() + }) + }) +} + +func TestGetLocalAddress(t *testing.T) { + Convey("Test GetLocalAddress NewSimpleConn", t, func() { + db, err := dialer.NewSimpleConn(context.TODO(), setup.Address, setup.UserName, setup.Password) + So(err, ShouldBeNil) + re := db.GetLocalAddress() + So(re, ShouldEqual, setup.IP) + db.Close() + }) + Convey("Test GetLocalAddress NewConn", t, func() { + db, err := dialer.NewConn(context.TODO(), setup.Address, nil) + So(err, ShouldBeNil) + re := db.GetLocalAddress() + So(re, ShouldEqual, setup.IP) + db.Close() + }) + Convey("Test GetLocalAddress NewSimpleDolphinDBClient", t, func() { + db, err := api.NewSimpleDolphinDBClient(context.TODO(), setup.Address, setup.UserName, setup.Password) + So(err, ShouldBeNil) + re := db.GetLocalAddress() + So(re, ShouldEqual, setup.IP) + db.Close() + }) + Convey("Test GetLocalAddress NewDolphinDBClient", t, func() { + db, err := api.NewDolphinDBClient(context.TODO(), setup.Address, nil) + So(err, ShouldBeNil) + re := db.GetLocalAddress() + So(re, ShouldEqual, setup.IP) + db.Close() + }) +} diff --git a/test/dfsTable_test.go b/test/dfsTable_test.go new file mode 100644 index 0000000..3edf901 --- /dev/null +++ b/test/dfsTable_test.go @@ -0,0 +1,404 @@ +package test + +import ( + "context" + "strconv" + "testing" + "time" + + "github.com/dolphindb/api-go/api" + "github.com/dolphindb/api-go/model" + "github.com/dolphindb/api-go/test/setup" + . 
"github.com/smartystreets/goconvey/convey" + "github.com/stretchr/testify/assert" +) + +func CreateScript(Num int) string { + script := ` + dbName="dfs://test_dfs_table" + if(existsDatabase(dbName)){ + dropDatabase(dbName) + } + n=` + strconv.Itoa(Num) + ` + t=table(100:0, ["sym", "boolv", "intv", "longv", "shortv", "doublev", "floatv", "str", "charv", "timestampv", "datev", "datetimev", "monthv", "timev", "minutev", "secondv", "nanotimev", "nanotimestamp", "datehourv", "uuidv", "ipaddrv", "int128v"], + [SYMBOL, BOOL, INT, LONG, SHORT, DOUBLE, FLOAT, STRING, CHAR, TIMESTAMP, DATE, DATETIME, MONTH, TIME, MINUTE, SECOND, NANOTIME, NANOTIMESTAMP, DATEHOUR, UUID, IPADDR, INT128]) + db=database(dbName, VALUE, ["A", "B", "C", "D", "E", "F"]) + pt=db.createPartitionedTable(t, "pt", "sym") + sym = take(["A", "B", "C", "D", "E", "F"], n) + boolv = take([true, false, true, false, false, true, true], n) + intv = take([91,NULL,69,16,35,NULL,57,-28,-81,26], n) + longv = take([99,23,92,NULL,49,67,NULL,81,-38,14], n) + shortv = take([47,26,-39,NULL,97,NULL,4,39,-51,25], n) + doublev = take([4.7,2.6,-3.9,NULL,9.7,4.9,NULL,3.9,5.1,2.5], n) + floatv = take([5.2f, 11.3f, -3.9, 1.2f, 7.8f, -4.9f, NULL, 3.9f, 5.1f, 2.5f], n) + str = take("str" + string(1..10), n) + charv = take(char([70, 72, 15, 98, 94]), n) + timestampv = take([2012.01.01T12:23:56.166, NULL, 1970.01.01T12:23:56.148, 1969.12.31T23:59:59.138, 2012.01.01T12:23:56.132], n) + datev = take([NULL, 1969.01.11, 1970.01.24, 1969.12.31, 2012.03.30], n) + datetimev = take([NULL, 2012.01.01T12:24:04, 2012.01.01T12:25:04, 2012.01.01T12:24:55, 2012.01.01T12:24:27], n) + monthv = take([1970.06M, 2014.05M, 1970.06M, 2017.12M, 1969.11M], n) + timev = take([12:23:56.156, NULL, 12:23:56.206, 12:23:56.132, 12:23:56.201], n) + minutev = take([12:47m,13:13m, NULL, 13:49m, 13:17m], n) + secondv = take([NULL, 00:03:11, 00:01:52, 00:02:43, 00:02:08], n) + nanotimev = take(nanotime(1..10) join nanotime(), n) + nanotimestampv = take(nanotimestamp(-5..5) join nanotimestamp(), n) + datehourv = take(datehour([1969.12.01, 1969.01.11, NULL, 1969.12.31, 2012.03.30]), n) + uuidv = take([uuid("7d943e7f-5660-e015-a895-fa4da2b36c43"), uuid("3272fc73-5a91-34f5-db39-6ee71aa479a4"), uuid("62746671-9870-5b92-6deb-a6f5d59e715e"), uuid("dd05902d-5561-ee7f-6318-41a107371a8d"), uuid("14f82b2a-cf0f-7a0c-4cba-3df7be0ba0fc"), uuid("1f9093c3-9132-7200-4893-0f937a0d52c9")], n) + ipaddrv = take([ipaddr("a9b7:f65:9be1:20fd:741a:97ac:6ce5:1dd"), ipaddr("8494:3a0e:13db:a097:d3fd:8dc:56e4:faed"), ipaddr("4d93:5be:edbc:1830:344d:f71b:ce65:a4a3"), ipaddr("70ff:6bb4:a554:5af5:d90c:49f4:e8e6:eff0"), ipaddr("51b3:1bf0:1e65:740a:2b:51d9:162f:385a"), ipaddr("d6ea:3fcb:54bf:169f:9ab5:63bf:a960:19fb")], n) + int128v = take([int128("7667974ea2fb155252559cc28b4a8efa"), int128("e7ef2788305d0f9c2c53cbfe3c373250"), int128("e602ccab7ff343e227b9596368ad5a44"), int128("709f888e885cfa716e0f36a0387477d5"), int128("978b68ce63f35ffbb79f23bd022269d8"), int128("022fd928ccbfc91efa6719ac22ccd239")], n) + t = table(sym, boolv, intv, longv, shortv, doublev, floatv, str, charv, timestampv, datev, datetimev, monthv, timev, minutev, secondv, nanotimev, nanotimestampv, datehourv, uuidv, ipaddrv, int128v) + pt.append!(t)` + return script +} + +func TestDfsTable(t *testing.T) { + Convey("test dfsTable download data", t, func() { + db, err := api.NewSimpleDolphinDBClient(context.TODO(), setup.Address, setup.UserName, setup.Password) + So(err, ShouldBeNil) + var rowNum int + Convey("test dfsTable only one rows", func() { + rowNum = 
1 + _, err = db.RunScript(CreateScript(rowNum)) + So(err, ShouldBeNil) + Convey("Test select single col from dfsTable:", func() { + Convey("Test select bool col from dfsTable:", func() { + s, err := db.RunScript("select boolv from loadTable(dbName, `pt)") + So(err, ShouldBeNil) + memTable := s.(*model.Table) + reBool := memTable.GetColumnByName(memTable.GetColumnNames()[0]) + So(reBool.GetDataType(), ShouldEqual, model.DtBool) + So(reBool.GetDataForm(), ShouldResemble, model.DfVector) + So(reBool.Rows(), ShouldEqual, rowNum) + re := reBool.Data.Value() + tmp := []bool{true} + for i := 0; i < reBool.Rows(); i++ { + assert.Equal(t, re[i], tmp[i]) + } + }) + Convey("Test select int col from dfsTable:", func() { + s, err := db.RunScript("select intv from loadTable(dbName, `pt)") + So(err, ShouldBeNil) + memTable := s.(*model.Table) + reInt := memTable.GetColumnByName(memTable.GetColumnNames()[0]) + So(reInt.GetDataType(), ShouldEqual, model.DtInt) + So(reInt.GetDataForm(), ShouldResemble, model.DfVector) + So(reInt.Rows(), ShouldEqual, rowNum) + re := reInt.Data.Value() + tmp := []int32{91} + for i := 0; i < reInt.Rows(); i++ { + assert.Equal(t, re[i], tmp[i]) + } + }) + Convey("Test select long col from dfsTable:", func() { + s, err := db.RunScript("select longv from loadTable(dbName, `pt)") + So(err, ShouldBeNil) + memTable := s.(*model.Table) + reLong := memTable.GetColumnByName(memTable.GetColumnNames()[0]) + So(reLong.GetDataType(), ShouldEqual, model.DtLong) + So(reLong.GetDataForm(), ShouldResemble, model.DfVector) + So(reLong.Rows(), ShouldEqual, rowNum) + re := reLong.Data.Value() + tmp := []int64{99} + for i := 0; i < reLong.Rows(); i++ { + assert.Equal(t, re[i], tmp[i]) + } + }) + Convey("Test select short col from dfsTable:", func() { + s, err := db.RunScript("select shortv from loadTable(dbName, `pt)") + So(err, ShouldBeNil) + memTable := s.(*model.Table) + reShort := memTable.GetColumnByName(memTable.GetColumnNames()[0]) + So(reShort.GetDataType(), ShouldEqual, model.DtShort) + So(reShort.GetDataForm(), ShouldResemble, model.DfVector) + So(reShort.Rows(), ShouldEqual, rowNum) + re := reShort.Data.Value() + tmp := []int16{47} + for i := 0; i < reShort.Rows(); i++ { + assert.Equal(t, re[i], tmp[i]) + } + }) + Convey("Test select float col from dfsTable:", func() { + s, err := db.RunScript("select floatv from loadTable(dbName, `pt)") + So(err, ShouldBeNil) + memTable := s.(*model.Table) + reFloat := memTable.GetColumnByName(memTable.GetColumnNames()[0]) + So(reFloat.GetDataType(), ShouldEqual, model.DtFloat) + So(reFloat.GetDataForm(), ShouldResemble, model.DfVector) + So(reFloat.Rows(), ShouldEqual, rowNum) + re := reFloat.Data.Value() + tmp := []float32{5.2} + for i := 0; i < reFloat.Rows(); i++ { + assert.Equal(t, re[i], tmp[i]) + } + }) + Convey("Test select double col from dfsTable:", func() { + s, err := db.RunScript("select doublev from loadTable(dbName, `pt)") + So(err, ShouldBeNil) + memTable := s.(*model.Table) + reDouble := memTable.GetColumnByName(memTable.GetColumnNames()[0]) + So(reDouble.GetDataType(), ShouldEqual, model.DtDouble) + So(reDouble.GetDataForm(), ShouldResemble, model.DfVector) + So(reDouble.Rows(), ShouldEqual, rowNum) + re := reDouble.Data.Value() + tmp := []float64{4.7} + for i := 0; i < reDouble.Rows(); i++ { + assert.Equal(t, re[i], tmp[i]) + } + }) + Convey("Test select string col from dfsTable:", func() { + s, err := db.RunScript("select str from loadTable(dbName, `pt)") + So(err, ShouldBeNil) + memTable := s.(*model.Table) + reDouble := 
memTable.GetColumnByName(memTable.GetColumnNames()[0]) + So(reDouble.GetDataType(), ShouldEqual, model.DtString) + So(reDouble.GetDataForm(), ShouldResemble, model.DfVector) + So(reDouble.Rows(), ShouldEqual, rowNum) + re := reDouble.Data.Value() + tmp := []string{"str1"} + for i := 0; i < reDouble.Rows(); i++ { + assert.Equal(t, re[i], tmp[i]) + } + }) + Convey("Test select symbol col from dfsTable:", func() { + s, err := db.RunScript("select sym from loadTable(dbName, `pt)") + So(err, ShouldBeNil) + memTable := s.(*model.Table) + reDouble := memTable.GetColumnByName(memTable.GetColumnNames()[0]) + So(reDouble.GetDataType(), ShouldEqual, model.DtSymbol) + So(reDouble.GetDataForm(), ShouldResemble, model.DfVector) + So(reDouble.Rows(), ShouldEqual, rowNum) + re := reDouble.Data.Value() + tmp := []string{"A"} + for i := 0; i < reDouble.Rows(); i++ { + assert.Equal(t, re[i], tmp[i]) + } + }) + Convey("Test select char col from dfsTable:", func() { + s, err := db.RunScript("select charv from loadTable(dbName, `pt)") + So(err, ShouldBeNil) + memTable := s.(*model.Table) + reDouble := memTable.GetColumnByName(memTable.GetColumnNames()[0]) + So(reDouble.GetDataType(), ShouldEqual, model.DtChar) + So(reDouble.GetDataForm(), ShouldResemble, model.DfVector) + So(reDouble.Rows(), ShouldEqual, rowNum) + re := reDouble.Data.Value() + tmp := []int8{70} + for i := 0; i < reDouble.Rows(); i++ { + assert.Equal(t, re[i], tmp[i]) + } + }) + Convey("Test select timestamp col from dfsTable:", func() { + s, err := db.RunScript("select timestampv from loadTable(dbName, `pt)") + So(err, ShouldBeNil) + memTable := s.(*model.Table) + reTimestamp := memTable.GetColumnByName(memTable.GetColumnNames()[0]) + So(reTimestamp.GetDataType(), ShouldEqual, model.DtTimestamp) + So(reTimestamp.GetDataForm(), ShouldResemble, model.DfVector) + So(reTimestamp.Rows(), ShouldEqual, rowNum) + re := reTimestamp.Data.Value() + timestampv := time.Date(2012, time.January, 01, 12, 23, 56, 166*1000000, time.UTC) //.Format("2006-01-02 15:04:05.166") + tmp := []time.Time{timestampv} + for i := 0; i < reTimestamp.Rows(); i++ { + assert.Equal(t, re[i], tmp[i]) + } + }) + Convey("Test select datev col from dfsTable:", func() { + s, err := db.RunScript("select datev from loadTable(dbName, `pt)") + So(err, ShouldBeNil) + memTable := s.(*model.Table) + reDate := memTable.GetColumnByName(memTable.GetColumnNames()[0]) + So(reDate.GetDataType(), ShouldEqual, model.DtDate) + So(reDate.GetDataForm(), ShouldResemble, model.DfVector) + So(reDate.Rows(), ShouldEqual, rowNum) + re := reDate.Data.IsNull(0) + So(re, ShouldEqual, true) + }) + Convey("Test select datetimev col from dfsTable:", func() { + s, err := db.RunScript("select datetimev from loadTable(dbName, `pt)") + So(err, ShouldBeNil) + memTable := s.(*model.Table) + reDatetime := memTable.GetColumnByName(memTable.GetColumnNames()[0]) + So(reDatetime.GetDataType(), ShouldEqual, model.DtDatetime) + So(reDatetime.GetDataForm(), ShouldResemble, model.DfVector) + So(reDatetime.Rows(), ShouldEqual, rowNum) + re := reDatetime.Data.IsNull(0) + So(re, ShouldEqual, true) + }) + Convey("Test select month col from dfsTable:", func() { + s, err := db.RunScript("select monthv from loadTable(dbName, `pt)") + So(err, ShouldBeNil) + memTable := s.(*model.Table) + reMonth := memTable.GetColumnByName(memTable.GetColumnNames()[0]) + So(reMonth.GetDataType(), ShouldEqual, model.DtMonth) + So(reMonth.GetDataForm(), ShouldResemble, model.DfVector) + So(reMonth.Rows(), ShouldEqual, rowNum) + re := reMonth.Data.Value() 
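+ // MONTH values are decoded to a time.Time pinned to the first day of the
+ // month at 00:00:00 UTC, so 1970.06M is expected to equal 1970-06-01T00:00:00Z.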
+ monthv := time.Date(1970, time.June, 01, 0, 0, 0, 0, time.UTC) + tmp := []time.Time{monthv} + for i := 0; i < reMonth.Rows(); i++ { + assert.Equal(t, re[i], tmp[i]) + } + }) + Convey("Test select time col from dfsTable:", func() { + s, err := db.RunScript("select timev from loadTable(dbName, `pt)") + So(err, ShouldBeNil) + memTable := s.(*model.Table) + reTime := memTable.GetColumnByName(memTable.GetColumnNames()[0]) + So(reTime.GetDataType(), ShouldEqual, model.DtTime) + So(reTime.GetDataForm(), ShouldResemble, model.DfVector) + So(reTime.Rows(), ShouldEqual, rowNum) + re := reTime.Data.Value() + timev := time.Date(1970, time.January, 01, 12, 23, 56, 156*1000000, time.UTC) + tmp := []time.Time{timev} + for i := 0; i < reTime.Rows(); i++ { + assert.Equal(t, re[i], tmp[i]) + } + }) + Convey("Test select minute col from dfsTable:", func() { + s, err := db.RunScript("select minutev from loadTable(dbName, `pt)") + So(err, ShouldBeNil) + memTable := s.(*model.Table) + reMinute := memTable.GetColumnByName(memTable.GetColumnNames()[0]) + So(reMinute.GetDataType(), ShouldEqual, model.DtMinute) + So(reMinute.GetDataForm(), ShouldResemble, model.DfVector) + So(reMinute.Rows(), ShouldEqual, rowNum) + re := reMinute.Data.Value() + minutev := time.Date(1970, time.January, 01, 12, 47, 0, 0, time.UTC) + tmp := []time.Time{minutev} + for i := 0; i < reMinute.Rows(); i++ { + assert.Equal(t, re[i], tmp[i]) + } + }) + Convey("Test select second col from dfsTable:", func() { + s, err := db.RunScript("select secondv from loadTable(dbName, `pt)") + So(err, ShouldBeNil) + memTable := s.(*model.Table) + reSecond := memTable.GetColumnByName(memTable.GetColumnNames()[0]) + So(reSecond.GetDataType(), ShouldEqual, model.DtSecond) + So(reSecond.GetDataForm(), ShouldResemble, model.DfVector) + So(reSecond.Rows(), ShouldEqual, rowNum) + re := reSecond.Data.IsNull(0) + So(re, ShouldEqual, true) + }) + Convey("Test select nanotime col from dfsTable:", func() { + s, err := db.RunScript("select nanotimev from loadTable(dbName, `pt)") + So(err, ShouldBeNil) + memTable := s.(*model.Table) + reNanotime := memTable.GetColumnByName(memTable.GetColumnNames()[0]) + So(reNanotime.GetDataType(), ShouldEqual, model.DtNanoTime) + So(reNanotime.GetDataForm(), ShouldResemble, model.DfVector) + So(reNanotime.Rows(), ShouldEqual, rowNum) + re := reNanotime.Data.Value() + nanotimev := time.Date(1970, time.January, 01, 0, 0, 0, 000000001, time.UTC) + tmp := []time.Time{nanotimev} + for i := 0; i < reNanotime.Rows(); i++ { + assert.Equal(t, re[i], tmp[i]) + } + }) + Convey("Test select nanotimestamp col from dfsTable:", func() { + s, err := db.RunScript("select nanotimestampv from loadTable(dbName, `pt)") + So(err, ShouldBeNil) + memTable := s.(*model.Table) + reNanotimestamp := memTable.GetColumnByName(memTable.GetColumnNames()[0]) + So(reNanotimestamp.GetDataType(), ShouldEqual, model.DtNanoTimestamp) + So(reNanotimestamp.GetDataForm(), ShouldResemble, model.DfVector) + So(reNanotimestamp.Rows(), ShouldEqual, rowNum) + re := reNanotimestamp.Data.Value() + nanotimestampv := time.Date(1969, time.December, 31, 23, 59, 59, 999999995, time.UTC) + tmp := []time.Time{nanotimestampv} + for i := 0; i < reNanotimestamp.Rows(); i++ { + assert.Equal(t, re[i], tmp[i]) + } + }) + Convey("Test select datehour col from dfsTable:", func() { + s, err := db.RunScript("select datehourv from loadTable(dbName, `pt)") + So(err, ShouldBeNil) + memTable := s.(*model.Table) + reDatehourv := memTable.GetColumnByName(memTable.GetColumnNames()[0]) + 
So(reDatehourv.GetDataType(), ShouldEqual, model.DtDateHour) + So(reDatehourv.GetDataForm(), ShouldResemble, model.DfVector) + So(reDatehourv.Rows(), ShouldEqual, rowNum) + re := reDatehourv.Data.Value() + datehourv := time.Date(1969, time.December, 01, 0, 0, 0, 0, time.UTC) + tmp := []time.Time{datehourv} + for i := 0; i < reDatehourv.Rows(); i++ { + assert.Equal(t, re[i], tmp[i]) + } + }) + Convey("Test select uuid col from dfsTable:", func() { + s, err := db.RunScript("select uuidv from loadTable(dbName, `pt)") + So(err, ShouldBeNil) + memTable := s.(*model.Table) + reUUID := memTable.GetColumnByName(memTable.GetColumnNames()[0]) + So(reUUID.GetDataType(), ShouldEqual, model.DtUUID) + So(reUUID.GetDataForm(), ShouldResemble, model.DfVector) + So(reUUID.Rows(), ShouldEqual, rowNum) + re := reUUID.Data.Value() + tmp := []string{"7d943e7f-5660-e015-a895-fa4da2b36c43"} + for i := 0; i < reUUID.Rows(); i++ { + assert.Equal(t, re[i], tmp[i]) + } + }) + Convey("Test select ipaddr col from dfsTable:", func() { + s, err := db.RunScript("select ipaddrv from loadTable(dbName, `pt)") + So(err, ShouldBeNil) + memTable := s.(*model.Table) + reIpaddr := memTable.GetColumnByName(memTable.GetColumnNames()[0]) + So(reIpaddr.GetDataType(), ShouldEqual, model.DtIP) + So(reIpaddr.GetDataForm(), ShouldResemble, model.DfVector) + So(reIpaddr.Rows(), ShouldEqual, rowNum) + re := reIpaddr.Data.Value() + tmp := []string{"a9b7:f65:9be1:20fd:741a:97ac:6ce5:1dd"} + for i := 0; i < reIpaddr.Rows(); i++ { + assert.Equal(t, re[i], tmp[i]) + } + }) + Convey("Test select int128 col from dfsTable:", func() { + s, err := db.RunScript("select int128v from loadTable(dbName, `pt)") + So(err, ShouldBeNil) + memTable := s.(*model.Table) + reInt128 := memTable.GetColumnByName(memTable.GetColumnNames()[0]) + So(reInt128.GetDataType(), ShouldEqual, model.DtInt128) + So(reInt128.GetDataForm(), ShouldResemble, model.DfVector) + So(reInt128.Rows(), ShouldEqual, rowNum) + re := reInt128.Data.Value() + tmp := []string{"7667974ea2fb155252559cc28b4a8efa"} + for i := 0; i < reInt128.Rows(); i++ { + assert.Equal(t, re[i], tmp[i]) + } + }) + }) + }) + // Convey("test dfsTable less than 1024 rows", func() { + // rowNum = 1023 + // _, err = db.RunScript(CreateScript(rowNum)) + // So(err, ShouldBeNil) + // Convey("Test select bool col from dfsTable:", func() { + // s, err := db.RunScript("select boolv from loadTable(dbName, `pt)") + // So(err, ShouldBeNil) + // memTable := s.(*model.Table) + // reBool := memTable.GetColumn(memTable.GetColumnNames()[0]) + // So(reBool.GetDataType(), ShouldEqual, model.DtBool) + // So(reBool.GetDataForm(), ShouldResemble, model.DfVector) + // So(reBool.Rows(), ShouldEqual, rowNum) + // re := reBool.Data.Value() + // fmt.Printf("\nre %v", re) + // tmp := []bool{true, false, true, false, false, true, true} + // fmt.Printf("\ntmp %v", tmp) + // var j int + // for i := 0; i < reBool.Rows(); i++ { + // if j < len(tmp) { + // // assert.Equal(t, re[i], tmp[j]) + // So(re[i], ShouldEqual, tmp[j]) + // j += 1 + // } else { + // j = 0 + // // assert.Equal(t, re[i], tmp[j]) + // So(re[i], ShouldEqual, tmp[j]) + // j += 1 + // } + // } + // }) + // }) + }) +} diff --git a/test/dropPartition_tabletMultiple_test.go b/test/dropPartition_tabletMultiple_test.go new file mode 100644 index 0000000..dcf7f84 --- /dev/null +++ b/test/dropPartition_tabletMultiple_test.go @@ -0,0 +1,805 @@ +package test + +import ( + "context" + "testing" + + "github.com/dolphindb/api-go/api" + "github.com/dolphindb/api-go/model" + 
"github.com/dolphindb/api-go/test/setup" + . "github.com/smartystreets/goconvey/convey" +) + +func TestDropPartition_tabletMultiple(t *testing.T) { + Convey("Test_DropPartition_tabletMultiple_prepare", t, func() { + ddb, err := api.NewSimpleDolphinDBClient(context.TODO(), setup.Address, setup.UserName, setup.Password) + So(err, ShouldBeNil) + Convey("Drop all Databases", func() { + dbPaths := []string{DfsDBPath, DiskDBPath} + for _, dbPath := range dbPaths { + script := ` + if(existsDatabase("` + dbPath + `")){ + dropDatabase("` + dbPath + `") + } + if(exists("` + dbPath + `")){ + rmdir("` + dbPath + `", true) + } + ` + _, err = ddb.RunScript(script) + So(err, ShouldBeNil) + re, err := ddb.RunScript(`existsDatabase("` + dbPath + `")`) + So(err, ShouldBeNil) + isExitsDatabase := re.(*model.Scalar).DataType.Value() + So(isExitsDatabase, ShouldBeFalse) + } + }) + Convey("Test_DropPartition_tabletMultiple_range_drop_single:", func() { + Convey("Test_DropPartition_tabletMultiple_range_drop_only_one_table:", func() { + re1, err := ExistsDatabase(ddb, DfsDBPath) + So(err, ShouldBeNil) + So(re1, ShouldBeFalse) + CreateDfsRangedbChunkGranularity(DfsDBPath, DfsTBName1, DfsTBName2) + re2, err := ExistsDatabase(ddb, DfsDBPath) + So(err, ShouldBeNil) + So(re2, ShouldBeTrue) + originTable1, err := LoadTable(ddb, DfsTBName1, DfsDBPath) + So(err, ShouldBeNil) + originTable2, err := LoadTable(ddb, DfsTBName1, DfsDBPath) + So(err, ShouldBeNil) + re := CompareTables(originTable1, originTable2) + So(err, ShouldBeNil) + So(re, ShouldBeTrue) + rs, err := LoadTableBySQL(ddb, "10001", "select * from loadTable('"+DfsDBPath+"','"+DfsTBName1+"') where id >= %s", DfsDBPath, DfsTBName1) + So(err, ShouldBeNil) + err = DropPartition(ddb, DfsTBName1, DfsDBPath, "'/1_10001'") + So(err, ShouldBeNil) + reTable1, err := LoadTable(ddb, DfsTBName1, DfsDBPath) + So(err, ShouldBeNil) + reTable2, err := LoadTable(ddb, DfsTBName2, DfsDBPath) + So(err, ShouldBeNil) + reData1 := CompareTables(reTable1, rs) + So(reData1, ShouldBeTrue) + reData2 := CompareTables(reTable2, originTable2) + So(reData2, ShouldBeTrue) + err = DropDatabase(ddb, DfsDBPath) + So(err, ShouldBeNil) + }) + Convey("Test_DropPartition_tabletMultiple_range_drop_all_tables:", func() { + re1, err := ExistsDatabase(ddb, DfsDBPath) + So(err, ShouldBeNil) + So(re1, ShouldBeFalse) + CreateDfsRangedbChunkGranularity(DfsDBPath, DfsTBName1, DfsTBName2) + re2, err := ExistsDatabase(ddb, DfsDBPath) + So(err, ShouldBeNil) + So(re2, ShouldBeTrue) + originTable1, err := LoadTable(ddb, DfsTBName1, DfsDBPath) + So(err, ShouldBeNil) + originTable2, err := LoadTable(ddb, DfsTBName1, DfsDBPath) + So(err, ShouldBeNil) + re := CompareTables(originTable1, originTable2) + So(err, ShouldBeNil) + So(re, ShouldBeTrue) + rs, err := LoadTableBySQL(ddb, "10001", "select * from loadTable('"+DfsDBPath+"','"+DfsTBName1+"') where id >= %s", DfsDBPath, DfsTBName1) + So(err, ShouldBeNil) + err = DropPartition(ddb, DfsTBName1, DfsDBPath, "'/1_10001'") + So(err, ShouldBeNil) + err = DropPartition(ddb, DfsTBName2, DfsDBPath, "'/1_10001'") + So(err, ShouldBeNil) + reTable1, err := LoadTable(ddb, DfsTBName1, DfsDBPath) + So(err, ShouldBeNil) + reTable2, err := LoadTable(ddb, DfsTBName2, DfsDBPath) + So(err, ShouldBeNil) + reData1 := CompareTables(reTable1, rs) + So(reData1, ShouldBeTrue) + reData2 := CompareTables(reTable2, rs) + So(reData2, ShouldBeTrue) + err = DropDatabase(ddb, DfsDBPath) + So(err, ShouldBeNil) + }) + }) + Convey("Test_DropPartition_tabletMultiple_range_drop_multiple:", func() { + 
Convey("Test_DropPartition_tabletMultiple_range_drop_only_one_table:", func() { + re1, err := ExistsDatabase(ddb, DfsDBPath) + So(err, ShouldBeNil) + So(re1, ShouldBeFalse) + CreateDfsRangedbChunkGranularity(DfsDBPath, DfsTBName1, DfsTBName2) + re2, err := ExistsDatabase(ddb, DfsDBPath) + So(err, ShouldBeNil) + So(re2, ShouldBeTrue) + originTable1, err := LoadTable(ddb, DfsTBName1, DfsDBPath) + So(err, ShouldBeNil) + originTable2, err := LoadTable(ddb, DfsTBName1, DfsDBPath) + So(err, ShouldBeNil) + re := CompareTables(originTable1, originTable2) + So(err, ShouldBeNil) + So(re, ShouldBeTrue) + rs, err := LoadTableBySQL(ddb, "30001", "select * from loadTable('"+DfsDBPath+"','"+DfsTBName1+"') where id >= %s", DfsDBPath, DfsTBName1) + So(err, ShouldBeNil) + err = DropPartition(ddb, DfsTBName1, DfsDBPath, `['/1_10001', '/10001_20001', '/20001_30001']`) + So(err, ShouldBeNil) + reTable1, err := LoadTable(ddb, DfsTBName1, DfsDBPath) + So(err, ShouldBeNil) + reTable2, err := LoadTable(ddb, DfsTBName2, DfsDBPath) + So(err, ShouldBeNil) + reData1 := CompareTables(reTable1, rs) + So(reData1, ShouldBeTrue) + reData2 := CompareTables(reTable2, originTable2) + So(reData2, ShouldBeTrue) + err = DropDatabase(ddb, DfsDBPath) + So(err, ShouldBeNil) + }) + Convey("Test_DropPartition_tabletMultiple_range_drop_all_table:", func() { + re1, err := ExistsDatabase(ddb, DfsDBPath) + So(err, ShouldBeNil) + So(re1, ShouldBeFalse) + CreateDfsRangedbChunkGranularity(DfsDBPath, DfsTBName1, DfsTBName2) + re2, err := ExistsDatabase(ddb, DfsDBPath) + So(err, ShouldBeNil) + So(re2, ShouldBeTrue) + originTable1, err := LoadTable(ddb, DfsTBName1, DfsDBPath) + So(err, ShouldBeNil) + originTable2, err := LoadTable(ddb, DfsTBName1, DfsDBPath) + So(err, ShouldBeNil) + re := CompareTables(originTable1, originTable2) + So(err, ShouldBeNil) + So(re, ShouldBeTrue) + rs, err := LoadTableBySQL(ddb, "30001", "select * from loadTable('"+DfsDBPath+"','"+DfsTBName1+"') where id >= %s", DfsDBPath, DfsTBName1) + So(err, ShouldBeNil) + err = DropPartition(ddb, DfsTBName1, DfsDBPath, `['/1_10001', '/10001_20001', '/20001_30001']`) + So(err, ShouldBeNil) + err = DropPartition(ddb, DfsTBName2, DfsDBPath, `['/1_10001', '/10001_20001', '/20001_30001']`) + So(err, ShouldBeNil) + reTable1, err := LoadTable(ddb, DfsTBName1, DfsDBPath) + So(err, ShouldBeNil) + reTable2, err := LoadTable(ddb, DfsTBName2, DfsDBPath) + So(err, ShouldBeNil) + reData1 := CompareTables(reTable1, rs) + So(reData1, ShouldBeTrue) + reData2 := CompareTables(reTable2, rs) + So(reData2, ShouldBeTrue) + err = DropDatabase(ddb, DfsDBPath) + So(err, ShouldBeNil) + }) + }) + Convey("Test_DropPartition_tabletMultiple_hash_drop_single:", func() { + Convey("Test_DropPartition_tabletMultiple_hash_drop_only_one_table:", func() { + re1, err := ExistsDatabase(ddb, DfsDBPath) + So(err, ShouldBeNil) + So(re1, ShouldBeFalse) + CreateDfsHashdb(DfsDBPath, DfsTBName1, DfsTBName2) + re2, err := ExistsDatabase(ddb, DfsDBPath) + So(err, ShouldBeNil) + So(re2, ShouldBeTrue) + originTable1, err := LoadTable(ddb, DfsTBName1, DfsDBPath) + So(err, ShouldBeNil) + originTable2, err := LoadTable(ddb, DfsTBName1, DfsDBPath) + So(err, ShouldBeNil) + re := CompareTables(originTable1, originTable2) + So(err, ShouldBeNil) + So(re, ShouldBeTrue) + rs, err := LoadTableBySQL(ddb, "10", "select * from loadTable('"+DfsDBPath+"','"+DfsTBName1+"') where id != %s", DfsDBPath, DfsTBName1) + So(err, ShouldBeNil) + err = DropPartition(ddb, DfsTBName1, DfsDBPath, "'/Key0'") + So(err, ShouldBeNil) + reTable1, err := 
LoadTable(ddb, DfsTBName1, DfsDBPath) + So(err, ShouldBeNil) + reTable2, err := LoadTable(ddb, DfsTBName2, DfsDBPath) + So(err, ShouldBeNil) + reData1 := CompareTables(reTable1, rs) + So(reData1, ShouldBeTrue) + reData2 := CompareTables(reTable2, originTable2) + So(reData2, ShouldBeTrue) + err = DropDatabase(ddb, DfsDBPath) + So(err, ShouldBeNil) + }) + Convey("Test_DropPartition_tabletMultiple_hash_drop_all_table:", func() { + re1, err := ExistsDatabase(ddb, DfsDBPath) + So(err, ShouldBeNil) + So(re1, ShouldBeFalse) + CreateDfsHashdb(DfsDBPath, DfsTBName1, DfsTBName2) + re2, err := ExistsDatabase(ddb, DfsDBPath) + So(err, ShouldBeNil) + So(re2, ShouldBeTrue) + originTable1, err := LoadTable(ddb, DfsTBName1, DfsDBPath) + So(err, ShouldBeNil) + originTable2, err := LoadTable(ddb, DfsTBName1, DfsDBPath) + So(err, ShouldBeNil) + re := CompareTables(originTable1, originTable2) + So(err, ShouldBeNil) + So(re, ShouldBeTrue) + rs, err := LoadTableBySQL(ddb, "10", "select * from loadTable('"+DfsDBPath+"','"+DfsTBName1+"') where id != %s", DfsDBPath, DfsTBName1) + So(err, ShouldBeNil) + err = DropPartition(ddb, DfsTBName1, DfsDBPath, "'/Key0'") + So(err, ShouldBeNil) + err = DropPartition(ddb, DfsTBName2, DfsDBPath, "'/Key0'") + So(err, ShouldBeNil) + reTable1, err := LoadTable(ddb, DfsTBName1, DfsDBPath) + So(err, ShouldBeNil) + reTable2, err := LoadTable(ddb, DfsTBName2, DfsDBPath) + So(err, ShouldBeNil) + reData1 := CompareTables(reTable1, rs) + So(reData1, ShouldBeTrue) + reData2 := CompareTables(reTable2, rs) + So(reData2, ShouldBeTrue) + err = DropDatabase(ddb, DfsDBPath) + So(err, ShouldBeNil) + }) + }) + Convey("Test_DropPartition_tabletMultiple_hash_drop_multiple:", func() { + Convey("Test_DropPartition_tabletMultiple_hash_drop_only_one_table:", func() { + re1, err := ExistsDatabase(ddb, DfsDBPath) + So(err, ShouldBeNil) + So(re1, ShouldBeFalse) + CreateDfsHashdb(DfsDBPath, DfsTBName1, DfsTBName2) + re2, err := ExistsDatabase(ddb, DfsDBPath) + So(err, ShouldBeNil) + So(re2, ShouldBeTrue) + originTable1, err := LoadTable(ddb, DfsTBName1, DfsDBPath) + So(err, ShouldBeNil) + originTable2, err := LoadTable(ddb, DfsTBName1, DfsDBPath) + So(err, ShouldBeNil) + re := CompareTables(originTable1, originTable2) + So(err, ShouldBeNil) + So(re, ShouldBeTrue) + rs, err := LoadTableBySQL(ddb, "2..9", "select * from loadTable('"+DfsDBPath+"','"+DfsTBName1+"') where id in %s", DfsDBPath, DfsTBName1) + So(err, ShouldBeNil) + err = DropPartition(ddb, DfsTBName1, DfsDBPath, `['/Key0', '/Key1']`) + So(err, ShouldBeNil) + reTable1, err := LoadTable(ddb, DfsTBName1, DfsDBPath) + So(err, ShouldBeNil) + reTable2, err := LoadTable(ddb, DfsTBName2, DfsDBPath) + So(err, ShouldBeNil) + reData1 := CompareTables(reTable1, rs) + So(reData1, ShouldBeTrue) + reData2 := CompareTables(reTable2, originTable2) + So(reData2, ShouldBeTrue) + err = DropDatabase(ddb, DfsDBPath) + So(err, ShouldBeNil) + }) + Convey("Test_DropPartition_tabletMultiple_hash_drop_all_table:", func() { + re1, err := ExistsDatabase(ddb, DfsDBPath) + So(err, ShouldBeNil) + So(re1, ShouldBeFalse) + CreateDfsHashdb(DfsDBPath, DfsTBName1, DfsTBName2) + re2, err := ExistsDatabase(ddb, DfsDBPath) + So(err, ShouldBeNil) + So(re2, ShouldBeTrue) + originTable1, err := LoadTable(ddb, DfsTBName1, DfsDBPath) + So(err, ShouldBeNil) + originTable2, err := LoadTable(ddb, DfsTBName1, DfsDBPath) + So(err, ShouldBeNil) + re := CompareTables(originTable1, originTable2) + So(err, ShouldBeNil) + So(re, ShouldBeTrue) + rs, err := LoadTableBySQL(ddb, "2..9", "select * from 
loadTable('"+DfsDBPath+"','"+DfsTBName1+"') where id in %s", DfsDBPath, DfsTBName1) + So(err, ShouldBeNil) + err = DropPartition(ddb, DfsTBName1, DfsDBPath, `['/Key0', '/Key1']`) + So(err, ShouldBeNil) + err = DropPartition(ddb, DfsTBName2, DfsDBPath, `['/Key0', '/Key1']`) + So(err, ShouldBeNil) + reTable1, err := LoadTable(ddb, DfsTBName1, DfsDBPath) + So(err, ShouldBeNil) + reTable2, err := LoadTable(ddb, DfsTBName2, DfsDBPath) + So(err, ShouldBeNil) + reData1 := CompareTables(reTable1, rs) + So(reData1, ShouldBeTrue) + reData2 := CompareTables(reTable2, rs) + So(reData2, ShouldBeTrue) + err = DropDatabase(ddb, DfsDBPath) + So(err, ShouldBeNil) + }) + }) + Convey("Test_DropPartition_tabletMultiple_value_drop_single:", func() { + Convey("Test_DropPartition_tabletMultiple_value_drop_only_one_table:", func() { + re1, err := ExistsDatabase(ddb, DfsDBPath) + So(err, ShouldBeNil) + So(re1, ShouldBeFalse) + CreateDfsValuedbChunkGranularity(DfsDBPath, DfsTBName1, DfsTBName2) + re2, err := ExistsDatabase(ddb, DfsDBPath) + So(err, ShouldBeNil) + So(re2, ShouldBeTrue) + originTable1, err := LoadTable(ddb, DfsTBName1, DfsDBPath) + So(err, ShouldBeNil) + originTable2, err := LoadTable(ddb, DfsTBName1, DfsDBPath) + So(err, ShouldBeNil) + re := CompareTables(originTable1, originTable2) + So(err, ShouldBeNil) + So(re, ShouldBeTrue) + rs, err := LoadTableBySQL(ddb, "2010.01.01", "select * from loadTable('"+DfsDBPath+"','"+DfsTBName1+"') where date != %s", DfsDBPath, DfsTBName1) + So(err, ShouldBeNil) + err = DropPartition(ddb, DfsTBName1, DfsDBPath, "'/20100101'") + So(err, ShouldBeNil) + reTable1, err := LoadTable(ddb, DfsTBName1, DfsDBPath) + So(err, ShouldBeNil) + reTable2, err := LoadTable(ddb, DfsTBName2, DfsDBPath) + So(err, ShouldBeNil) + reData1 := CompareTables(reTable1, rs) + So(reData1, ShouldBeTrue) + reData2 := CompareTables(reTable2, originTable2) + So(reData2, ShouldBeTrue) + err = DropDatabase(ddb, DfsDBPath) + So(err, ShouldBeNil) + }) + Convey("Test_DropPartition_tabletMultiple_value_drop_all_table:", func() { + re1, err := ExistsDatabase(ddb, DfsDBPath) + So(err, ShouldBeNil) + So(re1, ShouldBeFalse) + CreateDfsValuedbChunkGranularity(DfsDBPath, DfsTBName1, DfsTBName2) + re2, err := ExistsDatabase(ddb, DfsDBPath) + So(err, ShouldBeNil) + So(re2, ShouldBeTrue) + originTable1, err := LoadTable(ddb, DfsTBName1, DfsDBPath) + So(err, ShouldBeNil) + originTable2, err := LoadTable(ddb, DfsTBName1, DfsDBPath) + So(err, ShouldBeNil) + re := CompareTables(originTable1, originTable2) + So(err, ShouldBeNil) + So(re, ShouldBeTrue) + rs, err := LoadTableBySQL(ddb, "2010.01.01", "select * from loadTable('"+DfsDBPath+"','"+DfsTBName1+"') where date != %s", DfsDBPath, DfsTBName1) + So(err, ShouldBeNil) + err = DropPartition(ddb, DfsTBName1, DfsDBPath, "'/20100101'") + So(err, ShouldBeNil) + err = DropPartition(ddb, DfsTBName2, DfsDBPath, "'/20100101'") + So(err, ShouldBeNil) + reTable1, err := LoadTable(ddb, DfsTBName1, DfsDBPath) + So(err, ShouldBeNil) + reTable2, err := LoadTable(ddb, DfsTBName2, DfsDBPath) + So(err, ShouldBeNil) + reData1 := CompareTables(reTable1, rs) + So(reData1, ShouldBeTrue) + reData2 := CompareTables(reTable2, rs) + So(reData2, ShouldBeTrue) + err = DropDatabase(ddb, DfsDBPath) + So(err, ShouldBeNil) + }) + }) + Convey("Test_DropPartition_tabletMultiple_value_drop_multiple:", func() { + Convey("Test_DropPartition_tabletMultiple_value_drop_only_one_table:", func() { + re1, err := ExistsDatabase(ddb, DfsDBPath) + So(err, ShouldBeNil) + So(re1, ShouldBeFalse) + 
CreateDfsValuedbChunkGranularity(DfsDBPath, DfsTBName1, DfsTBName2) + re2, err := ExistsDatabase(ddb, DfsDBPath) + So(err, ShouldBeNil) + So(re2, ShouldBeTrue) + originTable1, err := LoadTable(ddb, DfsTBName1, DfsDBPath) + So(err, ShouldBeNil) + originTable2, err := LoadTable(ddb, DfsTBName1, DfsDBPath) + So(err, ShouldBeNil) + re := CompareTables(originTable1, originTable2) + So(err, ShouldBeNil) + So(re, ShouldBeTrue) + rs, err := LoadTableBySQL(ddb, "!in(date, 2010.01.01+[0, 7, 14, 21])", "select * from loadTable('"+DfsDBPath+"','"+DfsTBName1+"') where %s", DfsDBPath, DfsTBName1) + So(err, ShouldBeNil) + err = DropPartition(ddb, DfsTBName1, DfsDBPath, `['/20100101', '/20100108', '/20100115', '/20100122']`) + So(err, ShouldBeNil) + reTable1, err := LoadTable(ddb, DfsTBName1, DfsDBPath) + So(err, ShouldBeNil) + reTable2, err := LoadTable(ddb, DfsTBName2, DfsDBPath) + So(err, ShouldBeNil) + reData1 := CompareTables(reTable1, rs) + So(reData1, ShouldBeTrue) + reData2 := CompareTables(reTable2, originTable2) + So(reData2, ShouldBeTrue) + err = DropDatabase(ddb, DfsDBPath) + So(err, ShouldBeNil) + }) + Convey("Test_DropPartition_tabletMultiple_value_drop_all_table:", func() { + re1, err := ExistsDatabase(ddb, DfsDBPath) + So(err, ShouldBeNil) + So(re1, ShouldBeFalse) + CreateDfsValuedbChunkGranularity(DfsDBPath, DfsTBName1, DfsTBName2) + re2, err := ExistsDatabase(ddb, DfsDBPath) + So(err, ShouldBeNil) + So(re2, ShouldBeTrue) + originTable1, err := LoadTable(ddb, DfsTBName1, DfsDBPath) + So(err, ShouldBeNil) + originTable2, err := LoadTable(ddb, DfsTBName1, DfsDBPath) + So(err, ShouldBeNil) + re := CompareTables(originTable1, originTable2) + So(err, ShouldBeNil) + So(re, ShouldBeTrue) + rs, err := LoadTableBySQL(ddb, "!in(date, 2010.01.01+[0, 7, 14, 21])", "select * from loadTable('"+DfsDBPath+"','"+DfsTBName1+"') where %s", DfsDBPath, DfsTBName1) + So(err, ShouldBeNil) + err = DropPartition(ddb, DfsTBName1, DfsDBPath, `['/20100101', '/20100108', '/20100115', '/20100122']`) + So(err, ShouldBeNil) + err = DropPartition(ddb, DfsTBName2, DfsDBPath, `['/20100101', '/20100108', '/20100115', '/20100122']`) + So(err, ShouldBeNil) + reTable1, err := LoadTable(ddb, DfsTBName1, DfsDBPath) + So(err, ShouldBeNil) + reTable2, err := LoadTable(ddb, DfsTBName2, DfsDBPath) + So(err, ShouldBeNil) + reData1 := CompareTables(reTable1, rs) + So(reData1, ShouldBeTrue) + reData2 := CompareTables(reTable2, rs) + So(reData2, ShouldBeTrue) + err = DropDatabase(ddb, DfsDBPath) + So(err, ShouldBeNil) + }) + }) + Convey("Test_DropPartition_tabletMultiple_list_drop_single:", func() { + Convey("Test_DropPartition_tabletMultiple_list_drop_only_one_table:", func() { + re1, err := ExistsDatabase(ddb, DfsDBPath) + So(err, ShouldBeNil) + So(re1, ShouldBeFalse) + CreateDfsListdbChunkGranularity(DfsDBPath, DfsTBName1, DfsTBName2) + re2, err := ExistsDatabase(ddb, DfsDBPath) + So(err, ShouldBeNil) + So(re2, ShouldBeTrue) + originTable1, err := LoadTable(ddb, DfsTBName1, DfsDBPath) + So(err, ShouldBeNil) + originTable2, err := LoadTable(ddb, DfsTBName1, DfsDBPath) + So(err, ShouldBeNil) + re := CompareTables(originTable1, originTable2) + So(err, ShouldBeNil) + So(re, ShouldBeTrue) + rs, err := LoadTableBySQL(ddb, "!in(sym,`AMD`QWE`CES)", "select * from loadTable('"+DfsDBPath+"','"+DfsTBName1+"') where %s", DfsDBPath, DfsTBName1) + So(err, ShouldBeNil) + err = DropPartition(ddb, DfsTBName1, DfsDBPath, "'/List0'") + So(err, ShouldBeNil) + reTable1, err := LoadTable(ddb, DfsTBName1, DfsDBPath) + So(err, ShouldBeNil) + reTable2, err 
:= LoadTable(ddb, DfsTBName2, DfsDBPath) + So(err, ShouldBeNil) + reData1 := CompareTables(reTable1, rs) + So(reData1, ShouldBeTrue) + reData2 := CompareTables(reTable2, originTable2) + So(reData2, ShouldBeTrue) + err = DropDatabase(ddb, DfsDBPath) + So(err, ShouldBeNil) + }) + Convey("Test_DropPartition_tabletMultiple_list_drop_all_table:", func() { + re1, err := ExistsDatabase(ddb, DfsDBPath) + So(err, ShouldBeNil) + So(re1, ShouldBeFalse) + CreateDfsListdbChunkGranularity(DfsDBPath, DfsTBName1, DfsTBName2) + re2, err := ExistsDatabase(ddb, DfsDBPath) + So(err, ShouldBeNil) + So(re2, ShouldBeTrue) + originTable1, err := LoadTable(ddb, DfsTBName1, DfsDBPath) + So(err, ShouldBeNil) + originTable2, err := LoadTable(ddb, DfsTBName1, DfsDBPath) + So(err, ShouldBeNil) + re := CompareTables(originTable1, originTable2) + So(err, ShouldBeNil) + So(re, ShouldBeTrue) + rs, err := LoadTableBySQL(ddb, "!in(sym,`AMD`QWE`CES)", "select * from loadTable('"+DfsDBPath+"','"+DfsTBName1+"') where %s", DfsDBPath, DfsTBName1) + So(err, ShouldBeNil) + err = DropPartition(ddb, DfsTBName1, DfsDBPath, "'/List0'") + So(err, ShouldBeNil) + err = DropPartition(ddb, DfsTBName2, DfsDBPath, "'/List0'") + So(err, ShouldBeNil) + reTable1, err := LoadTable(ddb, DfsTBName1, DfsDBPath) + So(err, ShouldBeNil) + reTable2, err := LoadTable(ddb, DfsTBName2, DfsDBPath) + So(err, ShouldBeNil) + reData1 := CompareTables(reTable1, rs) + So(reData1, ShouldBeTrue) + reData2 := CompareTables(reTable2, rs) + So(reData2, ShouldBeTrue) + err = DropDatabase(ddb, DfsDBPath) + So(err, ShouldBeNil) + }) + }) + Convey("Test_DropPartition_tabletMultiple_list_drop_multiple:", func() { + Convey("Test_DropPartition_tabletMultiple_list_drop_only_one_table:", func() { + re1, err := ExistsDatabase(ddb, DfsDBPath) + So(err, ShouldBeNil) + So(re1, ShouldBeFalse) + CreateDfsListdbChunkGranularity(DfsDBPath, DfsTBName1, DfsTBName2) + re2, err := ExistsDatabase(ddb, DfsDBPath) + So(err, ShouldBeNil) + So(re2, ShouldBeTrue) + originTable1, err := LoadTable(ddb, DfsTBName1, DfsDBPath) + So(err, ShouldBeNil) + originTable2, err := LoadTable(ddb, DfsTBName1, DfsDBPath) + So(err, ShouldBeNil) + re := CompareTables(originTable1, originTable2) + So(err, ShouldBeNil) + So(re, ShouldBeTrue) + rs, err := LoadTableBySQL(ddb, "!in(sym,`DOP`ASZ`FSD`BBVC)", "select * from loadTable('"+DfsDBPath+"','"+DfsTBName1+"') where %s", DfsDBPath, DfsTBName1) + So(err, ShouldBeNil) + err = DropPartition(ddb, DfsTBName1, DfsDBPath, `['/List1', '/List2']`) + So(err, ShouldBeNil) + reTable1, err := LoadTable(ddb, DfsTBName1, DfsDBPath) + So(err, ShouldBeNil) + reTable2, err := LoadTable(ddb, DfsTBName2, DfsDBPath) + So(err, ShouldBeNil) + reData1 := CompareTables(reTable1, rs) + So(reData1, ShouldBeTrue) + reData2 := CompareTables(reTable2, originTable2) + So(reData2, ShouldBeTrue) + err = DropDatabase(ddb, DfsDBPath) + So(err, ShouldBeNil) + }) + Convey("Test_DropPartition_tabletMultiple_list_drop_all_table:", func() { + re1, err := ExistsDatabase(ddb, DfsDBPath) + So(err, ShouldBeNil) + So(re1, ShouldBeFalse) + CreateDfsListdbChunkGranularity(DfsDBPath, DfsTBName1, DfsTBName2) + re2, err := ExistsDatabase(ddb, DfsDBPath) + So(err, ShouldBeNil) + So(re2, ShouldBeTrue) + originTable1, err := LoadTable(ddb, DfsTBName1, DfsDBPath) + So(err, ShouldBeNil) + originTable2, err := LoadTable(ddb, DfsTBName1, DfsDBPath) + So(err, ShouldBeNil) + re := CompareTables(originTable1, originTable2) + So(err, ShouldBeNil) + So(re, ShouldBeTrue) + rs, err := LoadTableBySQL(ddb, 
"!in(sym,`DOP`ASZ`FSD`BBVC)", "select * from loadTable('"+DfsDBPath+"','"+DfsTBName1+"') where %s", DfsDBPath, DfsTBName1) + So(err, ShouldBeNil) + err = DropPartition(ddb, DfsTBName1, DfsDBPath, `['/List1', '/List2']`) + So(err, ShouldBeNil) + err = DropPartition(ddb, DfsTBName2, DfsDBPath, `['/List1', '/List2']`) + So(err, ShouldBeNil) + reTable1, err := LoadTable(ddb, DfsTBName1, DfsDBPath) + So(err, ShouldBeNil) + reTable2, err := LoadTable(ddb, DfsTBName2, DfsDBPath) + So(err, ShouldBeNil) + reData1 := CompareTables(reTable1, rs) + So(reData1, ShouldBeTrue) + reData2 := CompareTables(reTable2, rs) + So(reData2, ShouldBeTrue) + err = DropDatabase(ddb, DfsDBPath) + So(err, ShouldBeNil) + }) + }) + Convey("Test_DropPartition_tabletMultiple_compo_range_range_drop_level1_single:", func() { + Convey("Test_DropPartition_tabletMultiple_compo_range_range_drop_level1_only_one_table:", func() { + re1, err := ExistsDatabase(ddb, DfsDBPath) + So(err, ShouldBeNil) + So(re1, ShouldBeFalse) + CreateDfsCompoRangeRangedbChunkGranularity(DfsDBPath, DfsTBName1, DfsTBName2) + re2, err := ExistsDatabase(ddb, DfsDBPath) + So(err, ShouldBeNil) + So(re2, ShouldBeTrue) + originTable1, err := LoadTable(ddb, DfsTBName1, DfsDBPath) + So(err, ShouldBeNil) + originTable2, err := LoadTable(ddb, DfsTBName1, DfsDBPath) + So(err, ShouldBeNil) + re := CompareTables(originTable1, originTable2) + So(err, ShouldBeNil) + So(re, ShouldBeTrue) + rs, err := LoadTableBySQL(ddb, "2010.02.01", "select * from loadTable('"+DfsDBPath+"','"+DfsTBName1+"') where date >= %s", DfsDBPath, DfsTBName1) + So(err, ShouldBeNil) + err = DropPartition(ddb, DfsTBName1, DfsDBPath, "2010.01.01") + So(err, ShouldBeNil) + reTable1, err := LoadTable(ddb, DfsTBName1, DfsDBPath) + So(err, ShouldBeNil) + reTable2, err := LoadTable(ddb, DfsTBName2, DfsDBPath) + So(err, ShouldBeNil) + reData1 := CompareTables(reTable1, rs) + So(reData1, ShouldBeTrue) + reData2 := CompareTables(reTable2, originTable2) + So(reData2, ShouldBeTrue) + err = DropDatabase(ddb, DfsDBPath) + So(err, ShouldBeNil) + }) + Convey("Test_DropPartition_tabletMultiple_compo_range_range_drop_level1_all_table:", func() { + re1, err := ExistsDatabase(ddb, DfsDBPath) + So(err, ShouldBeNil) + So(re1, ShouldBeFalse) + CreateDfsCompoRangeRangedbChunkGranularity(DfsDBPath, DfsTBName1, DfsTBName2) + re2, err := ExistsDatabase(ddb, DfsDBPath) + So(err, ShouldBeNil) + So(re2, ShouldBeTrue) + originTable1, err := LoadTable(ddb, DfsTBName1, DfsDBPath) + So(err, ShouldBeNil) + originTable2, err := LoadTable(ddb, DfsTBName1, DfsDBPath) + So(err, ShouldBeNil) + re := CompareTables(originTable1, originTable2) + So(err, ShouldBeNil) + So(re, ShouldBeTrue) + rs, err := LoadTableBySQL(ddb, "2010.02.01", "select * from loadTable('"+DfsDBPath+"','"+DfsTBName1+"') where date >= %s", DfsDBPath, DfsTBName1) + So(err, ShouldBeNil) + err = DropPartition(ddb, DfsTBName1, DfsDBPath, "2010.01.01") + So(err, ShouldBeNil) + err = DropPartition(ddb, DfsTBName2, DfsDBPath, "2010.01.01") + So(err, ShouldBeNil) + reTable1, err := LoadTable(ddb, DfsTBName1, DfsDBPath) + So(err, ShouldBeNil) + reTable2, err := LoadTable(ddb, DfsTBName2, DfsDBPath) + So(err, ShouldBeNil) + reData1 := CompareTables(reTable1, rs) + So(reData1, ShouldBeTrue) + reData2 := CompareTables(reTable2, rs) + So(reData2, ShouldBeTrue) + err = DropDatabase(ddb, DfsDBPath) + So(err, ShouldBeNil) + }) + }) + Convey("Test_DropPartition_tabletMultiple_compo_range_range_drop_level1_multiple:", func() { + 
Convey("Test_DropPartition_tabletMultiple_compo_range_range_drop_level1_only_one_table:", func() { + re1, err := ExistsDatabase(ddb, DfsDBPath) + So(err, ShouldBeNil) + So(re1, ShouldBeFalse) + CreateDfsCompoRangeRangedbChunkGranularity(DfsDBPath, DfsTBName1, DfsTBName2) + re2, err := ExistsDatabase(ddb, DfsDBPath) + So(err, ShouldBeNil) + So(re2, ShouldBeTrue) + originTable1, err := LoadTable(ddb, DfsTBName1, DfsDBPath) + So(err, ShouldBeNil) + originTable2, err := LoadTable(ddb, DfsTBName1, DfsDBPath) + So(err, ShouldBeNil) + re := CompareTables(originTable1, originTable2) + So(err, ShouldBeNil) + So(re, ShouldBeTrue) + rs, err := LoadTableBySQL(ddb, "2010.03.01", "select * from loadTable('"+DfsDBPath+"','"+DfsTBName1+"') where date >= %s", DfsDBPath, DfsTBName1) + So(err, ShouldBeNil) + err = DropPartition(ddb, DfsTBName1, DfsDBPath, `[2010.01.01, 2010.02.01]`) + So(err, ShouldBeNil) + reTable1, err := LoadTable(ddb, DfsTBName1, DfsDBPath) + So(err, ShouldBeNil) + reTable2, err := LoadTable(ddb, DfsTBName2, DfsDBPath) + So(err, ShouldBeNil) + reData1 := CompareTables(reTable1, rs) + So(reData1, ShouldBeTrue) + reData2 := CompareTables(reTable2, originTable2) + So(reData2, ShouldBeTrue) + err = DropDatabase(ddb, DfsDBPath) + So(err, ShouldBeNil) + }) + Convey("Test_DropPartition_tabletMultiple_compo_range_range_drop_level1_all_table:", func() { + re1, err := ExistsDatabase(ddb, DfsDBPath) + So(err, ShouldBeNil) + So(re1, ShouldBeFalse) + CreateDfsCompoRangeRangedbChunkGranularity(DfsDBPath, DfsTBName1, DfsTBName2) + re2, err := ExistsDatabase(ddb, DfsDBPath) + So(err, ShouldBeNil) + So(re2, ShouldBeTrue) + originTable1, err := LoadTable(ddb, DfsTBName1, DfsDBPath) + So(err, ShouldBeNil) + originTable2, err := LoadTable(ddb, DfsTBName1, DfsDBPath) + So(err, ShouldBeNil) + re := CompareTables(originTable1, originTable2) + So(err, ShouldBeNil) + So(re, ShouldBeTrue) + rs, err := LoadTableBySQL(ddb, "2010.03.01", "select * from loadTable('"+DfsDBPath+"','"+DfsTBName1+"') where date >= %s", DfsDBPath, DfsTBName1) + So(err, ShouldBeNil) + err = DropPartition(ddb, DfsTBName1, DfsDBPath, `[2010.01.01, 2010.02.01]`) + So(err, ShouldBeNil) + err = DropPartition(ddb, DfsTBName2, DfsDBPath, `[2010.01.01, 2010.02.01]`) + So(err, ShouldBeNil) + reTable1, err := LoadTable(ddb, DfsTBName1, DfsDBPath) + So(err, ShouldBeNil) + reTable2, err := LoadTable(ddb, DfsTBName2, DfsDBPath) + So(err, ShouldBeNil) + reData1 := CompareTables(reTable1, rs) + So(reData1, ShouldBeTrue) + reData2 := CompareTables(reTable2, rs) + So(reData2, ShouldBeTrue) + err = DropDatabase(ddb, DfsDBPath) + So(err, ShouldBeNil) + }) + }) + Convey("Test_DropPartition_tabletMultiple_compo_range_range_drop_level2_single:", func() { + Convey("Test_DropPartition_tabletMultiple_compo_range_range_drop_level2_only_one_table:", func() { + re1, err := ExistsDatabase(ddb, DfsDBPath) + So(err, ShouldBeNil) + So(re1, ShouldBeFalse) + CreateDfsCompoRangeRangedbChunkGranularity(DfsDBPath, DfsTBName1, DfsTBName2) + re2, err := ExistsDatabase(ddb, DfsDBPath) + So(err, ShouldBeNil) + So(re2, ShouldBeTrue) + originTable1, err := LoadTable(ddb, DfsTBName1, DfsDBPath) + So(err, ShouldBeNil) + originTable2, err := LoadTable(ddb, DfsTBName1, DfsDBPath) + So(err, ShouldBeNil) + re := CompareTables(originTable1, originTable2) + So(err, ShouldBeNil) + So(re, ShouldBeTrue) + rs, err := LoadTableBySQL(ddb, "2010.02.01 or id >= 3", "select * from loadTable('"+DfsDBPath+"','"+DfsTBName1+"') where date >= %s", DfsDBPath, DfsTBName1) + So(err, ShouldBeNil) + err = 
DropPartition(ddb, DfsTBName1, DfsDBPath, `[2010.01.01, 1]`) + So(err, ShouldBeNil) + reTable1, err := LoadTable(ddb, DfsTBName1, DfsDBPath) + So(err, ShouldBeNil) + reTable2, err := LoadTable(ddb, DfsTBName2, DfsDBPath) + So(err, ShouldBeNil) + reData1 := CompareTables(reTable1, rs) + So(reData1, ShouldBeTrue) + reData2 := CompareTables(reTable2, originTable2) + So(reData2, ShouldBeTrue) + err = DropDatabase(ddb, DfsDBPath) + So(err, ShouldBeNil) + })
+ Convey("Test_DropPartition_tabletMultiple_compo_range_range_drop_level2_all_table:", func() { + re1, err := ExistsDatabase(ddb, DfsDBPath) + So(err, ShouldBeNil) + So(re1, ShouldBeFalse) + CreateDfsCompoRangeRangedbChunkGranularity(DfsDBPath, DfsTBName1, DfsTBName2) + re2, err := ExistsDatabase(ddb, DfsDBPath) + So(err, ShouldBeNil) + So(re2, ShouldBeTrue) + originTable1, err := LoadTable(ddb, DfsTBName1, DfsDBPath) + So(err, ShouldBeNil) + originTable2, err := LoadTable(ddb, DfsTBName2, DfsDBPath) + So(err, ShouldBeNil) + re := CompareTables(originTable1, originTable2) + So(re, ShouldBeTrue) + rs, err := LoadTableBySQL(ddb, "2010.02.01 or id >= 3", "select * from loadTable('"+DfsDBPath+"','"+DfsTBName1+"') where date >= %s", DfsDBPath, DfsTBName1) + So(err, ShouldBeNil) + err = DropPartition(ddb, DfsTBName1, DfsDBPath, `[2010.01.01, 1]`) + So(err, ShouldBeNil) + err = DropPartition(ddb, DfsTBName2, DfsDBPath, `[2010.01.01, 1]`) + So(err, ShouldBeNil) + reTable1, err := LoadTable(ddb, DfsTBName1, DfsDBPath) + So(err, ShouldBeNil) + reTable2, err := LoadTable(ddb, DfsTBName2, DfsDBPath) + So(err, ShouldBeNil) + reData1 := CompareTables(reTable1, rs) + So(reData1, ShouldBeTrue) + reData2 := CompareTables(reTable2, rs) + So(reData2, ShouldBeTrue) + err = DropDatabase(ddb, DfsDBPath) + So(err, ShouldBeNil) + }) + })
+ Convey("Test_DropPartition_tabletMultiple_compo_range_range_drop_level2_multiple:", func() { + Convey("Test_DropPartition_tabletMultiple_compo_range_range_drop_level2_only_one_table:", func() { + re1, err := ExistsDatabase(ddb, DfsDBPath) + So(err, ShouldBeNil) + So(re1, ShouldBeFalse) + CreateDfsCompoRangeRangedbChunkGranularity(DfsDBPath, DfsTBName1, DfsTBName2) + re2, err := ExistsDatabase(ddb, DfsDBPath) + So(err, ShouldBeNil) + So(re2, ShouldBeTrue) + originTable1, err := LoadTable(ddb, DfsTBName1, DfsDBPath) + So(err, ShouldBeNil) + originTable2, err := LoadTable(ddb, DfsTBName2, DfsDBPath) + So(err, ShouldBeNil) + re := CompareTables(originTable1, originTable2) + So(re, ShouldBeTrue) + rs, err := LoadTableBySQL(ddb, "date >= 2010.03.01 or !between(id, 3:6)", "select * from loadTable('"+DfsDBPath+"','"+DfsTBName1+"') where %s", DfsDBPath, DfsTBName1) + So(err, ShouldBeNil) + err = DropPartition(ddb, DfsTBName1, DfsDBPath, `[[2010.01.01,2010.02.01], [3,5]]`) + So(err, ShouldBeNil) + reTable1, err := LoadTable(ddb, DfsTBName1, DfsDBPath) + So(err, ShouldBeNil) + reTable2, err := LoadTable(ddb, DfsTBName2, DfsDBPath) + So(err, ShouldBeNil) + reData1 := CompareTables(reTable1, rs) + So(reData1, ShouldBeTrue) + reData2 := CompareTables(reTable2, originTable2) + So(reData2, ShouldBeTrue) + err = DropDatabase(ddb, DfsDBPath) + So(err, ShouldBeNil) + })
+ Convey("Test_DropPartition_tabletMultiple_compo_range_range_drop_level2_all_table:", func() { + re1, err := ExistsDatabase(ddb, DfsDBPath) + So(err, ShouldBeNil) + So(re1, ShouldBeFalse) + CreateDfsCompoRangeRangedbChunkGranularity(DfsDBPath, DfsTBName1, DfsTBName2) + re2, err := ExistsDatabase(ddb, DfsDBPath) + So(err, ShouldBeNil) + So(re2, ShouldBeTrue) + originTable1, err := LoadTable(ddb, DfsTBName1, DfsDBPath) + So(err, ShouldBeNil) + originTable2, err := LoadTable(ddb, DfsTBName2, DfsDBPath) + So(err, ShouldBeNil) + re := CompareTables(originTable1, originTable2) + So(re, ShouldBeTrue) + rs, err := LoadTableBySQL(ddb, "date >= 2010.03.01 or !between(id, 3:6)", "select * from loadTable('"+DfsDBPath+"','"+DfsTBName1+"') where %s", DfsDBPath, DfsTBName1) + So(err, ShouldBeNil) + err = DropPartition(ddb, DfsTBName1, DfsDBPath, `[[2010.01.01,2010.02.01], [3,5]]`) + So(err, ShouldBeNil) + err = DropPartition(ddb, DfsTBName2, DfsDBPath, `[[2010.01.01,2010.02.01], [3,5]]`) + So(err, ShouldBeNil) + reTable1, err := LoadTable(ddb, DfsTBName1, DfsDBPath) + So(err, ShouldBeNil) + reTable2, err := LoadTable(ddb, DfsTBName2, DfsDBPath) + So(err, ShouldBeNil) + reData1 := CompareTables(reTable1, rs) + So(reData1, ShouldBeTrue) + reData2 := CompareTables(reTable2, rs) + So(reData2, ShouldBeTrue) + err = DropDatabase(ddb, DfsDBPath) + So(err, ShouldBeNil) + }) + }) + }) +}
diff --git a/test/dropPartition_tabletSingle_test.go b/test/dropPartition_tabletSingle_test.go new file mode 100644 index 0000000..ffb3956 --- /dev/null +++ b/test/dropPartition_tabletSingle_test.go @@ -0,0 +1,858 @@ +package test + +import ( + "context" + "testing" + + "github.com/dolphindb/api-go/api" + "github.com/dolphindb/api-go/model" + "github.com/dolphindb/api-go/test/setup" + . "github.com/smartystreets/goconvey/convey" +) + +func TestDropPartition_tabletSingle(t *testing.T) { + Convey("Test_DropPartition_tabletSingle_prepare", t, func() { + ddb, err := api.NewSimpleDolphinDBClient(context.TODO(), setup.Address, setup.UserName, setup.Password) + So(err, ShouldBeNil) + Convey("Drop all Databases", func() { + dbPaths := []string{DfsDBPath, DiskDBPath} + for _, dbPath := range dbPaths { + script := ` + if(existsDatabase("` + dbPath + `")){ + dropDatabase("` + dbPath + `") + } + if(exists("` + dbPath + `")){ + rmdir("` + dbPath + `", true) + } + ` + _, err = ddb.RunScript(script) + So(err, ShouldBeNil) + re, err := ddb.RunScript(`existsDatabase("` + dbPath + `")`) + So(err, ShouldBeNil) + isExistsDatabase := re.(*model.Scalar).DataType.Value() + So(isExistsDatabase, ShouldBeFalse) + } + })
+ Convey("Test_DropPartition_tabletSingle_range_drop_single:", func() { + Convey("Test_DropPartition_tabletSingle_range_drop_only_one_table:", func() { + re1, err := ExistsDatabase(ddb, DfsDBPath) + So(err, ShouldBeNil) + So(re1, ShouldBeFalse) + CreateDfsRangedb(DfsDBPath, DfsTBName1, DfsTBName2) + re2, err := ExistsDatabase(ddb, DfsDBPath) + So(err, ShouldBeNil) + So(re2, ShouldBeTrue) + originTable1, err := LoadTable(ddb, DfsTBName1, DfsDBPath) + So(err, ShouldBeNil) + originTable2, err := LoadTable(ddb, DfsTBName2, DfsDBPath) + So(err, ShouldBeNil) + re := CompareTables(originTable1, originTable2) + So(re, ShouldBeTrue) + rs, err := LoadTableBySQL(ddb, "10001", "select * from loadTable('"+DfsDBPath+"','"+DfsTBName1+"') where id >= %s", DfsDBPath, DfsTBName1) + So(err, ShouldBeNil) + err = DropPartition(ddb, DfsTBName1, DfsDBPath, "'/1_10001'") + So(err, ShouldBeNil) + reTable1, err := LoadTable(ddb, DfsTBName1, DfsDBPath) + So(err, ShouldBeNil) + reTable2, err := LoadTable(ddb, DfsTBName2, DfsDBPath) + So(err, ShouldBeNil) + reData1 := CompareTables(reTable1, rs) + So(reData1, ShouldBeTrue) + reData2 := CompareTables(reTable2, originTable2) + So(reData2, ShouldBeTrue) + err = DropDatabase(ddb,
DfsDBPath) + So(err, ShouldBeNil) + }) + Convey("Test_DropPartition_tabletSingle_range_drop_all_tables:", func() { + re1, err := ExistsDatabase(ddb, DfsDBPath) + So(err, ShouldBeNil) + So(re1, ShouldBeFalse) + CreateDfsRangedb(DfsDBPath, DfsTBName1, DfsTBName2) + re2, err := ExistsDatabase(ddb, DfsDBPath) + So(err, ShouldBeNil) + So(re2, ShouldBeTrue) + originTable1, err := LoadTable(ddb, DfsTBName1, DfsDBPath) + So(err, ShouldBeNil) + originTable2, err := LoadTable(ddb, DfsTBName1, DfsDBPath) + So(err, ShouldBeNil) + re := CompareTables(originTable1, originTable2) + So(err, ShouldBeNil) + So(re, ShouldBeTrue) + rs, err := LoadTableBySQL(ddb, "10001", "select * from loadTable('"+DfsDBPath+"','"+DfsTBName1+"') where id >= %s", DfsDBPath, DfsTBName1) + So(err, ShouldBeNil) + err = DropPartition(ddb, DfsTBName1, DfsDBPath, "'/1_10001'") + So(err, ShouldBeNil) + err = DropPartition(ddb, DfsTBName2, DfsDBPath, "'/1_10001'") + So(err, ShouldBeNil) + reTable1, err := LoadTable(ddb, DfsTBName1, DfsDBPath) + So(err, ShouldBeNil) + reTable2, err := LoadTable(ddb, DfsTBName2, DfsDBPath) + So(err, ShouldBeNil) + reData1 := CompareTables(reTable1, rs) + So(reData1, ShouldBeTrue) + reData2 := CompareTables(reTable2, rs) + So(reData2, ShouldBeTrue) + err = DropDatabase(ddb, DfsDBPath) + So(err, ShouldBeNil) + }) + }) + Convey("Test_DropPartition_tabletSingle_range_drop_multiple:", func() { + Convey("Test_DropPartition_tabletSingle_range_drop_only_one_table:", func() { + re1, err := ExistsDatabase(ddb, DfsDBPath) + So(err, ShouldBeNil) + So(re1, ShouldBeFalse) + CreateDfsRangedb(DfsDBPath, DfsTBName1, DfsTBName2) + re2, err := ExistsDatabase(ddb, DfsDBPath) + So(err, ShouldBeNil) + So(re2, ShouldBeTrue) + originTable1, err := LoadTable(ddb, DfsTBName1, DfsDBPath) + So(err, ShouldBeNil) + originTable2, err := LoadTable(ddb, DfsTBName1, DfsDBPath) + So(err, ShouldBeNil) + re := CompareTables(originTable1, originTable2) + So(err, ShouldBeNil) + So(re, ShouldBeTrue) + rs, err := LoadTableBySQL(ddb, "30001", "select * from loadTable('"+DfsDBPath+"','"+DfsTBName1+"') where id >= %s", DfsDBPath, DfsTBName1) + So(err, ShouldBeNil) + err = DropPartition(ddb, DfsTBName1, DfsDBPath, `['/1_10001', '/10001_20001', '/20001_30001']`) + So(err, ShouldBeNil) + reTable1, err := LoadTable(ddb, DfsTBName1, DfsDBPath) + So(err, ShouldBeNil) + reTable2, err := LoadTable(ddb, DfsTBName2, DfsDBPath) + So(err, ShouldBeNil) + reData1 := CompareTables(reTable1, rs) + So(reData1, ShouldBeTrue) + reData2 := CompareTables(reTable2, originTable2) + So(reData2, ShouldBeTrue) + err = DropDatabase(ddb, DfsDBPath) + So(err, ShouldBeNil) + }) + Convey("Test_DropPartition_tabletSingle_range_drop_all_table:", func() { + re1, err := ExistsDatabase(ddb, DfsDBPath) + So(err, ShouldBeNil) + So(re1, ShouldBeFalse) + CreateDfsRangedb(DfsDBPath, DfsTBName1, DfsTBName2) + re2, err := ExistsDatabase(ddb, DfsDBPath) + So(err, ShouldBeNil) + So(re2, ShouldBeTrue) + originTable1, err := LoadTable(ddb, DfsTBName1, DfsDBPath) + So(err, ShouldBeNil) + originTable2, err := LoadTable(ddb, DfsTBName1, DfsDBPath) + So(err, ShouldBeNil) + re := CompareTables(originTable1, originTable2) + So(err, ShouldBeNil) + So(re, ShouldBeTrue) + rs, err := LoadTableBySQL(ddb, "30001", "select * from loadTable('"+DfsDBPath+"','"+DfsTBName1+"') where id >= %s", DfsDBPath, DfsTBName1) + So(err, ShouldBeNil) + err = DropPartition(ddb, DfsTBName1, DfsDBPath, `['/1_10001', '/10001_20001', '/20001_30001']`) + So(err, ShouldBeNil) + err = DropPartition(ddb, DfsTBName2, DfsDBPath, 
`['/1_10001', '/10001_20001', '/20001_30001']`) + So(err, ShouldBeNil) + reTable1, err := LoadTable(ddb, DfsTBName1, DfsDBPath) + So(err, ShouldBeNil) + reTable2, err := LoadTable(ddb, DfsTBName2, DfsDBPath) + So(err, ShouldBeNil) + reData1 := CompareTables(reTable1, rs) + So(reData1, ShouldBeTrue) + reData2 := CompareTables(reTable2, rs) + So(reData2, ShouldBeTrue) + err = DropDatabase(ddb, DfsDBPath) + So(err, ShouldBeNil) + }) + }) + Convey("Test_DropPartition_tabletSingle_hash_drop_single:", func() { + Convey("Test_DropPartition_tabletSingle_hash_drop_only_one_table:", func() { + re1, err := ExistsDatabase(ddb, DfsDBPath) + So(err, ShouldBeNil) + So(re1, ShouldBeFalse) + CreateDfsHashdb(DfsDBPath, DfsTBName1, DfsTBName2) + re2, err := ExistsDatabase(ddb, DfsDBPath) + So(err, ShouldBeNil) + So(re2, ShouldBeTrue) + originTable1, err := LoadTable(ddb, DfsTBName1, DfsDBPath) + So(err, ShouldBeNil) + originTable2, err := LoadTable(ddb, DfsTBName1, DfsDBPath) + So(err, ShouldBeNil) + re := CompareTables(originTable1, originTable2) + So(err, ShouldBeNil) + So(re, ShouldBeTrue) + rs, err := LoadTableBySQL(ddb, "10", "select * from loadTable('"+DfsDBPath+"','"+DfsTBName1+"') where id != %s", DfsDBPath, DfsTBName1) + So(err, ShouldBeNil) + err = DropPartition(ddb, DfsTBName1, DfsDBPath, "'/Key0'") + So(err, ShouldBeNil) + reTable1, err := LoadTable(ddb, DfsTBName1, DfsDBPath) + So(err, ShouldBeNil) + reTable2, err := LoadTable(ddb, DfsTBName2, DfsDBPath) + So(err, ShouldBeNil) + reData1 := CompareTables(reTable1, rs) + So(reData1, ShouldBeTrue) + reData2 := CompareTables(reTable2, originTable2) + So(reData2, ShouldBeTrue) + err = DropDatabase(ddb, DfsDBPath) + So(err, ShouldBeNil) + }) + Convey("Test_DropPartition_tabletSingle_hash_drop_all_table:", func() { + re1, err := ExistsDatabase(ddb, DfsDBPath) + So(err, ShouldBeNil) + So(re1, ShouldBeFalse) + CreateDfsHashdb(DfsDBPath, DfsTBName1, DfsTBName2) + re2, err := ExistsDatabase(ddb, DfsDBPath) + So(err, ShouldBeNil) + So(re2, ShouldBeTrue) + originTable1, err := LoadTable(ddb, DfsTBName1, DfsDBPath) + So(err, ShouldBeNil) + originTable2, err := LoadTable(ddb, DfsTBName1, DfsDBPath) + So(err, ShouldBeNil) + re := CompareTables(originTable1, originTable2) + So(err, ShouldBeNil) + So(re, ShouldBeTrue) + rs, err := LoadTableBySQL(ddb, "10", "select * from loadTable('"+DfsDBPath+"','"+DfsTBName1+"') where id != %s", DfsDBPath, DfsTBName1) + So(err, ShouldBeNil) + err = DropPartition(ddb, DfsTBName1, DfsDBPath, "'/Key0'") + So(err, ShouldBeNil) + err = DropPartition(ddb, DfsTBName2, DfsDBPath, "'/Key0'") + So(err, ShouldBeNil) + reTable1, err := LoadTable(ddb, DfsTBName1, DfsDBPath) + So(err, ShouldBeNil) + reTable2, err := LoadTable(ddb, DfsTBName2, DfsDBPath) + So(err, ShouldBeNil) + reData1 := CompareTables(reTable1, rs) + So(reData1, ShouldBeTrue) + reData2 := CompareTables(reTable2, rs) + So(reData2, ShouldBeTrue) + err = DropDatabase(ddb, DfsDBPath) + So(err, ShouldBeNil) + }) + }) + Convey("Test_DropPartition_tabletSingle_hash_drop_multiple:", func() { + Convey("Test_DropPartition_tabletSingle_hash_drop_only_one_table:", func() { + re1, err := ExistsDatabase(ddb, DfsDBPath) + So(err, ShouldBeNil) + So(re1, ShouldBeFalse) + CreateDfsHashdb(DfsDBPath, DfsTBName1, DfsTBName2) + re2, err := ExistsDatabase(ddb, DfsDBPath) + So(err, ShouldBeNil) + So(re2, ShouldBeTrue) + originTable1, err := LoadTable(ddb, DfsTBName1, DfsDBPath) + So(err, ShouldBeNil) + originTable2, err := LoadTable(ddb, DfsTBName1, DfsDBPath) + So(err, ShouldBeNil) + re := 
CompareTables(originTable1, originTable2) + So(err, ShouldBeNil) + So(re, ShouldBeTrue) + rs, err := LoadTableBySQL(ddb, "2..9", "select * from loadTable('"+DfsDBPath+"','"+DfsTBName1+"') where id in %s", DfsDBPath, DfsTBName1) + So(err, ShouldBeNil) + err = DropPartition(ddb, DfsTBName1, DfsDBPath, `['/Key0', '/Key1']`) + So(err, ShouldBeNil) + reTable1, err := LoadTable(ddb, DfsTBName1, DfsDBPath) + So(err, ShouldBeNil) + reTable2, err := LoadTable(ddb, DfsTBName2, DfsDBPath) + So(err, ShouldBeNil) + reData1 := CompareTables(reTable1, rs) + So(reData1, ShouldBeTrue) + reData2 := CompareTables(reTable2, originTable2) + So(reData2, ShouldBeTrue) + err = DropDatabase(ddb, DfsDBPath) + So(err, ShouldBeNil) + }) + Convey("Test_DropPartition_tabletSingle_hash_drop_all_table:", func() { + re1, err := ExistsDatabase(ddb, DfsDBPath) + So(err, ShouldBeNil) + So(re1, ShouldBeFalse) + CreateDfsHashdb(DfsDBPath, DfsTBName1, DfsTBName2) + re2, err := ExistsDatabase(ddb, DfsDBPath) + So(err, ShouldBeNil) + So(re2, ShouldBeTrue) + originTable1, err := LoadTable(ddb, DfsTBName1, DfsDBPath) + So(err, ShouldBeNil) + originTable2, err := LoadTable(ddb, DfsTBName1, DfsDBPath) + So(err, ShouldBeNil) + re := CompareTables(originTable1, originTable2) + So(err, ShouldBeNil) + So(re, ShouldBeTrue) + rs, err := LoadTableBySQL(ddb, "2..9", "select * from loadTable('"+DfsDBPath+"','"+DfsTBName1+"') where id in %s", DfsDBPath, DfsTBName1) + So(err, ShouldBeNil) + err = DropPartition(ddb, DfsTBName1, DfsDBPath, `['/Key0', '/Key1']`) + So(err, ShouldBeNil) + err = DropPartition(ddb, DfsTBName2, DfsDBPath, `['/Key0', '/Key1']`) + So(err, ShouldBeNil) + reTable1, err := LoadTable(ddb, DfsTBName1, DfsDBPath) + So(err, ShouldBeNil) + reTable2, err := LoadTable(ddb, DfsTBName2, DfsDBPath) + So(err, ShouldBeNil) + reData1 := CompareTables(reTable1, rs) + So(reData1, ShouldBeTrue) + reData2 := CompareTables(reTable2, rs) + So(reData2, ShouldBeTrue) + err = DropDatabase(ddb, DfsDBPath) + So(err, ShouldBeNil) + }) + }) + Convey("Test_DropPartition_tabletSingle_value_drop_single:", func() { + Convey("Test_DropPartition_tabletSingle_value_drop_only_one_table:", func() { + re1, err := ExistsDatabase(ddb, DfsDBPath) + So(err, ShouldBeNil) + So(re1, ShouldBeFalse) + CreateDfsValuedb(DfsDBPath, DfsTBName1, DfsTBName2) + re2, err := ExistsDatabase(ddb, DfsDBPath) + So(err, ShouldBeNil) + So(re2, ShouldBeTrue) + originTable1, err := LoadTable(ddb, DfsTBName1, DfsDBPath) + So(err, ShouldBeNil) + originTable2, err := LoadTable(ddb, DfsTBName1, DfsDBPath) + So(err, ShouldBeNil) + re := CompareTables(originTable1, originTable2) + So(err, ShouldBeNil) + So(re, ShouldBeTrue) + rs, err := LoadTableBySQL(ddb, "2010.01.01", "select * from loadTable('"+DfsDBPath+"','"+DfsTBName1+"') where date != %s", DfsDBPath, DfsTBName1) + So(err, ShouldBeNil) + err = DropPartition(ddb, DfsTBName1, DfsDBPath, "'/20100101'") + So(err, ShouldBeNil) + reTable1, err := LoadTable(ddb, DfsTBName1, DfsDBPath) + So(err, ShouldBeNil) + reTable2, err := LoadTable(ddb, DfsTBName2, DfsDBPath) + So(err, ShouldBeNil) + reData1 := CompareTables(reTable1, rs) + So(reData1, ShouldBeTrue) + reData2 := CompareTables(reTable2, originTable2) + So(reData2, ShouldBeTrue) + err = DropDatabase(ddb, DfsDBPath) + So(err, ShouldBeNil) + }) + Convey("Test_DropPartition_tabletSingle_value_drop_all_table:", func() { + re1, err := ExistsDatabase(ddb, DfsDBPath) + So(err, ShouldBeNil) + So(re1, ShouldBeFalse) + CreateDfsValuedb(DfsDBPath, DfsTBName1, DfsTBName2) + re2, err := 
ExistsDatabase(ddb, DfsDBPath) + So(err, ShouldBeNil) + So(re2, ShouldBeTrue) + originTable1, err := LoadTable(ddb, DfsTBName1, DfsDBPath) + So(err, ShouldBeNil) + originTable2, err := LoadTable(ddb, DfsTBName1, DfsDBPath) + So(err, ShouldBeNil) + re := CompareTables(originTable1, originTable2) + So(err, ShouldBeNil) + So(re, ShouldBeTrue) + rs, err := LoadTableBySQL(ddb, "2010.01.01", "select * from loadTable('"+DfsDBPath+"','"+DfsTBName1+"') where date != %s", DfsDBPath, DfsTBName1) + So(err, ShouldBeNil) + err = DropPartition(ddb, DfsTBName1, DfsDBPath, "'/20100101'") + So(err, ShouldBeNil) + err = DropPartition(ddb, DfsTBName2, DfsDBPath, "'/20100101'") + So(err, ShouldBeNil) + reTable1, err := LoadTable(ddb, DfsTBName1, DfsDBPath) + So(err, ShouldBeNil) + reTable2, err := LoadTable(ddb, DfsTBName2, DfsDBPath) + So(err, ShouldBeNil) + reData1 := CompareTables(reTable1, rs) + So(reData1, ShouldBeTrue) + reData2 := CompareTables(reTable2, rs) + So(reData2, ShouldBeTrue) + err = DropDatabase(ddb, DfsDBPath) + So(err, ShouldBeNil) + }) + }) + Convey("Test_DropPartition_tabletSingle_value_drop_multiple:", func() { + Convey("Test_DropPartition_tabletSingle_value_drop_only_one_table:", func() { + re1, err := ExistsDatabase(ddb, DfsDBPath) + So(err, ShouldBeNil) + So(re1, ShouldBeFalse) + CreateDfsValuedb(DfsDBPath, DfsTBName1, DfsTBName2) + re2, err := ExistsDatabase(ddb, DfsDBPath) + So(err, ShouldBeNil) + So(re2, ShouldBeTrue) + originTable1, err := LoadTable(ddb, DfsTBName1, DfsDBPath) + So(err, ShouldBeNil) + originTable2, err := LoadTable(ddb, DfsTBName1, DfsDBPath) + So(err, ShouldBeNil) + re := CompareTables(originTable1, originTable2) + So(err, ShouldBeNil) + So(re, ShouldBeTrue) + rs, err := LoadTableBySQL(ddb, "!in(date, 2010.01.01+[0, 7, 14, 21])", "select * from loadTable('"+DfsDBPath+"','"+DfsTBName1+"') where %s", DfsDBPath, DfsTBName1) + So(err, ShouldBeNil) + err = DropPartition(ddb, DfsTBName1, DfsDBPath, `['/20100101', '/20100108', '/20100115', '/20100122']`) + So(err, ShouldBeNil) + reTable1, err := LoadTable(ddb, DfsTBName1, DfsDBPath) + So(err, ShouldBeNil) + reTable2, err := LoadTable(ddb, DfsTBName2, DfsDBPath) + So(err, ShouldBeNil) + reData1 := CompareTables(reTable1, rs) + So(reData1, ShouldBeTrue) + reData2 := CompareTables(reTable2, originTable2) + So(reData2, ShouldBeTrue) + err = DropDatabase(ddb, DfsDBPath) + So(err, ShouldBeNil) + }) + Convey("Test_DropPartition_tabletSingle_value_drop_all_table:", func() { + re1, err := ExistsDatabase(ddb, DfsDBPath) + So(err, ShouldBeNil) + So(re1, ShouldBeFalse) + CreateDfsValuedb(DfsDBPath, DfsTBName1, DfsTBName2) + re2, err := ExistsDatabase(ddb, DfsDBPath) + So(err, ShouldBeNil) + So(re2, ShouldBeTrue) + originTable1, err := LoadTable(ddb, DfsTBName1, DfsDBPath) + So(err, ShouldBeNil) + originTable2, err := LoadTable(ddb, DfsTBName1, DfsDBPath) + So(err, ShouldBeNil) + re := CompareTables(originTable1, originTable2) + So(err, ShouldBeNil) + So(re, ShouldBeTrue) + rs, err := LoadTableBySQL(ddb, "!in(date, 2010.01.01+[0, 7, 14, 21])", "select * from loadTable('"+DfsDBPath+"','"+DfsTBName1+"') where %s", DfsDBPath, DfsTBName1) + So(err, ShouldBeNil) + err = DropPartition(ddb, DfsTBName1, DfsDBPath, `['/20100101', '/20100108', '/20100115', '/20100122']`) + So(err, ShouldBeNil) + err = DropPartition(ddb, DfsTBName2, DfsDBPath, `['/20100101', '/20100108', '/20100115', '/20100122']`) + So(err, ShouldBeNil) + reTable1, err := LoadTable(ddb, DfsTBName1, DfsDBPath) + So(err, ShouldBeNil) + reTable2, err := LoadTable(ddb, 
DfsTBName2, DfsDBPath) + So(err, ShouldBeNil) + reData1 := CompareTables(reTable1, rs) + So(reData1, ShouldBeTrue) + reData2 := CompareTables(reTable2, rs) + So(reData2, ShouldBeTrue) + err = DropDatabase(ddb, DfsDBPath) + So(err, ShouldBeNil) + }) + }) + Convey("Test_DropPartition_tabletSingle_list_drop_single:", func() { + Convey("Test_DropPartition_tabletSingle_list_drop_only_one_table:", func() { + re1, err := ExistsDatabase(ddb, DfsDBPath) + So(err, ShouldBeNil) + So(re1, ShouldBeFalse) + CreateDfsListdb(DfsDBPath, DfsTBName1, DfsTBName2) + re2, err := ExistsDatabase(ddb, DfsDBPath) + So(err, ShouldBeNil) + So(re2, ShouldBeTrue) + originTable1, err := LoadTable(ddb, DfsTBName1, DfsDBPath) + So(err, ShouldBeNil) + originTable2, err := LoadTable(ddb, DfsTBName1, DfsDBPath) + So(err, ShouldBeNil) + re := CompareTables(originTable1, originTable2) + So(err, ShouldBeNil) + So(re, ShouldBeTrue) + rs, err := LoadTableBySQL(ddb, "!in(sym,`AMD`QWE`CES)", "select * from loadTable('"+DfsDBPath+"','"+DfsTBName1+"') where %s", DfsDBPath, DfsTBName1) + So(err, ShouldBeNil) + err = DropPartition(ddb, DfsTBName1, DfsDBPath, "'/List0'") + So(err, ShouldBeNil) + reTable1, err := LoadTable(ddb, DfsTBName1, DfsDBPath) + So(err, ShouldBeNil) + reTable2, err := LoadTable(ddb, DfsTBName2, DfsDBPath) + So(err, ShouldBeNil) + reData1 := CompareTables(reTable1, rs) + So(reData1, ShouldBeTrue) + reData2 := CompareTables(reTable2, originTable2) + So(reData2, ShouldBeTrue) + err = DropDatabase(ddb, DfsDBPath) + So(err, ShouldBeNil) + }) + Convey("Test_DropPartition_tabletSingle_list_drop_all_table:", func() { + re1, err := ExistsDatabase(ddb, DfsDBPath) + So(err, ShouldBeNil) + So(re1, ShouldBeFalse) + CreateDfsListdb(DfsDBPath, DfsTBName1, DfsTBName2) + re2, err := ExistsDatabase(ddb, DfsDBPath) + So(err, ShouldBeNil) + So(re2, ShouldBeTrue) + originTable1, err := LoadTable(ddb, DfsTBName1, DfsDBPath) + So(err, ShouldBeNil) + originTable2, err := LoadTable(ddb, DfsTBName1, DfsDBPath) + So(err, ShouldBeNil) + re := CompareTables(originTable1, originTable2) + So(err, ShouldBeNil) + So(re, ShouldBeTrue) + rs, err := LoadTableBySQL(ddb, "!in(sym,`AMD`QWE`CES)", "select * from loadTable('"+DfsDBPath+"','"+DfsTBName1+"') where %s", DfsDBPath, DfsTBName1) + So(err, ShouldBeNil) + err = DropPartition(ddb, DfsTBName1, DfsDBPath, "'/List0'") + So(err, ShouldBeNil) + err = DropPartition(ddb, DfsTBName2, DfsDBPath, "'/List0'") + So(err, ShouldBeNil) + reTable1, err := LoadTable(ddb, DfsTBName1, DfsDBPath) + So(err, ShouldBeNil) + reTable2, err := LoadTable(ddb, DfsTBName2, DfsDBPath) + So(err, ShouldBeNil) + reData1 := CompareTables(reTable1, rs) + So(reData1, ShouldBeTrue) + reData2 := CompareTables(reTable2, rs) + So(reData2, ShouldBeTrue) + err = DropDatabase(ddb, DfsDBPath) + So(err, ShouldBeNil) + }) + }) + Convey("Test_DropPartition_tabletSingle_list_drop_multiple:", func() { + Convey("Test_DropPartition_tabletSingle_list_drop_only_one_table:", func() { + re1, err := ExistsDatabase(ddb, DfsDBPath) + So(err, ShouldBeNil) + So(re1, ShouldBeFalse) + CreateDfsListdb(DfsDBPath, DfsTBName1, DfsTBName2) + re2, err := ExistsDatabase(ddb, DfsDBPath) + So(err, ShouldBeNil) + So(re2, ShouldBeTrue) + originTable1, err := LoadTable(ddb, DfsTBName1, DfsDBPath) + So(err, ShouldBeNil) + originTable2, err := LoadTable(ddb, DfsTBName1, DfsDBPath) + So(err, ShouldBeNil) + re := CompareTables(originTable1, originTable2) + So(err, ShouldBeNil) + So(re, ShouldBeTrue) + rs, err := LoadTableBySQL(ddb, "!in(sym,`DOP`ASZ`FSD`BBVC)", "select * 
from loadTable('"+DfsDBPath+"','"+DfsTBName1+"') where %s", DfsDBPath, DfsTBName1) + So(err, ShouldBeNil) + err = DropPartition(ddb, DfsTBName1, DfsDBPath, `['/List1', '/List2']`) + So(err, ShouldBeNil) + reTable1, err := LoadTable(ddb, DfsTBName1, DfsDBPath) + So(err, ShouldBeNil) + reTable2, err := LoadTable(ddb, DfsTBName2, DfsDBPath) + So(err, ShouldBeNil) + reData1 := CompareTables(reTable1, rs) + So(reData1, ShouldBeTrue) + reData2 := CompareTables(reTable2, originTable2) + So(reData2, ShouldBeTrue) + err = DropDatabase(ddb, DfsDBPath) + So(err, ShouldBeNil) + }) + Convey("Test_DropPartition_tabletSingle_list_drop_all_table:", func() { + re1, err := ExistsDatabase(ddb, DfsDBPath) + So(err, ShouldBeNil) + So(re1, ShouldBeFalse) + CreateDfsListdb(DfsDBPath, DfsTBName1, DfsTBName2) + re2, err := ExistsDatabase(ddb, DfsDBPath) + So(err, ShouldBeNil) + So(re2, ShouldBeTrue) + originTable1, err := LoadTable(ddb, DfsTBName1, DfsDBPath) + So(err, ShouldBeNil) + originTable2, err := LoadTable(ddb, DfsTBName1, DfsDBPath) + So(err, ShouldBeNil) + re := CompareTables(originTable1, originTable2) + So(err, ShouldBeNil) + So(re, ShouldBeTrue) + rs, err := LoadTableBySQL(ddb, "!in(sym,`DOP`ASZ`FSD`BBVC)", "select * from loadTable('"+DfsDBPath+"','"+DfsTBName1+"') where %s", DfsDBPath, DfsTBName1) + So(err, ShouldBeNil) + err = DropPartition(ddb, DfsTBName1, DfsDBPath, `['/List1', '/List2']`) + So(err, ShouldBeNil) + err = DropPartition(ddb, DfsTBName2, DfsDBPath, `['/List1', '/List2']`) + So(err, ShouldBeNil) + reTable1, err := LoadTable(ddb, DfsTBName1, DfsDBPath) + So(err, ShouldBeNil) + reTable2, err := LoadTable(ddb, DfsTBName2, DfsDBPath) + So(err, ShouldBeNil) + reData1 := CompareTables(reTable1, rs) + So(reData1, ShouldBeTrue) + reData2 := CompareTables(reTable2, rs) + So(reData2, ShouldBeTrue) + err = DropDatabase(ddb, DfsDBPath) + So(err, ShouldBeNil) + }) + }) + Convey("Test_DropPartition_tabletSingle_compo_range_range_drop_level1_single:", func() { + Convey("Test_DropPartition_tabletSingle_compo_range_range_drop_level1_only_one_table:", func() { + re1, err := ExistsDatabase(ddb, DfsDBPath) + So(err, ShouldBeNil) + So(re1, ShouldBeFalse) + CreateDfsCompoRangeRangedb(DfsDBPath, DfsTBName1, DfsTBName2) + re2, err := ExistsDatabase(ddb, DfsDBPath) + So(err, ShouldBeNil) + So(re2, ShouldBeTrue) + originTable1, err := LoadTable(ddb, DfsTBName1, DfsDBPath) + So(err, ShouldBeNil) + originTable2, err := LoadTable(ddb, DfsTBName1, DfsDBPath) + So(err, ShouldBeNil) + re := CompareTables(originTable1, originTable2) + So(err, ShouldBeNil) + So(re, ShouldBeTrue) + rs, err := LoadTableBySQL(ddb, "2010.02.01", "select * from loadTable('"+DfsDBPath+"','"+DfsTBName1+"') where date >= %s", DfsDBPath, DfsTBName1) + So(err, ShouldBeNil) + err = DropPartition(ddb, DfsTBName1, DfsDBPath, "2010.01.01") + So(err, ShouldBeNil) + reTable1, err := LoadTable(ddb, DfsTBName1, DfsDBPath) + So(err, ShouldBeNil) + reTable2, err := LoadTable(ddb, DfsTBName2, DfsDBPath) + So(err, ShouldBeNil) + reData1 := CompareTables(reTable1, rs) + So(reData1, ShouldBeTrue) + reData2 := CompareTables(reTable2, originTable2) + So(reData2, ShouldBeTrue) + err = DropDatabase(ddb, DfsDBPath) + So(err, ShouldBeNil) + }) + Convey("Test_DropPartition_tabletSingle_compo_range_range_drop_level1_all_table:", func() { + re1, err := ExistsDatabase(ddb, DfsDBPath) + So(err, ShouldBeNil) + So(re1, ShouldBeFalse) + CreateDfsCompoRangeRangedb(DfsDBPath, DfsTBName1, DfsTBName2) + re2, err := ExistsDatabase(ddb, DfsDBPath) + So(err, ShouldBeNil) + 
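+ // CreateDfsCompoRangeRangedb is assumed to build the same two-level COMPO
+ // layout as the inline script in the SetDBHandle case at the end of this
+ // file:
+ //
+ //     db1 = database('', RANGE, 2010.01M+0..12)  // level 1: monthly date ranges
+ //     db2 = database('', RANGE, 1 3 5 7 9 11)    // level 2: id ranges
+ //     db  = database(dbPath, COMPO, [db1, db2])
+ //
+ // so a bare level-1 value such as "2010.01.01" addresses one whole month.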
So(re2, ShouldBeTrue) + originTable1, err := LoadTable(ddb, DfsTBName1, DfsDBPath) + So(err, ShouldBeNil) + originTable2, err := LoadTable(ddb, DfsTBName1, DfsDBPath) + So(err, ShouldBeNil) + re := CompareTables(originTable1, originTable2) + So(err, ShouldBeNil) + So(re, ShouldBeTrue) + rs, err := LoadTableBySQL(ddb, "2010.02.01", "select * from loadTable('"+DfsDBPath+"','"+DfsTBName1+"') where date >= %s", DfsDBPath, DfsTBName1) + So(err, ShouldBeNil) + err = DropPartition(ddb, DfsTBName1, DfsDBPath, "2010.01.01") + So(err, ShouldBeNil) + err = DropPartition(ddb, DfsTBName2, DfsDBPath, "2010.01.01") + So(err, ShouldBeNil) + reTable1, err := LoadTable(ddb, DfsTBName1, DfsDBPath) + So(err, ShouldBeNil) + reTable2, err := LoadTable(ddb, DfsTBName2, DfsDBPath) + So(err, ShouldBeNil) + reData1 := CompareTables(reTable1, rs) + So(reData1, ShouldBeTrue) + reData2 := CompareTables(reTable2, rs) + So(reData2, ShouldBeTrue) + err = DropDatabase(ddb, DfsDBPath) + So(err, ShouldBeNil) + }) + }) + Convey("Test_DropPartition_tabletSingle_compo_range_range_drop_level1_multiple:", func() { + Convey("Test_DropPartition_tabletSingle_compo_range_range_drop_level1_only_one_table:", func() { + re1, err := ExistsDatabase(ddb, DfsDBPath) + So(err, ShouldBeNil) + So(re1, ShouldBeFalse) + CreateDfsCompoRangeRangedb(DfsDBPath, DfsTBName1, DfsTBName2) + re2, err := ExistsDatabase(ddb, DfsDBPath) + So(err, ShouldBeNil) + So(re2, ShouldBeTrue) + originTable1, err := LoadTable(ddb, DfsTBName1, DfsDBPath) + So(err, ShouldBeNil) + originTable2, err := LoadTable(ddb, DfsTBName1, DfsDBPath) + So(err, ShouldBeNil) + re := CompareTables(originTable1, originTable2) + So(err, ShouldBeNil) + So(re, ShouldBeTrue) + rs, err := LoadTableBySQL(ddb, "2010.03.01", "select * from loadTable('"+DfsDBPath+"','"+DfsTBName1+"') where date >= %s", DfsDBPath, DfsTBName1) + So(err, ShouldBeNil) + err = DropPartition(ddb, DfsTBName1, DfsDBPath, `[2010.01.01, 2010.02.01]`) + So(err, ShouldBeNil) + reTable1, err := LoadTable(ddb, DfsTBName1, DfsDBPath) + So(err, ShouldBeNil) + reTable2, err := LoadTable(ddb, DfsTBName2, DfsDBPath) + So(err, ShouldBeNil) + reData1 := CompareTables(reTable1, rs) + So(reData1, ShouldBeTrue) + reData2 := CompareTables(reTable2, originTable2) + So(reData2, ShouldBeTrue) + err = DropDatabase(ddb, DfsDBPath) + So(err, ShouldBeNil) + }) + Convey("Test_DropPartition_tabletSingle_compo_range_range_drop_level1_all_table:", func() { + re1, err := ExistsDatabase(ddb, DfsDBPath) + So(err, ShouldBeNil) + So(re1, ShouldBeFalse) + CreateDfsCompoRangeRangedb(DfsDBPath, DfsTBName1, DfsTBName2) + re2, err := ExistsDatabase(ddb, DfsDBPath) + So(err, ShouldBeNil) + So(re2, ShouldBeTrue) + originTable1, err := LoadTable(ddb, DfsTBName1, DfsDBPath) + So(err, ShouldBeNil) + originTable2, err := LoadTable(ddb, DfsTBName1, DfsDBPath) + So(err, ShouldBeNil) + re := CompareTables(originTable1, originTable2) + So(err, ShouldBeNil) + So(re, ShouldBeTrue) + rs, err := LoadTableBySQL(ddb, "2010.03.01", "select * from loadTable('"+DfsDBPath+"','"+DfsTBName1+"') where date >= %s", DfsDBPath, DfsTBName1) + So(err, ShouldBeNil) + err = DropPartition(ddb, DfsTBName1, DfsDBPath, `[2010.01.01, 2010.02.01]`) + So(err, ShouldBeNil) + err = DropPartition(ddb, DfsTBName2, DfsDBPath, `[2010.01.01, 2010.02.01]`) + So(err, ShouldBeNil) + reTable1, err := LoadTable(ddb, DfsTBName1, DfsDBPath) + So(err, ShouldBeNil) + reTable2, err := LoadTable(ddb, DfsTBName2, DfsDBPath) + So(err, ShouldBeNil) + reData1 := CompareTables(reTable1, rs) + So(reData1, 
ShouldBeTrue) + reData2 := CompareTables(reTable2, rs) + So(reData2, ShouldBeTrue) + err = DropDatabase(ddb, DfsDBPath) + So(err, ShouldBeNil) + }) + }) + + Convey("Test_DropPartition_tabletSingle_compo_range_range_drop_level2_single:", func() { + Convey("Test_DropPartition_tabletSingle_compo_range_range_drop_level2_only_one_table:", func() { + re1, err := ExistsDatabase(ddb, DfsDBPath) + So(err, ShouldBeNil) + So(re1, ShouldBeFalse) + CreateDfsCompoRangeRangedb(DfsDBPath, DfsTBName1, DfsTBName2) + re2, err := ExistsDatabase(ddb, DfsDBPath) + So(err, ShouldBeNil) + So(re2, ShouldBeTrue) + originTable1, err := LoadTable(ddb, DfsTBName1, DfsDBPath) + So(err, ShouldBeNil) + originTable2, err := LoadTable(ddb, DfsTBName1, DfsDBPath) + So(err, ShouldBeNil) + re := CompareTables(originTable1, originTable2) + So(err, ShouldBeNil) + So(re, ShouldBeTrue) + rs, err := LoadTableBySQL(ddb, "2010.02.01 or id >= 3", "select * from loadTable('"+DfsDBPath+"','"+DfsTBName1+"') where date >= %s", DfsDBPath, DfsTBName1) + So(err, ShouldBeNil) + err = DropPartition(ddb, DfsTBName1, DfsDBPath, `[2010.01.01, 1]`) + So(err, ShouldBeNil) + reTable1, err := LoadTable(ddb, DfsTBName1, DfsDBPath) + So(err, ShouldBeNil) + reTable2, err := LoadTable(ddb, DfsTBName2, DfsDBPath) + So(err, ShouldBeNil) + reData1 := CompareTables(reTable1, rs) + So(reData1, ShouldBeTrue) + reData2 := CompareTables(reTable2, originTable2) + So(reData2, ShouldBeTrue) + err = DropDatabase(ddb, DfsDBPath) + So(err, ShouldBeNil) + }) + Convey("Test_DropPartition_tabletSingle_compo_range_range_drop_level2_all_table:", func() { + re1, err := ExistsDatabase(ddb, DfsDBPath) + So(err, ShouldBeNil) + So(re1, ShouldBeFalse) + CreateDfsCompoRangeRangedb(DfsDBPath, DfsTBName1, DfsTBName2) + re2, err := ExistsDatabase(ddb, DfsDBPath) + So(err, ShouldBeNil) + So(re2, ShouldBeTrue) + originTable1, err := LoadTable(ddb, DfsTBName1, DfsDBPath) + So(err, ShouldBeNil) + originTable2, err := LoadTable(ddb, DfsTBName1, DfsDBPath) + So(err, ShouldBeNil) + re := CompareTables(originTable1, originTable2) + So(err, ShouldBeNil) + So(re, ShouldBeTrue) + rs, err := LoadTableBySQL(ddb, "2010.02.01 or id >= 3", "select * from loadTable('"+DfsDBPath+"','"+DfsTBName1+"') where date >= %s", DfsDBPath, DfsTBName1) + So(err, ShouldBeNil) + err = DropPartition(ddb, DfsTBName1, DfsDBPath, `[2010.01.01, 1]`) + So(err, ShouldBeNil) + err = DropPartition(ddb, DfsTBName2, DfsDBPath, `[2010.01.01, 1]`) + So(err, ShouldBeNil) + reTable1, err := LoadTable(ddb, DfsTBName1, DfsDBPath) + So(err, ShouldBeNil) + reTable2, err := LoadTable(ddb, DfsTBName2, DfsDBPath) + So(err, ShouldBeNil) + reData1 := CompareTables(reTable1, rs) + So(reData1, ShouldBeTrue) + reData2 := CompareTables(reTable2, rs) + So(reData2, ShouldBeTrue) + err = DropDatabase(ddb, DfsDBPath) + So(err, ShouldBeNil) + }) + }) + Convey("Test_DropPartition_tabletSingle_compo_range_range_drop_level2_multiple:", func() { + Convey("Test_DropPartition_tabletSingle_compo_range_range_drop_level2_only_one_table:", func() { + re1, err := ExistsDatabase(ddb, DfsDBPath) + So(err, ShouldBeNil) + So(re1, ShouldBeFalse) + CreateDfsCompoRangeRangedb(DfsDBPath, DfsTBName1, DfsTBName2) + re2, err := ExistsDatabase(ddb, DfsDBPath) + So(err, ShouldBeNil) + So(re2, ShouldBeTrue) + originTable1, err := LoadTable(ddb, DfsTBName1, DfsDBPath) + So(err, ShouldBeNil) + originTable2, err := LoadTable(ddb, DfsTBName1, DfsDBPath) + So(err, ShouldBeNil) + re := CompareTables(originTable1, originTable2) + So(err, ShouldBeNil) + So(re, ShouldBeTrue) + 
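+ // Dropping `[[2010.01.01,2010.02.01], [3,5]]` removes the cross product of
+ // the January/February date chunks with the id chunks [3,5) and [5,7), i.e.
+ // every row with date < 2010.03.01 and id in 3..6. The reference result
+ // loaded below is therefore the complement filter:
+ //
+ //     "date >= 2010.03.01 or !between(id, 3:6)"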
rs, err := LoadTableBySQL(ddb, "date >= 2010.03.01 or !between(id, 3:6)", "select * from loadTable('"+DfsDBPath+"','"+DfsTBName1+"') where %s", DfsDBPath, DfsTBName1) + So(err, ShouldBeNil) + err = DropPartition(ddb, DfsTBName1, DfsDBPath, `[[2010.01.01,2010.02.01], [3,5]]`) + So(err, ShouldBeNil) + reTable1, err := LoadTable(ddb, DfsTBName1, DfsDBPath) + So(err, ShouldBeNil) + reTable2, err := LoadTable(ddb, DfsTBName2, DfsDBPath) + So(err, ShouldBeNil) + reData1 := CompareTables(reTable1, rs) + So(reData1, ShouldBeTrue) + reData2 := CompareTables(reTable2, originTable2) + So(reData2, ShouldBeTrue) + err = DropDatabase(ddb, DfsDBPath) + So(err, ShouldBeNil) + })
+ Convey("Test_DropPartition_tabletSingle_compo_range_range_drop_level2_all_table:", func() { + re1, err := ExistsDatabase(ddb, DfsDBPath) + So(err, ShouldBeNil) + So(re1, ShouldBeFalse) + CreateDfsCompoRangeRangedb(DfsDBPath, DfsTBName1, DfsTBName2) + re2, err := ExistsDatabase(ddb, DfsDBPath) + So(err, ShouldBeNil) + So(re2, ShouldBeTrue) + originTable1, err := LoadTable(ddb, DfsTBName1, DfsDBPath) + So(err, ShouldBeNil) + originTable2, err := LoadTable(ddb, DfsTBName2, DfsDBPath) + So(err, ShouldBeNil) + re := CompareTables(originTable1, originTable2) + So(re, ShouldBeTrue) + rs, err := LoadTableBySQL(ddb, "date >= 2010.03.01 or !between(id, 3:6)", "select * from loadTable('"+DfsDBPath+"','"+DfsTBName1+"') where %s", DfsDBPath, DfsTBName1) + So(err, ShouldBeNil) + err = DropPartition(ddb, DfsTBName1, DfsDBPath, `[[2010.01.01,2010.02.01], [3,5]]`) + So(err, ShouldBeNil) + err = DropPartition(ddb, DfsTBName2, DfsDBPath, `[[2010.01.01,2010.02.01], [3,5]]`) + So(err, ShouldBeNil) + reTable1, err := LoadTable(ddb, DfsTBName1, DfsDBPath) + So(err, ShouldBeNil) + reTable2, err := LoadTable(ddb, DfsTBName2, DfsDBPath) + So(err, ShouldBeNil) + reData1 := CompareTables(reTable1, rs) + So(reData1, ShouldBeTrue) + reData2 := CompareTables(reTable2, rs) + So(reData2, ShouldBeTrue) + err = DropDatabase(ddb, DfsDBPath) + So(err, ShouldBeNil) + }) + })
+ Convey("Test_DropPartition_tabletSingle_SetDBHandle:", func() { + re1, err := ExistsDatabase(ddb, DfsDBPath) + So(err, ShouldBeNil) + So(re1, ShouldBeFalse) + ddbScript := ` + dbPath="` + DfsDBPath + `" + if(existsDatabase(dbPath)){dropDatabase(dbPath)} + db1=database('', RANGE, 2010.01M+0..12) + db2=database('', RANGE, 1 3 5 7 9 11) + db=database(dbPath, COMPO, [db1,db2]) + n=100000 + tdata=table(sort(take(2010.01.01..2010.12.31, n)) as date, take(1..10, n) as id, take(["AMD", "QWE", "CES", "DOP", "ASZ", "FSD", "BBVC", "AWQ", "DS"], n) as sym, rand(100, n) as val) + db.createPartitionedTable(tdata,"` + DfsTBName1 + `",["date", "id"]).append!(tdata) + db.createPartitionedTable(tdata,"` + DfsTBName2 + `",["date", "id"]).append!(tdata) + ` + _, err = ddb.RunScript(ddbScript) + So(err, ShouldBeNil) + re2, err := ExistsDatabase(ddb, DfsDBPath) + So(err, ShouldBeNil) + So(re2, ShouldBeTrue) + originTable1, err := LoadTable(ddb, DfsTBName1, DfsDBPath) + So(err, ShouldBeNil) + originTable2, err := LoadTable(ddb, DfsTBName2, DfsDBPath) + So(err, ShouldBeNil) + re := CompareTables(originTable1, originTable2) + So(re, ShouldBeTrue) + rs, err := LoadTableBySQL(ddb, "date >= 2010.03.01 or !between(id, 3:6)", "select * from loadTable('"+DfsDBPath+"','"+DfsTBName1+"') where %s", DfsDBPath, DfsTBName1) + So(err, ShouldBeNil) + t1 := new(api.DropPartitionRequest). + SetPartitionPaths(`[[2010.01.01,2010.02.01], [3,5]]`).
+ SetDBPath(DfsDBPath). + SetTableName(DfsTBName1).SetDBHandle("db") + err = ddb.DropPartition(t1) + So(err, ShouldBeNil) + t2 := new(api.DropPartitionRequest). + SetPartitionPaths(`[[2010.01.01,2010.02.01], [3,5]]`). + SetDBPath(DfsDBPath). + SetTableName(DfsTBName2).SetDBHandle("db") + err = ddb.DropPartition(t2) + So(err, ShouldBeNil) + reTable1, err := LoadTable(ddb, DfsTBName1, DfsDBPath) + So(err, ShouldBeNil) + reTable2, err := LoadTable(ddb, DfsTBName2, DfsDBPath) + So(err, ShouldBeNil) + reData1 := CompareTables(reTable1, rs) + So(reData1, ShouldBeTrue) + reData2 := CompareTables(reTable2, rs) + So(reData2, ShouldBeTrue) + err = DropDatabase(ddb, DfsDBPath) + So(err, ShouldBeNil) + }) + }) +} diff --git a/test/existsDatabase_test.go b/test/existsDatabase_test.go new file mode 100644 index 0000000..3ba27a4 --- /dev/null +++ b/test/existsDatabase_test.go @@ -0,0 +1,251 @@ +package test + +import ( + "context" + "testing" + + "github.com/dolphindb/api-go/api" + "github.com/dolphindb/api-go/model" + "github.com/dolphindb/api-go/test/setup" + . "github.com/smartystreets/goconvey/convey" +) + +func TestExistDatabaseAndDropDatabase(t *testing.T) { + Convey("Test ExistDatabase and dropDatabase prepare", t, func() { + ddb, err := api.NewSimpleDolphinDBClient(context.TODO(), setup.Address, setup.UserName, setup.Password) + So(err, ShouldBeNil) + Convey("Drop all Databases", func() { + dbPaths := []string{DfsDBPath, DiskDBPath} + for _, dbPath := range dbPaths { + script := ` + if(existsDatabase("` + dbPath + `")){ + dropDatabase("` + dbPath + `") + } + if(exists("` + dbPath + `")){ + rmdir("` + dbPath + `", true) + } + ` + _, err = ddb.RunScript(script) + So(err, ShouldBeNil) + re, err := ddb.RunScript(`existsDatabase("` + dbPath + `")`) + So(err, ShouldBeNil) + isExitsDatabase := re.(*model.Scalar).DataType.Value() + So(isExitsDatabase, ShouldBeFalse) + } + }) + Convey("Test_ExistDatabase_wrong_db_exception", func() { + _, err := ddb.RunScript(`existsDatabase(shjbdj)`) + So(err, ShouldNotBeNil) + }) + Convey("Test_ExistDatabase_and_dropDatabase_dfs_dimension:", func() { + re1, err := ExistsDatabase(ddb, DfsDBPath) + So(err, ShouldBeNil) + So(re1, ShouldBeFalse) + CreateDfsDimensiondb(DfsDBPath, TbName1, TbName2) + re2, err := ExistsDatabase(ddb, DfsDBPath) + So(err, ShouldBeNil) + So(re2, ShouldBeTrue) + err = DropDatabase(ddb, DfsDBPath) + So(err, ShouldBeNil) + re3, err := ExistsDatabase(ddb, DfsDBPath) + So(err, ShouldBeNil) + So(re3, ShouldBeFalse) + }) + Convey("Test_ExistDatabase_and_dropDatabase_dfs_range:", func() { + re1, err := ExistsDatabase(ddb, DfsDBPath) + So(err, ShouldBeNil) + So(re1, ShouldBeFalse) + CreateDfsRangedb(DfsDBPath, DfsTBName1, DfsTBName2) + re2, err := ExistsDatabase(ddb, DfsDBPath) + So(err, ShouldBeNil) + So(re2, ShouldBeTrue) + err = DropDatabase(ddb, DfsDBPath) + So(err, ShouldBeNil) + re3, err := ExistsDatabase(ddb, DfsDBPath) + So(err, ShouldBeNil) + So(re3, ShouldBeFalse) + }) + Convey("Test_ExistDatabase_and_dropDatabase_dfs_hash:", func() { + re1, err := ExistsDatabase(ddb, DfsDBPath) + So(err, ShouldBeNil) + So(re1, ShouldBeFalse) + CreateDfsHashdb(DfsDBPath, DfsTBName1, DfsTBName2) + re2, err := ExistsDatabase(ddb, DfsDBPath) + So(err, ShouldBeNil) + So(re2, ShouldBeTrue) + err = DropDatabase(ddb, DfsDBPath) + So(err, ShouldBeNil) + re3, err := ExistsDatabase(ddb, DfsDBPath) + So(err, ShouldBeNil) + So(re3, ShouldBeFalse) + }) + Convey("Test_ExistDatabase_and_dropDatabase_dfs_value:", func() { + re1, err := ExistsDatabase(ddb, DfsDBPath) + So(err, 
ShouldBeNil) + So(re1, ShouldBeFalse) + CreateDfsValuedb(DfsDBPath, DfsTBName1, DfsTBName2) + re2, err := ExistsDatabase(ddb, DfsDBPath) + So(err, ShouldBeNil) + So(re2, ShouldBeTrue) + err = DropDatabase(ddb, DfsDBPath) + So(err, ShouldBeNil) + re3, err := ExistsDatabase(ddb, DfsDBPath) + So(err, ShouldBeNil) + So(re3, ShouldBeFalse) + }) + Convey("Test_ExistDatabase_and_dropDatabase_dfs_list:", func() { + re1, err := ExistsDatabase(ddb, DfsDBPath) + So(err, ShouldBeNil) + So(re1, ShouldBeFalse) + CreateDfsListdb(DfsDBPath, DfsTBName1, DfsTBName2) + re2, err := ExistsDatabase(ddb, DfsDBPath) + So(err, ShouldBeNil) + So(re2, ShouldBeTrue) + err = DropDatabase(ddb, DfsDBPath) + So(err, ShouldBeNil) + re3, err := ExistsDatabase(ddb, DfsDBPath) + So(err, ShouldBeNil) + So(re3, ShouldBeFalse) + }) + Convey("Test_ExistDatabase_and_dropDatabase_dfs_compo_range_range:", func() { + re1, err := ExistsDatabase(ddb, DfsDBPath) + So(err, ShouldBeNil) + So(re1, ShouldBeFalse) + CreateDfsCompoRangeRangedb(DfsDBPath, DfsTBName1, DfsTBName2) + re2, err := ExistsDatabase(ddb, DfsDBPath) + So(err, ShouldBeNil) + So(re2, ShouldBeTrue) + err = DropDatabase(ddb, DfsDBPath) + So(err, ShouldBeNil) + re3, err := ExistsDatabase(ddb, DfsDBPath) + So(err, ShouldBeNil) + So(re3, ShouldBeFalse) + }) + Convey("Test_ExistDatabase_and_dropDatabase_dfs_compo_range_value:", func() { + re1, err := ExistsDatabase(ddb, DfsDBPath) + So(err, ShouldBeNil) + So(re1, ShouldBeFalse) + CreateDfsCompoRangeValuedb(DfsDBPath, DfsTBName1, DfsTBName2) + re2, err := ExistsDatabase(ddb, DfsDBPath) + So(err, ShouldBeNil) + So(re2, ShouldBeTrue) + err = DropDatabase(ddb, DfsDBPath) + So(err, ShouldBeNil) + re3, err := ExistsDatabase(ddb, DfsDBPath) + So(err, ShouldBeNil) + So(re3, ShouldBeFalse) + }) + Convey("Test_ExistDatabase_and_dropDatabase_dfs_compo_range_list:", func() { + re1, err := ExistsDatabase(ddb, DfsDBPath) + So(err, ShouldBeNil) + So(re1, ShouldBeFalse) + CreateDfsCompoRangeListdb(DfsDBPath, DfsTBName1, DfsTBName2) + re2, err := ExistsDatabase(ddb, DfsDBPath) + So(err, ShouldBeNil) + So(re2, ShouldBeTrue) + err = DropDatabase(ddb, DfsDBPath) + So(err, ShouldBeNil) + re3, err := ExistsDatabase(ddb, DfsDBPath) + So(err, ShouldBeNil) + So(re3, ShouldBeFalse) + }) + Convey("Test_ExistDatabase_and_dropDatabase_dfs_compo_range_hash:", func() { + re1, err := ExistsDatabase(ddb, DfsDBPath) + So(err, ShouldBeNil) + So(re1, ShouldBeFalse) + CreateDfsCompoRangeHashdb(DfsDBPath, DfsTBName1, DfsTBName2) + re2, err := ExistsDatabase(ddb, DfsDBPath) + So(err, ShouldBeNil) + So(re2, ShouldBeTrue) + err = DropDatabase(ddb, DfsDBPath) + So(err, ShouldBeNil) + re3, err := ExistsDatabase(ddb, DfsDBPath) + So(err, ShouldBeNil) + So(re3, ShouldBeFalse) + }) + Convey("Test_ExistDatabase_and_dropDatabase_disk_unpartitioned_table:", func() { + re1, err := ExistsDatabase(ddb, DiskDBPath) + So(err, ShouldBeNil) + So(re1, ShouldBeFalse) + CreateDiskUnpartitioneddb(DiskDBPath, TbName1, TbName2) + re2, err := ExistsTable(ddb, DiskDBPath, TbName1) + So(err, ShouldBeNil) + So(re2, ShouldBeTrue) + err = DropDatabase(ddb, DiskDBPath) + So(err, ShouldBeNil) + re3, err := ExistsDatabase(ddb, DiskDBPath) + So(err, ShouldBeNil) + So(re3, ShouldBeFalse) + }) + Convey("Test_ExistDatabase_and_dropDatabase_create_disk_range_db:", func() { + re1, err := ExistsDatabase(ddb, DiskDBPath) + So(err, ShouldBeNil) + So(re1, ShouldBeFalse) + CreateDiskRangedb(DiskDBPath, DfsTBName1, DfsTBName2) + re2, err := ExistsTable(ddb, DiskDBPath, DfsTBName1) + So(err, ShouldBeNil) + 
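+ // Disk databases live on the node's local filesystem rather than in DFS,
+ // which is why the prepare step pairs dropDatabase with exists/rmdir. A
+ // minimal sketch of that cleanup as a reusable helper (the client type is
+ // assumed to be the one returned by api.NewSimpleDolphinDBClient; cleanDB is
+ // an illustrative name, not part of this suite):
+ //
+ //     func cleanDB(ddb api.DolphinDB, dbPath string) error {
+ //         _, err := ddb.RunScript(`if(existsDatabase("` + dbPath + `")){dropDatabase("` + dbPath + `")}
+ //     if(exists("` + dbPath + `")){rmdir("` + dbPath + `", true)}`)
+ //         return err
+ //     }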
So(re2, ShouldBeTrue) + err = DropDatabase(ddb, DiskDBPath) + So(err, ShouldBeNil) + re3, err := ExistsDatabase(ddb, DiskDBPath) + So(err, ShouldBeNil) + So(re3, ShouldBeFalse) + }) + Convey("Test_ExistDatabase_and_dropDatabase_create_disk_hash_db:", func() { + re1, err := ExistsDatabase(ddb, DiskDBPath) + So(err, ShouldBeNil) + So(re1, ShouldBeFalse) + CreateDiskHashdb(DiskDBPath, DfsTBName1, DfsTBName2) + re2, err := ExistsTable(ddb, DiskDBPath, DfsTBName1) + So(err, ShouldBeNil) + So(re2, ShouldBeTrue) + err = DropDatabase(ddb, DiskDBPath) + So(err, ShouldBeNil) + re3, err := ExistsDatabase(ddb, DiskDBPath) + So(err, ShouldBeNil) + So(re3, ShouldBeFalse) + }) + Convey("Test_ExistDatabase_and_dropDatabase_create_disk_value_db:", func() { + re1, err := ExistsDatabase(ddb, DiskDBPath) + So(err, ShouldBeNil) + So(re1, ShouldBeFalse) + CreateDiskValuedb(DiskDBPath, DfsTBName1, DfsTBName2) + re2, err := ExistsTable(ddb, DiskDBPath, DfsTBName1) + So(err, ShouldBeNil) + So(re2, ShouldBeTrue) + err = DropDatabase(ddb, DiskDBPath) + So(err, ShouldBeNil) + re3, err := ExistsDatabase(ddb, DiskDBPath) + So(err, ShouldBeNil) + So(re3, ShouldBeFalse) + }) + Convey("Test_ExistDatabase_and_dropDatabase_create_disk_list_db:", func() { + re1, err := ExistsDatabase(ddb, DiskDBPath) + So(err, ShouldBeNil) + So(re1, ShouldBeFalse) + CreateDiskListdb(DiskDBPath, DfsTBName1, DfsTBName2) + re2, err := ExistsTable(ddb, DiskDBPath, DfsTBName1) + So(err, ShouldBeNil) + So(re2, ShouldBeTrue) + err = DropDatabase(ddb, DiskDBPath) + So(err, ShouldBeNil) + re3, err := ExistsDatabase(ddb, DiskDBPath) + So(err, ShouldBeNil) + So(re3, ShouldBeFalse) + }) + Convey("Test_ExistDatabase_and_dropDatabase_create_compo_range_range_db:", func() { + re1, err := ExistsDatabase(ddb, DiskDBPath) + So(err, ShouldBeNil) + So(re1, ShouldBeFalse) + CreateDiskCompoRangeRangedb(DiskDBPath, DfsTBName1, DfsTBName2) + re2, err := ExistsTable(ddb, DiskDBPath, DfsTBName1) + So(err, ShouldBeNil) + So(re2, ShouldBeTrue) + err = DropDatabase(ddb, DiskDBPath) + So(err, ShouldBeNil) + re3, err := ExistsDatabase(ddb, DiskDBPath) + So(err, ShouldBeNil) + So(re3, ShouldBeFalse) + }) + }) +} diff --git a/test/existsTable_test.go b/test/existsTable_test.go new file mode 100644 index 0000000..7b9aa4d --- /dev/null +++ b/test/existsTable_test.go @@ -0,0 +1,328 @@ +package test + +import ( + "context" + "testing" + + "github.com/dolphindb/api-go/api" + "github.com/dolphindb/api-go/model" + "github.com/dolphindb/api-go/test/setup" + . 
"github.com/smartystreets/goconvey/convey" +) + +func TestDropTableException(t *testing.T) { + Convey("Test_existsTable_and_dropTable_prepare", t, func() { + ddb, err := api.NewSimpleDolphinDBClient(context.TODO(), setup.Address, setup.UserName, setup.Password) + So(err, ShouldBeNil) + Convey("Test_existsTable_dropDatabase", func() { + dbPaths := []string{DfsDBPath, DiskDBPath} + for _, dbPath := range dbPaths { + script := ` + if(existsDatabase("` + dbPath + `")){ + dropDatabase("` + dbPath + `") + } + if(exists("` + dbPath + `")){ + rmdir("` + dbPath + `", true) + } + ` + _, err = ddb.RunScript(script) + So(err, ShouldBeNil) + re, err := ddb.RunScript(`existsDatabase("` + dbPath + `")`) + So(err, ShouldBeNil) + isExitsDatabase := re.(*model.Scalar).DataType.Value() + So(isExitsDatabase, ShouldBeFalse) + } + }) + Convey("Test_dropTable_wrong_Table_exception", func() { + re1, err := ExistsTable(ddb, DfsDBPath, "mt1") + So(err, ShouldBeNil) + So(re1, ShouldBeFalse) + CreateDfsDimensiondb(DfsDBPath, TbName1, TbName2) + err = DropTable(ddb, "mt", DfsDBPath) + So(err, ShouldNotBeNil) + }) + Convey("Test_dropTable_wrong_dbpath_exception", func() { + re1, err := ExistsTable(ddb, "dfs://test1", TbName1) + So(err, ShouldBeNil) + So(re1, ShouldBeFalse) + CreateDfsDimensiondb(DfsDBPath, TbName1, TbName2) + err = DropTable(ddb, TbName1, "dfs://test1") + So(err, ShouldNotBeNil) + }) + Convey("Test_dropTable_only_DBHandle_dbPath_exception", func() { + re1, err := ExistsTable(ddb, DfsDBPath, TbName1) + So(err, ShouldBeNil) + if re1 == true { + _, err = ddb.RunScript("dropDatabase('" + DfsDBPath + "')") + So(err, ShouldBeNil) + } + ddbScript := ` + dbPath="` + DfsDBPath + `" + if(existsDatabase(dbPath)) + dropDatabase(dbPath) + db=database(dbPath, RANGE, 1..10) + n=100000 + tdata=table(sort(take(2010.01.01..2010.12.31, n)) as date, 1..n as id, take(["AMD", "QWE", "CES", "DOP", "ASZ", "FSD", "BBVC", "AWQ", "DS"], n) as sym, rand(100, n) as val) + db.createTable(tdata, "` + TbName1 + `").append!(tdata) + db.createTable(tdata, "` + TbName2 + `").append!(tdata) + ` + _, err = ddb.RunScript(ddbScript) + So(err, ShouldBeNil) + t := new(api.DropTableRequest). 
+ SetDBPath(DfsDBPath).SetDBHandle("db") + err = ddb.DropTable(t) + So(err, ShouldNotBeNil) + }) + err = ddb.Close() + So(err, ShouldBeNil) + }) +} + +func TestExistsTableAndDropTable(t *testing.T) { + Convey("Test_existsTable_and_dropTable_prepare", t, func() { + ddb, err := api.NewSimpleDolphinDBClient(context.TODO(), setup.Address, setup.UserName, setup.Password) + So(err, ShouldBeNil) + Convey("Test_existsTable_dropDatabase", func() { + dbPaths := []string{DfsDBPath, DiskDBPath} + for _, dbPath := range dbPaths { + script := ` + if(existsDatabase("` + dbPath + `")){ + dropDatabase("` + dbPath + `") + } + if(exists("` + dbPath + `")){ + rmdir("` + dbPath + `", true) + } + ` + _, err = ddb.RunScript(script) + So(err, ShouldBeNil) + re, err := ddb.RunScript(`existsDatabase("` + dbPath + `")`) + So(err, ShouldBeNil) + isExitsDatabase := re.(*model.Scalar).DataType.Value() + So(isExitsDatabase, ShouldBeFalse) + } + }) + Convey("Test_existsTable_dfs_dimension", func() { + re1, err := ExistsTable(ddb, DfsDBPath, TbName1) + So(err, ShouldBeNil) + So(re1, ShouldBeFalse) + re2, err := ExistsTable(ddb, DfsDBPath, TbName2) + So(err, ShouldBeNil) + So(re2, ShouldBeFalse) + CreateDfsDimensiondb(DfsDBPath, TbName1, TbName2) + re3, err := ExistsTable(ddb, DfsDBPath, TbName1) + So(err, ShouldBeNil) + So(re3, ShouldBeTrue) + re4, err := ExistsTable(ddb, DfsDBPath, TbName2) + So(err, ShouldBeNil) + So(re4, ShouldBeTrue) + err = DropTable(ddb, TbName1, DfsDBPath) + So(err, ShouldBeNil) + err = DropTable(ddb, TbName2, DfsDBPath) + So(err, ShouldBeNil) + re5, err := ExistsTable(ddb, DfsDBPath, TbName1) + So(err, ShouldBeNil) + So(re5, ShouldBeFalse) + re6, err := ExistsTable(ddb, DfsDBPath, TbName2) + So(err, ShouldBeNil) + So(re6, ShouldBeFalse) + }) + Convey("Test_existsTable_dfs_value", func() { + re1, err := ExistsTable(ddb, DfsDBPath, DfsTBName1) + So(err, ShouldBeNil) + So(re1, ShouldBeFalse) + CreateDfsValuedb(DfsDBPath, DfsTBName1, DfsTBName2) + re2, err := ExistsTable(ddb, DfsDBPath, DfsTBName1) + So(err, ShouldBeNil) + So(re2, ShouldBeTrue) + err = DropTable(ddb, DfsTBName1, DfsDBPath) + So(err, ShouldBeNil) + re3, err := ExistsTable(ddb, DfsDBPath, DfsTBName1) + So(err, ShouldBeNil) + So(re3, ShouldBeFalse) + }) + Convey("Test_existsTable_dfs_range", func() { + re1, err := ExistsTable(ddb, DfsDBPath, DfsTBName1) + So(err, ShouldBeNil) + So(re1, ShouldBeFalse) + CreateDfsRangedb(DfsDBPath, DfsTBName1, DfsTBName2) + re2, err := ExistsTable(ddb, DfsDBPath, DfsTBName1) + So(err, ShouldBeNil) + So(re2, ShouldBeTrue) + err = DropTable(ddb, DfsTBName1, DfsDBPath) + So(err, ShouldBeNil) + re3, err := ExistsTable(ddb, DfsDBPath, DfsTBName1) + So(err, ShouldBeNil) + So(re3, ShouldBeFalse) + }) + Convey("Test_existsTable_dfs_hash", func() { + re1, err := ExistsTable(ddb, DfsDBPath, DfsTBName1) + So(err, ShouldBeNil) + So(re1, ShouldBeFalse) + CreateDfsHashdb(DfsDBPath, DfsTBName1, DfsTBName2) + re2, err := ExistsTable(ddb, DfsDBPath, DfsTBName1) + So(err, ShouldBeNil) + So(re2, ShouldBeTrue) + err = DropTable(ddb, DfsTBName1, DfsDBPath) + So(err, ShouldBeNil) + re3, err := ExistsTable(ddb, DfsDBPath, DfsTBName1) + So(err, ShouldBeNil) + So(re3, ShouldBeFalse) + }) + Convey("Test_existsTable_dfs_list", func() { + re1, err := ExistsTable(ddb, DfsDBPath, DfsTBName1) + So(err, ShouldBeNil) + So(re1, ShouldBeFalse) + CreateDfsListdb(DfsDBPath, DfsTBName1, DfsTBName2) + re2, err := ExistsTable(ddb, DfsDBPath, DfsTBName1) + So(err, ShouldBeNil) + So(re2, ShouldBeTrue) + err = DropTable(ddb, DfsTBName1, DfsDBPath) 
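+ // DropTable here is assumed to wrap api.DropTableRequest the same way the
+ // raw call in Test_dropTable_only_DBHandle_dbPath_exception does, with the
+ // table name filled in (SetTableName is assumed, by analogy with
+ // api.DropPartitionRequest):
+ //
+ //     req := new(api.DropTableRequest).
+ //         SetDBPath(DfsDBPath).
+ //         SetTableName(DfsTBName1)
+ //     err := ddb.DropTable(req)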
+ So(err, ShouldBeNil) + re3, err := ExistsTable(ddb, DfsDBPath, DfsTBName1) + So(err, ShouldBeNil) + So(re3, ShouldBeFalse) + }) + Convey("Test_existsTable_dfs_compo_range_range", func() { + re1, err := ExistsTable(ddb, DfsDBPath, DfsTBName1) + So(err, ShouldBeNil) + So(re1, ShouldBeFalse) + CreateDfsCompoRangeRangedb(DfsDBPath, DfsTBName1, DfsTBName2) + re2, err := ExistsTable(ddb, DfsDBPath, DfsTBName1) + So(err, ShouldBeNil) + So(re2, ShouldBeTrue) + err = DropTable(ddb, DfsTBName1, DfsDBPath) + So(err, ShouldBeNil) + re3, err := ExistsTable(ddb, DfsDBPath, DfsTBName1) + So(err, ShouldBeNil) + So(re3, ShouldBeFalse) + }) + Convey("Test_existsTable_dfs_compo_range_value", func() { + re1, err := ExistsTable(ddb, DfsDBPath, DfsTBName1) + So(err, ShouldBeNil) + So(re1, ShouldBeFalse) + CreateDfsCompoRangeValuedb(DfsDBPath, DfsTBName1, DfsTBName2) + re2, err := ExistsTable(ddb, DfsDBPath, DfsTBName1) + So(err, ShouldBeNil) + So(re2, ShouldBeTrue) + err = DropTable(ddb, DfsTBName1, DfsDBPath) + So(err, ShouldBeNil) + re3, err := ExistsTable(ddb, DfsDBPath, DfsTBName1) + So(err, ShouldBeNil) + So(re3, ShouldBeFalse) + }) + Convey("Test_existsTable_dfs_compo_range_list", func() { + re1, err := ExistsTable(ddb, DfsDBPath, DfsTBName1) + So(err, ShouldBeNil) + So(re1, ShouldBeFalse) + CreateDfsCompoRangeListdb(DfsDBPath, DfsTBName1, DfsTBName2) + re2, err := ExistsTable(ddb, DfsDBPath, DfsTBName1) + So(err, ShouldBeNil) + So(re2, ShouldBeTrue) + err = DropTable(ddb, DfsTBName1, DfsDBPath) + So(err, ShouldBeNil) + re3, err := ExistsTable(ddb, DfsDBPath, DfsTBName1) + So(err, ShouldBeNil) + So(re3, ShouldBeFalse) + }) + Convey("Test_existsTable_dfs_compo_range_hash", func() { + re1, err := ExistsTable(ddb, DfsDBPath, DfsTBName1) + So(err, ShouldBeNil) + So(re1, ShouldBeFalse) + CreateDfsCompoRangeHashdb(DfsDBPath, DfsTBName1, DfsTBName2) + re2, err := ExistsTable(ddb, DfsDBPath, DfsTBName1) + So(err, ShouldBeNil) + So(re2, ShouldBeTrue) + err = DropTable(ddb, DfsTBName1, DfsDBPath) + So(err, ShouldBeNil) + re3, err := ExistsTable(ddb, DfsDBPath, DfsTBName1) + So(err, ShouldBeNil) + So(re3, ShouldBeFalse) + }) + Convey("Test_existsTable_disk_unpartitioned_table", func() { + re1, err := ExistsTable(ddb, DfsDBPath, DfsTBName1) + So(err, ShouldBeNil) + So(re1, ShouldBeFalse) + CreateDiskUnpartitioneddb(DiskDBPath, TbName1, TbName2) + re2, err := ExistsTable(ddb, DiskDBPath, TbName1) + So(err, ShouldBeNil) + So(re2, ShouldBeTrue) + err = DropTable(ddb, TbName1, DiskDBPath) + So(err, ShouldBeNil) + re3, err := ExistsTable(ddb, DiskDBPath, "tdata") + So(err, ShouldBeNil) + So(re3, ShouldBeFalse) + err = DropDatabase(ddb, DiskDBPath) + So(err, ShouldBeNil) + }) + Convey("Test_existsTable_disk_range", func() { + re1, err := ExistsTable(ddb, DiskDBPath, DfsTBName1) + So(err, ShouldBeNil) + So(re1, ShouldBeFalse) + CreateDiskRangedb(DiskDBPath, DfsTBName1, DfsTBName2) + re2, err := ExistsTable(ddb, DiskDBPath, DfsTBName1) + So(err, ShouldBeNil) + So(re2, ShouldBeTrue) + err = DropTable(ddb, DfsTBName1, DiskDBPath) + So(err, ShouldBeNil) + re3, err := ExistsTable(ddb, DiskDBPath, DfsTBName1) + So(err, ShouldBeNil) + So(re3, ShouldBeFalse) + }) + Convey("Test_existsTable_disk_value", func() { + re1, err := ExistsTable(ddb, DiskDBPath, DfsTBName1) + So(err, ShouldBeNil) + So(re1, ShouldBeFalse) + CreateDiskValuedb(DiskDBPath, DfsTBName1, DfsTBName2) + re2, err := ExistsTable(ddb, DiskDBPath, DfsTBName1) + So(err, ShouldBeNil) + So(re2, ShouldBeTrue) + err = DropTable(ddb, DfsTBName1, DiskDBPath) + So(err, 
ShouldBeNil) + re3, err := ExistsTable(ddb, DiskDBPath, DfsTBName1) + So(err, ShouldBeNil) + So(re3, ShouldBeFalse) + }) + Convey("Test_existsTable_disk_list", func() { + re1, err := ExistsTable(ddb, DiskDBPath, DfsTBName1) + So(err, ShouldBeNil) + So(re1, ShouldBeFalse) + CreateDiskListdb(DiskDBPath, DfsTBName1, DfsTBName2) + re2, err := ExistsTable(ddb, DiskDBPath, DfsTBName1) + So(err, ShouldBeNil) + So(re2, ShouldBeTrue) + err = DropTable(ddb, DfsTBName1, DiskDBPath) + So(err, ShouldBeNil) + re3, err := ExistsTable(ddb, DiskDBPath, DfsTBName1) + So(err, ShouldBeNil) + So(re3, ShouldBeFalse) + }) + Convey("Test_existsTable_disk_hash", func() { + re1, err := ExistsTable(ddb, DiskDBPath, DfsTBName1) + So(err, ShouldBeNil) + So(re1, ShouldBeFalse) + CreateDiskHashdb(DiskDBPath, DfsTBName1, DfsTBName2) + re2, err := ExistsTable(ddb, DiskDBPath, DfsTBName1) + So(err, ShouldBeNil) + So(re2, ShouldBeTrue) + err = DropTable(ddb, DfsTBName1, DiskDBPath) + So(err, ShouldBeNil) + re3, err := ExistsTable(ddb, DiskDBPath, DfsTBName1) + So(err, ShouldBeNil) + So(re3, ShouldBeFalse) + }) + Convey("Test_existsTable_disk_compo_range_range", func() { + re1, err := ExistsTable(ddb, DiskDBPath, DfsTBName1) + So(err, ShouldBeNil) + So(re1, ShouldBeFalse) + CreateDiskCompoRangeRangedb(DiskDBPath, DfsTBName1, DfsTBName2) + re2, err := ExistsTable(ddb, DiskDBPath, DfsTBName1) + So(err, ShouldBeNil) + So(re2, ShouldBeTrue) + err = DropTable(ddb, DfsTBName1, DiskDBPath) + So(err, ShouldBeNil) + re3, err := ExistsTable(ddb, DiskDBPath, DfsTBName1) + So(err, ShouldBeNil) + So(re3, ShouldBeFalse) + }) + }) +} diff --git a/test/loadTableBySQL_test.go b/test/loadTableBySQL_test.go new file mode 100644 index 0000000..9fb1941 --- /dev/null +++ b/test/loadTableBySQL_test.go @@ -0,0 +1,189 @@ +package test + +import ( + "context" + "testing" + + "github.com/dolphindb/api-go/api" + "github.com/dolphindb/api-go/model" + "github.com/dolphindb/api-go/test/setup" + . 
"github.com/smartystreets/goconvey/convey" +) + +func TestLoadTableBySQL(t *testing.T) { + Convey("Test LoadTableBySQL prepare", t, func() { + ddb, err := api.NewSimpleDolphinDBClient(context.TODO(), setup.Address, setup.UserName, setup.Password) + So(err, ShouldBeNil) + Convey("Drop all Databases", func() { + dbPaths := []string{DfsDBPath, DiskDBPath} + for _, dbPath := range dbPaths { + script := ` + if(existsDatabase("` + dbPath + `")){ + dropDatabase("` + dbPath + `") + } + if(exists("` + dbPath + `")){ + rmdir("` + dbPath + `", true) + } + ` + _, err = ddb.RunScript(script) + So(err, ShouldBeNil) + re, err := ddb.RunScript(`existsDatabase("` + dbPath + `")`) + So(err, ShouldBeNil) + isExitsDatabase := re.(*model.Scalar).DataType.Value() + So(isExitsDatabase, ShouldBeFalse) + } + }) + Convey("Test_LoadTableBySQL_dfs_dimension:", func() { + re1, err := ExistsDatabase(ddb, DfsDBPath) + So(err, ShouldBeNil) + So(re1, ShouldBeFalse) + CreateDfsDimensiondb(DfsDBPath, TbName1, TbName2) + _, err = LoadTableBySQL(ddb, "[2010.01.05, 2010.01.15, 2010.01.19]", "select * from loadTable('"+DfsDBPath+"','"+TbName1+"') where date in %s", DfsDBPath, TbName1) + So(err, ShouldNotBeNil) + }) + Convey("Test_LoadTableBySQL_dfs_range:", func() { + CreateDfsRangedb(DfsDBPath, TbName1, TbName2) + tmp, err := ddb.RunScript(`select * from loadTable("` + DfsDBPath + `", "` + TbName1 + `") where date in [2010.01.05, 2010.01.15, 2010.01.19]`) + So(err, ShouldBeNil) + exTmp := tmp.(*model.Table) + reTmp, err := LoadTableBySQL(ddb, "[2010.01.05, 2010.01.15, 2010.01.19]", "select * from loadTable('"+DfsDBPath+"','"+TbName1+"') where date in %s", DfsDBPath, TbName1) + So(err, ShouldBeNil) + re1 := CompareTablesDataformTable(exTmp, reTmp) + So(re1, ShouldBeTrue) + err = DropDatabase(ddb, DfsDBPath) + So(err, ShouldBeNil) + re2, err := ExistsDatabase(ddb, DfsDBPath) + So(err, ShouldBeNil) + So(re2, ShouldBeFalse) + }) + Convey("Test_LoadTableBySQL_dfs_hash:", func() { + re1, err := ExistsDatabase(ddb, DfsDBPath) + So(err, ShouldBeNil) + So(re1, ShouldBeFalse) + CreateDfsHashdb(DfsDBPath, TbName1, TbName2) + tmp, err := ddb.RunScript(`select * from loadTable("` + DfsDBPath + `", "` + TbName1 + `") where date in [2010.01.05, 2010.01.15, 2010.01.19]`) + So(err, ShouldBeNil) + exTmp := tmp.(*model.Table) + reTmp, err := LoadTableBySQL(ddb, "[2010.01.05, 2010.01.15, 2010.01.19]", "select * from loadTable('"+DfsDBPath+"','"+TbName1+"') where date in %s", DfsDBPath, TbName1) + So(err, ShouldBeNil) + re2 := CompareTablesDataformTable(exTmp, reTmp) + So(re2, ShouldBeTrue) + err = DropDatabase(ddb, DfsDBPath) + So(err, ShouldBeNil) + re3, err := ExistsDatabase(ddb, DfsDBPath) + So(err, ShouldBeNil) + So(re3, ShouldBeFalse) + }) + Convey("Test_LoadTableBySQL_dfs_value:", func() { + re1, err := ExistsDatabase(ddb, DfsDBPath) + So(err, ShouldBeNil) + So(re1, ShouldBeFalse) + CreateDfsValuedb(DfsDBPath, TbName1, TbName2) + tmp, err := ddb.RunScript(`select * from loadTable("` + DfsDBPath + `", "` + TbName1 + `") where date in [2010.01.05, 2010.01.15, 2010.01.19]`) + So(err, ShouldBeNil) + exTmp := tmp.(*model.Table) + reTmp, err := LoadTableBySQL(ddb, "[2010.01.05, 2010.01.15, 2010.01.19]", "select * from loadTable('"+DfsDBPath+"','"+TbName1+"') where date in %s", DfsDBPath, TbName1) + So(err, ShouldBeNil) + re2 := CompareTablesDataformTable(exTmp, reTmp) + So(re2, ShouldBeTrue) + err = DropDatabase(ddb, DfsDBPath) + So(err, ShouldBeNil) + re3, err := ExistsDatabase(ddb, DfsDBPath) + So(err, ShouldBeNil) + So(re3, ShouldBeFalse) + }) 
+ Convey("Test_LoadTableBySQL_dfs_list:", func() { + re1, err := ExistsDatabase(ddb, DfsDBPath) + So(err, ShouldBeNil) + So(re1, ShouldBeFalse) + CreateDfsListdb(DfsDBPath, TbName1, TbName2) + tmp, err := ddb.RunScript(`select * from loadTable("` + DfsDBPath + `", "` + TbName1 + `") where date in [2010.01.05, 2010.01.15, 2010.01.19]`) + So(err, ShouldBeNil) + exTmp := tmp.(*model.Table) + reTmp, err := LoadTableBySQL(ddb, "[2010.01.05, 2010.01.15, 2010.01.19]", "select * from loadTable('"+DfsDBPath+"','"+TbName1+"') where date in %s", DfsDBPath, TbName1) + So(err, ShouldBeNil) + re2 := CompareTablesDataformTable(exTmp, reTmp) + So(re2, ShouldBeTrue) + err = DropDatabase(ddb, DfsDBPath) + So(err, ShouldBeNil) + re3, err := ExistsDatabase(ddb, DfsDBPath) + So(err, ShouldBeNil) + So(re3, ShouldBeFalse) + }) + Convey("Test_LoadTableBySQL_dfs_compo:", func() { + re1, err := ExistsDatabase(ddb, DfsDBPath) + So(err, ShouldBeNil) + So(re1, ShouldBeFalse) + CreateDfsCompoRangeRangedb(DfsDBPath, TbName1, TbName2) + tmp, err := ddb.RunScript(`select * from loadTable("` + DfsDBPath + `", "` + TbName1 + `") where date in [2010.01.05, 2010.01.15, 2010.01.19]`) + So(err, ShouldBeNil) + exTmp := tmp.(*model.Table) + reTmp, err := LoadTableBySQL(ddb, "[2010.01.05, 2010.01.15, 2010.01.19]", "select * from loadTable('"+DfsDBPath+"','"+TbName1+"') where date in %s", DfsDBPath, TbName1) + So(err, ShouldBeNil) + re2 := CompareTablesDataformTable(exTmp, reTmp) + So(re2, ShouldBeTrue) + err = DropDatabase(ddb, DfsDBPath) + So(err, ShouldBeNil) + re3, err := ExistsDatabase(ddb, DfsDBPath) + So(err, ShouldBeNil) + So(re3, ShouldBeFalse) + }) + Convey("Test_LoadTableBySQL_disk_unpartitioned:", func() { + CreateDiskUnpartitioneddb(DiskDBPath, TbName1, TbName2) + _, err := LoadTableBySQL(ddb, "[2010.01.05, 2010.01.15, 2010.01.19]", "select * from loadTable('"+DiskDBPath+"','"+TbName1+"') where date in %s", DiskDBPath, TbName1) + So(err, ShouldNotBeNil) + }) + Convey("Test_LoadTableBySQL_disk_range:", func() { + CreateDiskRangedb(DiskDBPath, TbName1, TbName2) + tmp, err := ddb.RunScript(`select * from loadTable("` + DiskDBPath + `", "` + TbName1 + `") where date in [2010.01.05, 2010.01.15, 2010.01.19]`) + So(err, ShouldBeNil) + exTmp := tmp.(*model.Table) + reTmp, err := LoadTableBySQL(ddb, "[2010.01.05, 2010.01.15, 2010.01.19]", "select * from loadTable('"+DiskDBPath+"','"+TbName1+"') where date in %s", DiskDBPath, TbName1) + So(err, ShouldBeNil) + re1 := CompareTablesDataformTable(exTmp, reTmp) + So(re1, ShouldBeTrue) + }) + Convey("Test_LoadTableBySQL_disk_hash:", func() { + CreateDiskHashdb(DiskDBPath, TbName1, TbName2) + tmp, err := ddb.RunScript(`select * from loadTable("` + DiskDBPath + `", "` + TbName1 + `") where date in [2010.01.05, 2010.01.15, 2010.01.19]`) + So(err, ShouldBeNil) + exTmp := tmp.(*model.Table) + reTmp, err := LoadTableBySQL(ddb, "[2010.01.05, 2010.01.15, 2010.01.19]", "select * from loadTable('"+DiskDBPath+"','"+TbName1+"') where date in %s", DiskDBPath, TbName1) + So(err, ShouldBeNil) + re1 := CompareTablesDataformTable(exTmp, reTmp) + So(re1, ShouldBeTrue) + }) + Convey("Test_LoadTableBySQL_disk_value:", func() { + CreateDiskValuedb(DiskDBPath, TbName1, TbName2) + tmp, err := ddb.RunScript(`select * from loadTable("` + DiskDBPath + `", "` + TbName1 + `") where date in [2010.01.05, 2010.01.15, 2010.01.19]`) + So(err, ShouldBeNil) + exTmp := tmp.(*model.Table) + reTmp, err := LoadTableBySQL(ddb, "[2010.01.05, 2010.01.15, 2010.01.19]", "select * from loadTable('"+DiskDBPath+"','"+TbName1+"') 
where date in %s", DiskDBPath, TbName1) + So(err, ShouldBeNil) + re1 := CompareTablesDataformTable(exTmp, reTmp) + So(re1, ShouldBeTrue) + }) + Convey("Test_LoadTableBySQL_disk_list:", func() { + CreateDiskListdb(DiskDBPath, TbName1, TbName2) + tmp, err := ddb.RunScript(`select * from loadTable("` + DiskDBPath + `", "` + TbName1 + `") where date in [2010.01.05, 2010.01.15, 2010.01.19]`) + So(err, ShouldBeNil) + exTmp := tmp.(*model.Table) + reTmp, err := LoadTableBySQL(ddb, "[2010.01.05, 2010.01.15, 2010.01.19]", "select * from loadTable('"+DiskDBPath+"','"+TbName1+"') where date in %s", DiskDBPath, TbName1) + So(err, ShouldBeNil) + re1 := CompareTablesDataformTable(exTmp, reTmp) + So(re1, ShouldBeTrue) + }) + Convey("Test_LoadTableBySQL_disk_compo_range_range:", func() { + CreateDiskCompoRangeRangedb(DiskDBPath, TbName1, TbName2) + tmp, err := ddb.RunScript(`select * from loadTable("` + DiskDBPath + `", "` + TbName1 + `") where date in [2010.01.05, 2010.01.15, 2010.01.19]`) + So(err, ShouldBeNil) + exTmp := tmp.(*model.Table) + reTmp, err := LoadTableBySQL(ddb, "[2010.01.05, 2010.01.15, 2010.01.19]", "select * from loadTable('"+DiskDBPath+"','"+TbName1+"') where date in %s", DiskDBPath, TbName1) + So(err, ShouldBeNil) + re1 := CompareTablesDataformTable(exTmp, reTmp) + So(re1, ShouldBeTrue) + }) + err = ddb.Close() + So(err, ShouldBeNil) + }) +} diff --git a/test/loadTable_test.go b/test/loadTable_test.go new file mode 100644 index 0000000..7d2e5f7 --- /dev/null +++ b/test/loadTable_test.go @@ -0,0 +1,430 @@ +package test + +import ( + "context" + "testing" + + "github.com/dolphindb/api-go/api" + "github.com/dolphindb/api-go/model" + "github.com/dolphindb/api-go/test/setup" + . "github.com/smartystreets/goconvey/convey" +) + +func TestLoadTable(t *testing.T) { + Convey("Test LoadTable prepare", t, func() { + ddb, err := api.NewSimpleDolphinDBClient(context.TODO(), setup.Address, setup.UserName, setup.Password) + So(err, ShouldBeNil) + Convey("Drop all Databases", func() { + dbPaths := []string{DfsDBPath, DiskDBPath} + for _, dbPath := range dbPaths { + script := ` + if(existsDatabase("` + dbPath + `")){ + dropDatabase("` + dbPath + `") + } + if(exists("` + dbPath + `")){ + rmdir("` + dbPath + `", true) + } + ` + _, err = ddb.RunScript(script) + So(err, ShouldBeNil) + re, err := ddb.RunScript(`existsDatabase("` + dbPath + `")`) + So(err, ShouldBeNil) + isExitsDatabase := re.(*model.Scalar).DataType.Value() + So(isExitsDatabase, ShouldBeFalse) + } + }) + Convey("Test_LoadTable_dfs_dimension:", func() { + re1, err := ExistsDatabase(ddb, DfsDBPath) + So(err, ShouldBeNil) + So(re1, ShouldBeFalse) + CreateDfsDimensiondb(DfsDBPath, TbName1, TbName2) + tmp, err := ddb.RunScript(`select * from loadTable("` + DfsDBPath + `", "` + TbName1 + `")`) + So(err, ShouldBeNil) + exTmp := tmp.(*model.Table) + reTmp, err := LoadTable(ddb, TbName1, DfsDBPath) + So(err, ShouldBeNil) + re2 := CompareTablesDataformTable(exTmp, reTmp) + So(re2, ShouldBeTrue) + err = DropDatabase(ddb, DfsDBPath) + So(err, ShouldBeNil) + re3, err := ExistsDatabase(ddb, DfsDBPath) + So(err, ShouldBeNil) + So(re3, ShouldBeFalse) + }) + Convey("Test_LoadTable_dfs_range:", func() { + re1, err := ExistsDatabase(ddb, DfsDBPath) + So(err, ShouldBeNil) + So(re1, ShouldBeFalse) + CreateDfsRangedb(DfsDBPath, TbName1, TbName2) + tmp, err := ddb.RunScript(`select * from loadTable("` + DfsDBPath + `", "` + TbName1 + `")`) + So(err, ShouldBeNil) + exTmp := tmp.(*model.Table) + reTmp, err := LoadTable(ddb, TbName1, DfsDBPath) + So(err, 
ShouldBeNil) + re2 := CompareTablesDataformTable(exTmp, reTmp) + So(re2, ShouldBeTrue) + err = DropDatabase(ddb, DfsDBPath) + So(err, ShouldBeNil) + re3, err := ExistsDatabase(ddb, DfsDBPath) + So(err, ShouldBeNil) + So(re3, ShouldBeFalse) + }) + Convey("Test_LoadTable_dfs_range_memoryMode_exception:", func() { + re1, err := ExistsDatabase(ddb, DfsDBPath) + So(err, ShouldBeNil) + So(re1, ShouldBeFalse) + CreateDfsRangedb(DfsDBPath, TbName1, TbName2) + _, err = LoadTableMemoryMode(ddb, TbName1, DfsDBPath, true) + So(err, ShouldNotBeNil) + err = DropDatabase(ddb, DfsDBPath) + So(err, ShouldBeNil) + re2, err := ExistsDatabase(ddb, DfsDBPath) + So(err, ShouldBeNil) + So(re2, ShouldBeFalse) + }) + Convey("Test_LoadTable_dfs_range_partitions_exception:", func() { + re1, err := ExistsDatabase(ddb, DfsDBPath) + So(err, ShouldBeNil) + So(re1, ShouldBeFalse) + CreateDfsRangedb(DfsDBPath, TbName1, TbName2) + _, err = LoadTablePartitions(ddb, TbName1, DfsDBPath, "5000") + So(err, ShouldNotBeNil) + err = DropDatabase(ddb, DfsDBPath) + So(err, ShouldBeNil) + re2, err := ExistsDatabase(ddb, DfsDBPath) + So(err, ShouldBeNil) + So(re2, ShouldBeFalse) + }) + Convey("Test_LoadTable_dfs_hash:", func() { + re1, err := ExistsDatabase(ddb, DfsDBPath) + So(err, ShouldBeNil) + So(re1, ShouldBeFalse) + CreateDfsHashdb(DfsDBPath, TbName1, TbName2) + tmp, err := ddb.RunScript(`select * from loadTable("` + DfsDBPath + `", "` + TbName1 + `")`) + So(err, ShouldBeNil) + exTmp := tmp.(*model.Table) + reTmp, err := LoadTable(ddb, TbName1, DfsDBPath) + So(err, ShouldBeNil) + re2 := CompareTablesDataformTable(exTmp, reTmp) + So(re2, ShouldBeTrue) + err = DropDatabase(ddb, DfsDBPath) + So(err, ShouldBeNil) + re3, err := ExistsDatabase(ddb, DfsDBPath) + So(err, ShouldBeNil) + So(re3, ShouldBeFalse) + }) + Convey("Test_LoadTable_dfs_hash_memoryMode_exception:", func() { + re1, err := ExistsDatabase(ddb, DfsDBPath) + So(err, ShouldBeNil) + So(re1, ShouldBeFalse) + CreateDfsHashdb(DfsDBPath, TbName1, TbName2) + _, err = LoadTableMemoryMode(ddb, TbName1, DfsDBPath, true) + So(err, ShouldNotBeNil) + err = DropDatabase(ddb, DfsDBPath) + So(err, ShouldBeNil) + re2, err := ExistsDatabase(ddb, DfsDBPath) + So(err, ShouldBeNil) + So(re2, ShouldBeFalse) + }) + Convey("Test_LoadTable_dfs_hash_partitions_exception:", func() { + re1, err := ExistsDatabase(ddb, DfsDBPath) + So(err, ShouldBeNil) + So(re1, ShouldBeFalse) + CreateDfsHashdb(DfsDBPath, TbName1, TbName2) + _, err = LoadTablePartitions(ddb, TbName1, DfsDBPath, "1") + So(err, ShouldNotBeNil) + err = DropDatabase(ddb, DfsDBPath) + So(err, ShouldBeNil) + re2, err := ExistsDatabase(ddb, DfsDBPath) + So(err, ShouldBeNil) + So(re2, ShouldBeFalse) + }) + Convey("Test_LoadTable_dfs_value:", func() { + re1, err := ExistsDatabase(ddb, DfsDBPath) + So(err, ShouldBeNil) + So(re1, ShouldBeFalse) + CreateDfsValuedb(DfsDBPath, TbName1, TbName2) + tmp, err := ddb.RunScript(`select * from loadTable("` + DfsDBPath + `", "` + TbName1 + `")`) + So(err, ShouldBeNil) + exTmp := tmp.(*model.Table) + reTmp, err := LoadTable(ddb, TbName1, DfsDBPath) + So(err, ShouldBeNil) + re2 := CompareTablesDataformTable(exTmp, reTmp) + So(re2, ShouldBeTrue) + err = DropDatabase(ddb, DfsDBPath) + So(err, ShouldBeNil) + re3, err := ExistsDatabase(ddb, DfsDBPath) + So(err, ShouldBeNil) + So(re3, ShouldBeFalse) + }) + Convey("Test_LoadTable_dfs_value_memoryMode_exception:", func() { + re1, err := ExistsDatabase(ddb, DfsDBPath) + So(err, ShouldBeNil) + So(re1, ShouldBeFalse) + CreateDfsValuedb(DfsDBPath, TbName1, TbName2) + 
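// loading a DFS table with memoryMode enabled is expected to fail, so only the returned error is asserted
+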
_, err = LoadTableMemoryMode(ddb, TbName1, DfsDBPath, true) + So(err, ShouldNotBeNil) + err = DropDatabase(ddb, DfsDBPath) + So(err, ShouldBeNil) + re2, err := ExistsDatabase(ddb, DfsDBPath) + So(err, ShouldBeNil) + So(re2, ShouldBeFalse) + }) + Convey("Test_LoadTable_dfs_value_partitions_exception:", func() { + re1, err := ExistsDatabase(ddb, DfsDBPath) + So(err, ShouldBeNil) + So(re1, ShouldBeFalse) + CreateDfsValuedb(DfsDBPath, TbName1, TbName2) + _, err = LoadTablePartitions(ddb, TbName1, DfsDBPath, "2010.01.01") + So(err, ShouldNotBeNil) + err = DropDatabase(ddb, DfsDBPath) + So(err, ShouldBeNil) + re2, err := ExistsDatabase(ddb, DfsDBPath) + So(err, ShouldBeNil) + So(re2, ShouldBeFalse) + }) + Convey("Test_LoadTable_dfs_list:", func() { + re1, err := ExistsDatabase(ddb, DfsDBPath) + So(err, ShouldBeNil) + So(re1, ShouldBeFalse) + CreateDfsListdb(DfsDBPath, TbName1, TbName2) + tmp, err := ddb.RunScript(`select * from loadTable("` + DfsDBPath + `", "` + TbName1 + `")`) + So(err, ShouldBeNil) + exTmp := tmp.(*model.Table) + reTmp, err := LoadTable(ddb, TbName1, DfsDBPath) + So(err, ShouldBeNil) + re2 := CompareTablesDataformTable(exTmp, reTmp) + So(re2, ShouldBeTrue) + err = DropDatabase(ddb, DfsDBPath) + So(err, ShouldBeNil) + re3, err := ExistsDatabase(ddb, DfsDBPath) + So(err, ShouldBeNil) + So(re3, ShouldBeFalse) + }) + Convey("Test_LoadTable_dfs_list_memoryMode_exception:", func() { + re1, err := ExistsDatabase(ddb, DfsDBPath) + So(err, ShouldBeNil) + So(re1, ShouldBeFalse) + CreateDfsListdb(DfsDBPath, TbName1, TbName2) + _, err = LoadTableMemoryMode(ddb, TbName1, DfsDBPath, true) + So(err, ShouldNotBeNil) + err = DropDatabase(ddb, DfsDBPath) + So(err, ShouldBeNil) + re2, err := ExistsDatabase(ddb, DfsDBPath) + So(err, ShouldBeNil) + So(re2, ShouldBeFalse) + }) + Convey("Test_LoadTable_dfs_list_partitions_exception:", func() { + re1, err := ExistsDatabase(ddb, DfsDBPath) + So(err, ShouldBeNil) + So(re1, ShouldBeFalse) + CreateDfsListdb(DfsDBPath, TbName1, TbName2) + _, err = LoadTablePartitions(ddb, TbName1, DfsDBPath, "`DOP") + So(err, ShouldNotBeNil) + err = DropDatabase(ddb, DfsDBPath) + So(err, ShouldBeNil) + re2, err := ExistsDatabase(ddb, DfsDBPath) + So(err, ShouldBeNil) + So(re2, ShouldBeFalse) + }) + Convey("Test_LoadTable_dfs_compo:", func() { + re1, err := ExistsDatabase(ddb, DfsDBPath) + So(err, ShouldBeNil) + So(re1, ShouldBeFalse) + CreateDfsCompoRangeRangedb(DfsDBPath, TbName1, TbName2) + tmp, err := ddb.RunScript(`select * from loadTable("` + DfsDBPath + `", "` + TbName1 + `")`) + So(err, ShouldBeNil) + exTmp := tmp.(*model.Table) + reTmp, err := LoadTable(ddb, TbName1, DfsDBPath) + So(err, ShouldBeNil) + re2 := CompareTablesDataformTable(exTmp, reTmp) + So(re2, ShouldBeTrue) + err = DropDatabase(ddb, DfsDBPath) + So(err, ShouldBeNil) + re3, err := ExistsDatabase(ddb, DfsDBPath) + So(err, ShouldBeNil) + So(re3, ShouldBeFalse) + }) + Convey("Test_LoadTable_dfs_compo_range_range_memoryMode_exception:", func() { + re1, err := ExistsDatabase(ddb, DfsDBPath) + So(err, ShouldBeNil) + So(re1, ShouldBeFalse) + CreateDfsCompoRangeRangedb(DfsDBPath, TbName1, TbName2) + _, err = LoadTableMemoryMode(ddb, TbName1, DfsDBPath, true) + So(err, ShouldNotBeNil) + err = DropDatabase(ddb, DfsDBPath) + So(err, ShouldBeNil) + re2, err := ExistsDatabase(ddb, DfsDBPath) + So(err, ShouldBeNil) + So(re2, ShouldBeFalse) + }) + Convey("Test_LoadTable_dfs_compo_range_range_partitions_exception:", func() { + re1, err := ExistsDatabase(ddb, DfsDBPath) + So(err, ShouldBeNil) + So(re1, 
ShouldBeFalse) + CreateDfsCompoRangeRangedb(DfsDBPath, TbName1, TbName2) + _, err = LoadTablePartitions(ddb, TbName1, DfsDBPath, "2010.01.01") + So(err, ShouldNotBeNil) + err = DropDatabase(ddb, DfsDBPath) + So(err, ShouldBeNil) + re2, err := ExistsDatabase(ddb, DfsDBPath) + So(err, ShouldBeNil) + So(re2, ShouldBeFalse) + }) + Convey("Test_LoadTable_disk_unpartitioned:", func() { + CreateDiskUnpartitioneddb(DiskDBPath, TbName1, TbName2) + tmp, err := ddb.RunScript(`select * from loadTable("` + DiskDBPath + `", "` + TbName1 + `")`) + So(err, ShouldBeNil) + exTmp := tmp.(*model.Table) + reTmp, err := LoadTable(ddb, TbName1, DiskDBPath) + So(err, ShouldBeNil) + re1 := CompareTablesDataformTable(exTmp, reTmp) + So(re1, ShouldBeTrue) + }) + Convey("Test_LoadTable_disk_range:", func() { + CreateDiskRangedb(DiskDBPath, TbName1, TbName2) + tmp, err := ddb.RunScript(`select * from loadTable("` + DiskDBPath + `", "` + TbName1 + `")`) + So(err, ShouldBeNil) + exTmp := tmp.(*model.Table) + reTmp, err := LoadTable(ddb, TbName1, DiskDBPath) + So(err, ShouldBeNil) + re1 := CompareTablesDataformTable(exTmp, reTmp) + So(re1, ShouldBeTrue) + }) + Convey("Test_LoadTable_disk_range_partition:", func() { + CreateDiskRangedb(DiskDBPath, TbName1, TbName2) + tmp, err := ddb.RunScript(`select * from loadTable("` + DiskDBPath + `", "` + TbName1 + `") where id < 20001`) + So(err, ShouldBeNil) + exTmp := tmp.(*model.Table) + reTmp, err := LoadTablePartitions(ddb, TbName1, DiskDBPath, `[5000, 15000]`) + So(err, ShouldBeNil) + re1 := CompareTablesDataformTable(exTmp, reTmp) + So(re1, ShouldBeTrue) + }) + Convey("Test_LoadTable_disk_range_memoryMode:", func() { + CreateDiskRangedb(DiskDBPath, TbName1, TbName2) + tmp, err := ddb.RunScript(`select * from loadTable("` + DiskDBPath + `", "` + TbName1 + `")`) + So(err, ShouldBeNil) + exTmp := tmp.(*model.Table) + before, _ := ddb.RunScript("exec memSize from getSessionMemoryStat()") + reTmp, err := LoadTableMemoryMode(ddb, TbName1, DiskDBPath, true) + So(err, ShouldBeNil) + after, _ := ddb.RunScript("exec memSize from getSessionMemoryStat()") + re1 := CompareTablesDataformTable(exTmp, reTmp) + So(re1, ShouldBeTrue) + before1 := before.(*model.Vector).Data.Value()[1] + after1 := after.(*model.Vector).Data.Value()[1] + So(after1, ShouldBeGreaterThanOrEqualTo, before1) + }) + Convey("Test_LoadTable_disk_hash:", func() { + CreateDiskHashdb(DiskDBPath, TbName1, TbName2) + tmp, err := ddb.RunScript(`select * from loadTable("` + DiskDBPath + `", "` + TbName1 + `")`) + So(err, ShouldBeNil) + exTmp := tmp.(*model.Table) + reTmp, err := LoadTable(ddb, TbName1, DiskDBPath) + So(err, ShouldBeNil) + re1 := CompareTablesDataformTable(exTmp, reTmp) + So(re1, ShouldBeTrue) + }) + Convey("Test_LoadTable_disk_hash_partition:", func() { + CreateDiskHashdb(DiskDBPath, TbName1, TbName2) + tmp, err := ddb.RunScript(`select * from loadTable("` + DiskDBPath + `", "` + TbName1 + `") where id in [1, 3, 5]`) + So(err, ShouldBeNil) + exTmp := tmp.(*model.Table) + reTmp, err := LoadTablePartitions(ddb, TbName1, DiskDBPath, "[1, 3, 5]") + So(err, ShouldBeNil) + re := CompareTablesDataformTable(exTmp, reTmp) + So(re, ShouldBeTrue) + }) + Convey("Test_LoadTable_disk_hash_memoryMode:", func() { + CreateDiskHashdb(DiskDBPath, TbName1, TbName2) + tmp, err := ddb.RunScript(`select * from loadTable("` + DiskDBPath + `", "` + TbName1 + `")`) + So(err, ShouldBeNil) + exTmp := tmp.(*model.Table) + before, _ := ddb.RunScript("exec memSize from getSessionMemoryStat()") + reTmp, err := LoadTableMemoryMode(ddb, 
TbName1, DiskDBPath, true) + So(err, ShouldBeNil) + after, _ := ddb.RunScript("exec memSize from getSessionMemoryStat()") + re1 := CompareTablesDataformTable(exTmp, reTmp) + So(re1, ShouldBeTrue) + before1 := before.(*model.Vector).Data.Value()[1] + after1 := after.(*model.Vector).Data.Value()[1] + So(after1, ShouldBeGreaterThanOrEqualTo, before1) + }) + Convey("Test_LoadTable_disk_value:", func() { + CreateDiskValuedb(DiskDBPath, TbName1, TbName2) + tmp, err := ddb.RunScript(`select * from loadTable("` + DiskDBPath + `", "` + TbName1 + `")`) + So(err, ShouldBeNil) + exTmp := tmp.(*model.Table) + reTmp, err := LoadTable(ddb, TbName1, DiskDBPath) + So(err, ShouldBeNil) + re1 := CompareTablesDataformTable(exTmp, reTmp) + So(re1, ShouldBeTrue) + }) + Convey("Test_LoadTable_disk_value_partition:", func() { + CreateDiskValuedb(DiskDBPath, TbName1, TbName2) + tmp, err := ddb.RunScript(`select * from loadTable("` + DiskDBPath + `", "` + TbName1 + `") where id in [2010.01.01, 2010.01.30]`) + So(err, ShouldBeNil) + exTmp := tmp.(*model.Table) + reTmp, err := LoadTablePartitions(ddb, TbName1, DiskDBPath, "[2010.01.01, 2010.01.30]") + So(err, ShouldBeNil) + re1 := CompareTablesDataformTable(exTmp, reTmp) + So(re1, ShouldBeTrue) + }) + Convey("Test_LoadTable_disk_list:", func() { + CreateDiskListdb(DiskDBPath, TbName1, TbName2) + tmp, err := ddb.RunScript(`select * from loadTable("` + DiskDBPath + `", "` + TbName1 + `")`) + So(err, ShouldBeNil) + exTmp := tmp.(*model.Table) + reTmp, err := LoadTable(ddb, TbName1, DiskDBPath) + So(err, ShouldBeNil) + re1 := CompareTablesDataformTable(exTmp, reTmp) + So(re1, ShouldBeTrue) + }) + Convey("Test_LoadTable_disk_list_partition:", func() { + CreateDiskListdb(DiskDBPath, TbName1, TbName2) + tmp, err := ddb.RunScript(`select * from loadTable("` + DiskDBPath + `", "` + TbName1 + `") where sym in ["DOP", "ASZ", "FSD", "BBVC","AWQ","DS"]`) + So(err, ShouldBeNil) + exTmp := tmp.(*model.Table) + reTmp, err := LoadTablePartitions(ddb, TbName1, DiskDBPath, `["DOP", "FSD", "AWQ"]`) + So(err, ShouldBeNil) + re1 := CompareTablesDataformTable(exTmp, reTmp) + So(re1, ShouldBeTrue) + }) + Convey("Test_LoadTable_disk_list_memoryMode:", func() { + CreateDiskValuedb(DiskDBPath, TbName1, TbName2) + tmp, err := ddb.RunScript(`select * from loadTable("` + DiskDBPath + `", "` + TbName1 + `")`) + So(err, ShouldBeNil) + exTmp := tmp.(*model.Table) + before, _ := ddb.RunScript("exec memSize from getSessionMemoryStat()") + reTmp, err := LoadTableMemoryMode(ddb, TbName1, DiskDBPath, true) + So(err, ShouldBeNil) + after, _ := ddb.RunScript("exec memSize from getSessionMemoryStat()") + re1 := CompareTablesDataformTable(exTmp, reTmp) + So(re1, ShouldBeTrue) + before1 := before.(*model.Vector).Data.Value()[1] + after1 := after.(*model.Vector).Data.Value()[1] + So(after1, ShouldBeGreaterThanOrEqualTo, before1) + }) + Convey("Test_LoadTable_disk_compo_range_range:", func() { + CreateDiskCompoRangeRangedb(DiskDBPath, TbName1, TbName2) + tmp, err := ddb.RunScript(`select * from loadTable("` + DiskDBPath + `", "` + TbName1 + `")`) + So(err, ShouldBeNil) + exTmp := tmp.(*model.Table) + reTmp, err := LoadTable(ddb, TbName1, DiskDBPath) + So(err, ShouldBeNil) + re1 := CompareTablesDataformTable(exTmp, reTmp) + So(re1, ShouldBeTrue) + }) + Convey("Test_LoadTable_disk_compo_range_range_partition:", func() { + CreateDiskCompoRangeRangedb(DiskDBPath, TbName1, TbName2) + tmp, err := ddb.RunScript(`select * from loadTable("` + DiskDBPath + `", "` + TbName1 + `") where date between 2010.01.01:2010.01.31 
or date between 2010.04.01:2010.04.30`)
+			So(err, ShouldBeNil)
+			exTmp := tmp.(*model.Table)
+			reTmp, err := LoadTablePartitions(ddb, TbName1, DiskDBPath, `[2010.01.01, 2010.04.25]`)
+			So(err, ShouldBeNil)
+			re1 := CompareTablesDataformTable(exTmp, reTmp)
+			So(re1, ShouldBeTrue)
+		})
+	})
+}
diff --git a/test/loadText_test.go b/test/loadText_test.go
new file mode 100644
index 0000000..d040da7
--- /dev/null
+++ b/test/loadText_test.go
@@ -0,0 +1,44 @@
+package test
+
+import (
+	"context"
+	"testing"
+
+	"github.com/dolphindb/api-go/api"
+	"github.com/dolphindb/api-go/model"
+	"github.com/dolphindb/api-go/test/setup"
+	. "github.com/smartystreets/goconvey/convey"
+)
+
+func TestLoadText(t *testing.T) {
+	Convey("test_loadText_prepare", t, func() {
+		ddb, err := api.NewSimpleDolphinDBClient(context.TODO(), setup.Address, setup.UserName, setup.Password)
+		So(err, ShouldBeNil)
+		data := setup.DATADIR + "/TradesSmall.csv"
+		Convey("test_loadText_fileName_not_exist_exception", func() {
+			loadT := new(api.LoadTextRequest).
+				SetFileName("mssn.csv")
+			_, err := ddb.LoadText(loadT)
+			So(err, ShouldNotBeNil)
+		})
+		Convey("test_loadText_para_filename", func() {
+			tmp, err := ddb.RunScript("select * from loadText(\"" + data + "\")")
+			So(err, ShouldBeNil)
+			ex := tmp.(*model.Table)
+			re, err := LoadTextFileName(ddb, data)
+			So(err, ShouldBeNil)
+			result := CompareTablesDataformTable(ex, re)
+			So(result, ShouldBeTrue)
+		})
+		Convey("test_loadText_para_delimiter", func() {
+			tmp, err := ddb.RunScript("select * from loadText(\"" + data + "\", ';')")
+			So(err, ShouldBeNil)
+			ex := tmp.(*model.Table)
+			re, err := LoadTextDelimiter(ddb, data, ";")
+			So(err, ShouldBeNil)
+			result := CompareTablesDataformTable(ex, re)
+			So(result, ShouldBeTrue)
+		})
+		So(ddb.Close(), ShouldBeNil)
+	})
+}
diff --git a/test/multigoroutinetable_test.go b/test/multigoroutinetable_test.go
new file mode 100644
index 0000000..bf720ec
--- /dev/null
+++ b/test/multigoroutinetable_test.go
@@ -0,0 +1,5062 @@
+package test
+
+import (
+	"context"
+	"fmt"
+	"strconv"
+	"sync"
+	"testing"
+	"time"
+
+	"github.com/dolphindb/api-go/api"
+	"github.com/dolphindb/api-go/model"
+	mtw "github.com/dolphindb/api-go/multigoroutinetable"
+	"github.com/dolphindb/api-go/test/setup"
+	.
"github.com/smartystreets/goconvey/convey" +) + +const ( + DBdfsPath = "dfs://test_multiGoroutineTable" + DBDiskPath = setup.WORKDIR + "/test_multiGoroutineTable" + DfsTableName1 = "pt1" + DfsTableName2 = "pt2" +) + +var waitGroup sync.WaitGroup + +func CreateTimeList(n int, timeFomat string, timeList []string) []time.Time { + ex := []time.Time{} + for i := 0; i < len(timeList); i++ { + timex, _ := time.Parse(timeFomat, timeList[i]) + ex = append(ex, timex) + } + return ex +} + +func CheckListEqual(re []interface{}, ex []byte) bool { + for i := 0; i < len(re); i++ { + if re[i] != ex[i] { + return false + } + } + return true +} + +func insertalldatatype(mtt *mtw.MultiGoroutineTable) error { + timeList := []string{"1969/12/31 13:30:10.008", "1970/01/13 10:28:10.485", "2006/06/13 23:29:10.008", "1970/06/13 13:56:14.123", "1846/06/13 10:14:02.456", "2024/06/13 12:34:14.008"} + colBool := []byte{1, 0, 1, 0, 0, 0} + colchar := []byte{2, 3, 4, 6, 5, 8} + colshort := []int16{2, 3, 8, 10, 11, 15} + colInt := []int32{2, 3, 8, 10, 11, 15} + collong := []int64{2, 3, 8, 10, 11, 15} + coldate := []time.Time{time.Date(2022, time.Month(1), 1, 1, 1, 0, 0, time.UTC), + time.Date(1969, time.Month(12), 31, 1, 1, 0, 0, time.UTC), + time.Date(1970, time.Month(1), 1, 1, 1, 0, 0, time.UTC), + time.Date(1971, time.Month(3), 12, 1, 1, 0, 0, time.UTC), + time.Date(1969, time.Month(11), 1, 1, 1, 0, 0, time.UTC), + time.Date(2024, time.Month(3), 1, 1, 1, 0, 0, time.UTC)} + colmonthv := []time.Time{time.Date(2022, time.Month(1), 1, 1, 1, 0, 0, time.UTC), + time.Date(1969, time.Month(12), 1, 1, 1, 0, 0, time.UTC), + time.Date(1970, time.Month(1), 1, 1, 1, 0, 0, time.UTC), + time.Date(1971, time.Month(3), 1, 1, 1, 0, 0, time.UTC), + time.Date(1969, time.Month(11), 1, 1, 1, 0, 0, time.UTC), + time.Date(2024, time.Month(3), 1, 1, 1, 0, 0, time.UTC)} + coltimestamp := CreateTimeList(6, "15:04:05.041", timeList) + colfloat := []float32{2.3, 4.6, 5.5, 4.9, 55.6, 22.3} + coldouble := []float64{2.3, 4.6, 5.5, 4.9, 55.6, 22.3} + colstring := []string{"AAPL", "AAPL", "GOOG", "GOOG", "MSFT", "MSFT", "IBM", "IBM", "YHOO", "YHOO"} + colsym := []string{"AAPL", "AAPL", "GOOG", "GOOG", "MSFT", "MSFT", "IBM", "IBM", "YHOO", "YHOO"} + coluuid := []string{"88b4ac61-1a43-94ca-1352-4da53cda28bd", "9e495846-1e79-2ca1-bb9b-cf62c3556976", "88b4ac61-1a43-94ca-1352-4da53cda28bd", "9e495846-1e79-2ca1-bb9b-cf62c3556976", "88b4ac61-1a43-94ca-1352-4da53cda28bd", "9e495846-1e79-2ca1-bb9b-cf62c3556976"} + colInt128 := []string{"af5cad08c356296a0544b6bf11556484", "af5cad08c356296a0544b6bf11556484", "af5cad08c356296a0544b6bf11556484", "af5cad08c356296a0544b6bf11556484", "af5cad08c356296a0544b6bf11556484", "af5cad08c356296a0544b6bf11556484"} + colipaddr := []string{"3d5b:14af:b811:c475:5c90:f554:45aa:98a6", "3d5b:14af:b811:c475:5c90:f554:45aa:98a6", "3d5b:14af:b811:c475:5c90:f554:45aa:98a6", "3d5b:14af:b811:c475:5c90:f554:45aa:98a6", "3d5b:14af:b811:c475:5c90:f554:45aa:98a6", "3d5b:14af:b811:c475:5c90:f554:45aa:98a6"} + for i := 0; i < 6; i++ { + err := mtt.Insert(colBool[i], colchar[i], colshort[i], colInt[i], collong[i], + coldate[i], colmonthv[i], coltimestamp[i], colfloat[i], coldouble[i], colstring[i], colsym[i], coluuid[i], colInt128[i], colipaddr[i]) + if err != nil { + return err + } + } + return nil +} + +func threadinsertData(mtt *mtw.MultiGoroutineTable, n int) { + i := 0 + for { + err := mtt.Insert("AAPL"+strconv.Itoa(i%10), + time.Date(1969, time.Month(12), i%10+1, 23, i%10, 50, 000, time.UTC), + float64(22.5)+float64(i), 
float64(14.6)+float64(i), int32(i%10), float64(i)) + AssertNil(err) + if err != nil { + fmt.Println(err) + break + } + if i == n-1 && err == nil { + break + } + i++ + } + waitGroup.Done() +} + +func insertDataTotable(n int, tableName string) { + ddb, err := api.NewSimpleDolphinDBClient(context.TODO(), setup.Address, setup.UserName, setup.Password) + AssertNil(err) + var symarr []string + var datetimearr []time.Time + var floatarr1 []float64 + var floatarr2 []float64 + var intarr []int32 + var floatarr3 []float64 + for i := 0; i < n; i++ { + symarr = append(symarr, "AAPL"+strconv.Itoa(i%10)) + datetimearr = append(datetimearr, time.Date(1969, time.Month(12), i%10+1, 23, i%10, 50, 000, time.UTC)) + floatarr1 = append(floatarr1, float64(22.5)+float64(i)) + floatarr2 = append(floatarr2, float64(14.6)+float64(i)) + intarr = append(intarr, int32(i%10)) + floatarr3 = append(floatarr3, float64(i)) + } + sym, _ := model.NewDataTypeListWithRaw(model.DtString, symarr) + tradeDatev, _ := model.NewDataTypeListWithRaw(model.DtDatetime, datetimearr) + tradePrice, _ := model.NewDataTypeListWithRaw(model.DtDouble, floatarr1) + vwap, _ := model.NewDataTypeListWithRaw(model.DtDouble, floatarr2) + volume, _ := model.NewDataTypeListWithRaw(model.DtInt, intarr) + valueTrade, _ := model.NewDataTypeListWithRaw(model.DtDouble, floatarr3) + tmp := model.NewTable([]string{"sym", "tradeDate", "tradePrice", "vwap", "volume", "valueTrade"}, + []*model.Vector{model.NewVector(sym), model.NewVector(tradeDatev), model.NewVector(tradePrice), + model.NewVector(vwap), model.NewVector(volume), model.NewVector(valueTrade)}) + _, err = ddb.RunFunc("tableInsert{"+tableName+"}", []model.DataForm{tmp}) + AssertNil(err) + AssertNil(ddb.Close()) +} + +func TestMultiGoroutineTable_exception(t *testing.T) { + Convey("test_multiGoroutineTable_prepare", t, func() { + ddb, err := api.NewSimpleDolphinDBClient(context.TODO(), setup.Address, setup.UserName, setup.Password) + So(err, ShouldBeNil) + defer ddb.Close() + Convey("Drop all Databases", func() { + dbPaths := []string{DBdfsPath, DiskDBPath} + for _, dbPath := range dbPaths { + script := ` + if(existsDatabase("` + dbPath + `")){ + dropDatabase("` + dbPath + `") + } + if(exists("` + dbPath + `")){ + rmdir("` + dbPath + `", true) + } + ` + _, err = ddb.RunScript(script) + So(err, ShouldBeNil) + re, err := ddb.RunScript(`existsDatabase("` + dbPath + `")`) + So(err, ShouldBeNil) + isExitsDatabase := re.(*model.Scalar).DataType.Value() + So(isExitsDatabase, ShouldBeFalse) + } + }) + Convey("test_multiGoroutineTable_exception", func() { + Convey("test_multiGoroutineTable_error_hostName_exception", func() { + scriptDFSHASH := ` + if(existsDatabase("` + DBdfsPath + `")){ + dropDatabase("` + DBdfsPath + `") + } + datetest=table(1000:0,["datev", "id"],[DATE,LONG]) + db=database("` + DBdfsPath + `",HASH, [MONTH,10]) + pt=db.createPartitionedTable(datetest,"` + DfsTableName1 + `",'datev') + ` + _, err = ddb.RunScript(scriptDFSHASH) + So(err, ShouldBeNil) + opt := &mtw.Option{ + GoroutineCount: 2, + BatchSize: 1, + Throttle: 1, + PartitionCol: "datev", + Database: DBdfsPath, + TableName: DfsTableName1, + UserID: setup.UserName, + Password: setup.Password, + Address: "wrongHost" + strconv.Itoa(setup.Port), + } + _, err := mtw.NewMultiGoroutineTable(opt) + So(err, ShouldNotBeNil) + }) + Convey("test_multiGoroutineTable_error_port_exception", func() { + scriptDFSHASH := ` + if(existsDatabase("` + DBdfsPath + `")){ + dropDatabase("` + DBdfsPath + `") + } + datetest=table(1000:0,["datev", 
"id"],[DATE,LONG]) + db=database("` + DBdfsPath + `",HASH, [MONTH,10]) + pt=db.createPartitionedTable(datetest,"` + DfsTableName1 + `",'datev') + ` + _, err = ddb.RunScript(scriptDFSHASH) + So(err, ShouldBeNil) + opt := &mtw.Option{ + GoroutineCount: 2, + BatchSize: 1, + Throttle: 1, + PartitionCol: "datev", + Database: DBdfsPath, + TableName: DfsTableName1, + UserID: setup.UserName, + Password: setup.Password, + Address: setup.IP + ":-4", + } + _, err := mtw.NewMultiGoroutineTable(opt) + So(err, ShouldNotBeNil) + }) + Convey("test_multiGoroutineTable_error_userId_exception", func() { + scriptDFSHASH := ` + if(existsDatabase("` + DBdfsPath + `")){ + dropDatabase("` + DBdfsPath + `") + } + datetest=table(1000:0,["datev", "id"],[DATE,LONG]) + db=database("` + DBdfsPath + `",HASH, [MONTH,10]) + pt=db.createPartitionedTable(datetest,"` + DfsTableName1 + `",'datev') + ` + _, err = ddb.RunScript(scriptDFSHASH) + So(err, ShouldBeNil) + opt := &mtw.Option{ + GoroutineCount: 2, + BatchSize: 1, + Throttle: 1, + PartitionCol: "datev", + Database: DBdfsPath, + TableName: DfsTableName1, + UserID: "dabsk", + Password: setup.Password, + Address: setup.Address, + } + _, err := mtw.NewMultiGoroutineTable(opt) + So(err, ShouldNotBeNil) + }) + Convey("test_multiGoroutineTable_error_password_exception", func() { + scriptDFSHASH := ` + if(existsDatabase("` + DBdfsPath + `")){ + dropDatabase("` + DBdfsPath + `") + } + datetest=table(1000:0,["datev", "id"],[DATE,LONG]) + db=database("` + DBdfsPath + `",HASH, [MONTH,10]) + pt=db.createPartitionedTable(datetest,"` + DfsTableName1 + `",'datev') + ` + _, err = ddb.RunScript(scriptDFSHASH) + So(err, ShouldBeNil) + opt := &mtw.Option{ + GoroutineCount: 2, + BatchSize: 1, + Throttle: 1, + PartitionCol: "datev", + Database: DBdfsPath, + TableName: DfsTableName1, + UserID: setup.UserName, + Password: "-2", + Address: setup.Address, + } + _, err := mtw.NewMultiGoroutineTable(opt) + So(err, ShouldNotBeNil) + }) + Convey("test_multiGoroutineTable_error_dbPath_exception", func() { + scriptDFSHASH := ` + if(existsDatabase("` + DBdfsPath + `")){ + dropDatabase("` + DBdfsPath + `") + } + datetest=table(1000:0,["datev", "id"],[DATE,LONG]) + db=database("` + DBdfsPath + `",HASH, [MONTH,10]) + pt=db.createPartitionedTable(datetest,"` + DfsTableName1 + `",'datev') + ` + _, err = ddb.RunScript(scriptDFSHASH) + So(err, ShouldBeNil) + opt := &mtw.Option{ + GoroutineCount: 2, + BatchSize: 1, + Throttle: 1, + PartitionCol: "datev", + Database: "dhb", + TableName: DfsTableName1, + UserID: setup.UserName, + Password: setup.Password, + Address: setup.Address, + } + _, err := mtw.NewMultiGoroutineTable(opt) + So(err, ShouldNotBeNil) + }) + Convey("test_multiGoroutineTable_dbPath_null_exception", func() { + scriptDFSHASH := ` + if(existsDatabase("` + DBdfsPath + `")){ + dropDatabase("` + DBdfsPath + `") + } + datetest=table(1000:0,["datev", "id"],[DATE,LONG]) + db=database("` + DBdfsPath + `",HASH, [MONTH,10]) + pt=db.createPartitionedTable(datetest,"` + DfsTableName1 + `",'datev') + ` + _, err = ddb.RunScript(scriptDFSHASH) + So(err, ShouldBeNil) + opt := &mtw.Option{ + GoroutineCount: 2, + BatchSize: 1, + Throttle: 1, + PartitionCol: "datev", + Database: "", + TableName: DfsTableName1, + UserID: setup.UserName, + Password: setup.Password, + Address: setup.Address, + } + _, err := mtw.NewMultiGoroutineTable(opt) + So(err, ShouldNotBeNil) + }) + Convey("test_multiGoroutineTable_error_TableName_exception", func() { + scriptDFSHASH := ` + if(existsDatabase("` + DBdfsPath + `")){ + 
dropDatabase("` + DBdfsPath + `") + } + datetest=table(1000:0,["datev", "id"],[DATE,LONG]) + db=database("` + DBdfsPath + `",HASH, [MONTH,10]) + pt=db.createPartitionedTable(datetest,"` + DfsTableName1 + `",'datev') + ` + _, err = ddb.RunScript(scriptDFSHASH) + So(err, ShouldBeNil) + opt := &mtw.Option{ + GoroutineCount: 2, + BatchSize: 1, + Throttle: 1, + PartitionCol: "datev", + Database: DBdfsPath, + TableName: "hsb", + UserID: setup.UserName, + Password: setup.Password, + Address: setup.Address, + } + _, err := mtw.NewMultiGoroutineTable(opt) + So(err, ShouldNotBeNil) + }) + Convey("test_multiGoroutineTable_TableName_null_exception", func() { + scriptDFSHASH := ` + if(existsDatabase("` + DBdfsPath + `")){ + dropDatabase("` + DBdfsPath + `") + } + datetest=table(1000:0,["datev", "id"],[DATE,LONG]) + db=database("` + DBdfsPath + `",HASH, [MONTH,10]) + pt=db.createPartitionedTable(datetest,"` + DfsTableName1 + `",'datev') + ` + _, err = ddb.RunScript(scriptDFSHASH) + So(err, ShouldBeNil) + opt := &mtw.Option{ + GoroutineCount: 2, + BatchSize: 1, + Throttle: 1, + PartitionCol: "datev", + Database: DBdfsPath, + TableName: "", + UserID: setup.UserName, + Password: setup.Password, + Address: setup.Address, + } + _, err := mtw.NewMultiGoroutineTable(opt) + So(err, ShouldNotBeNil) + }) + Convey("test_multiGoroutineTable_Throttle_less_than_0_exception", func() { + scriptDFSHASH := ` + if(existsDatabase("` + DBdfsPath + `")){ + dropDatabase("` + DBdfsPath + `") + } + datetest=table(1000:0,["datev", "id"],[DATE,LONG]) + db=database("` + DBdfsPath + `",HASH, [MONTH,10]) + pt=db.createPartitionedTable(datetest,"` + DfsTableName1 + `",'datev') + ` + _, err = ddb.RunScript(scriptDFSHASH) + So(err, ShouldBeNil) + opt := &mtw.Option{ + GoroutineCount: 2, + BatchSize: 1, + Throttle: -1, + PartitionCol: "datev", + Database: DBdfsPath, + TableName: DfsTableName1, + UserID: setup.UserName, + Password: setup.Password, + Address: setup.Address, + } + _, err := mtw.NewMultiGoroutineTable(opt) + So(err, ShouldNotBeNil) + }) + Convey("test_multiGoroutineTable_Throttle_0_exception", func() { + scriptDFSHASH := ` + if(existsDatabase("` + DBdfsPath + `")){ + dropDatabase("` + DBdfsPath + `") + } + datetest=table(1000:0,["datev", "id"],[DATE,LONG]) + db=database("` + DBdfsPath + `",HASH, [MONTH,10]) + pt=db.createPartitionedTable(datetest,"` + DfsTableName1 + `",'datev') + ` + _, err = ddb.RunScript(scriptDFSHASH) + So(err, ShouldBeNil) + opt := &mtw.Option{ + GoroutineCount: 2, + BatchSize: 1, + Throttle: 0, + PartitionCol: "datev", + Database: DBdfsPath, + TableName: DfsTableName1, + UserID: setup.UserName, + Password: setup.Password, + Address: setup.Address, + } + _, err := mtw.NewMultiGoroutineTable(opt) + So(err, ShouldNotBeNil) + }) + Convey("test_multiGoroutineTable_BatchSize_equal_0_exception", func() { + scriptDFSHASH := ` + if(existsDatabase("` + DBdfsPath + `")){ + dropDatabase("` + DBdfsPath + `") + } + datetest=table(1000:0,["datev", "id"],[DATE,LONG]) + db=database("` + DBdfsPath + `",HASH, [MONTH,10]) + pt=db.createPartitionedTable(datetest,"` + DfsTableName1 + `",'datev') + ` + _, err = ddb.RunScript(scriptDFSHASH) + So(err, ShouldBeNil) + opt := &mtw.Option{ + GoroutineCount: 2, + BatchSize: 0, + Throttle: 1, + PartitionCol: "datev", + Database: DBdfsPath, + TableName: DfsTableName1, + UserID: setup.UserName, + Password: setup.Password, + Address: setup.Address, + } + _, err := mtw.NewMultiGoroutineTable(opt) + So(err, ShouldNotBeNil) + }) + 
Convey("test_multiGoroutineTable_BatchSize_less_than_0_exception", func() { + scriptDFSHASH := ` + if(existsDatabase("` + DBdfsPath + `")){ + dropDatabase("` + DBdfsPath + `") + } + datetest=table(1000:0,["datev", "id"],[DATE,LONG]) + db=database("` + DBdfsPath + `",HASH, [MONTH,10]) + pt=db.createPartitionedTable(datetest,"` + DfsTableName1 + `",'datev') + ` + _, err = ddb.RunScript(scriptDFSHASH) + So(err, ShouldBeNil) + opt := &mtw.Option{ + GoroutineCount: 2, + BatchSize: -1, + Throttle: 1, + PartitionCol: "datev", + Database: DBdfsPath, + TableName: DfsTableName1, + UserID: setup.UserName, + Password: setup.Password, + Address: setup.Address, + } + _, err := mtw.NewMultiGoroutineTable(opt) + So(err, ShouldNotBeNil) + }) + Convey("test_multiGoroutineTable_GoroutineCount_equal_0_exception", func() { + scriptDFSHASH := ` + if(existsDatabase("` + DBdfsPath + `")){ + dropDatabase("` + DBdfsPath + `") + } + datetest=table(1000:0,["datev", "id"],[DATE,LONG]) + db=database("` + DBdfsPath + `",HASH, [MONTH,10]) + pt=db.createPartitionedTable(datetest,"` + DfsTableName1 + `",'datev') + ` + _, err = ddb.RunScript(scriptDFSHASH) + So(err, ShouldBeNil) + opt := &mtw.Option{ + GoroutineCount: 0, + BatchSize: 1, + Throttle: 1, + PartitionCol: "datev", + Database: DBdfsPath, + TableName: DfsTableName1, + UserID: setup.UserName, + Password: setup.Password, + Address: setup.Address, + } + _, err := mtw.NewMultiGoroutineTable(opt) + So(err, ShouldNotBeNil) + }) + Convey("test_multiGoroutineTable_GoroutineCount_less_than_0_exception", func() { + scriptDFSHASH := ` + if(existsDatabase("` + DBdfsPath + `")){ + dropDatabase("` + DBdfsPath + `") + } + datetest=table(1000:0,["datev", "id"],[DATE,LONG]) + db=database("` + DBdfsPath + `",HASH, [MONTH,10]) + pt=db.createPartitionedTable(datetest,"` + DfsTableName1 + `",'datev') + ` + _, err = ddb.RunScript(scriptDFSHASH) + So(err, ShouldBeNil) + opt := &mtw.Option{ + GoroutineCount: -3, + BatchSize: 1, + Throttle: 1, + PartitionCol: "datev", + Database: DBdfsPath, + TableName: DfsTableName1, + UserID: setup.UserName, + Password: setup.Password, + Address: setup.Address, + } + _, err := mtw.NewMultiGoroutineTable(opt) + So(err, ShouldNotBeNil) + }) + Convey("test_multiGoroutineTable_userid_no_grant_write_exception", func() { + scriptDFSHASH := ` + if(existsDatabase("` + DBdfsPath + `")){ + dropDatabase("` + DBdfsPath + `") + } + datetest=table(1000:0,["datev", "id"],[DATE,LONG]) + db=database("` + DBdfsPath + `",HASH, [MONTH,10]) + pt=db.createPartitionedTable(datetest,"` + DfsTableName1 + `",'datev') + ` + _, err = ddb.RunScript(scriptDFSHASH) + So(err, ShouldBeNil) + scriptusernograntwrite := ` + def test_user(){ + createUser("mark", "123456") + grant("mark", TABLE_READ, "*") + } + rpc(getControllerAlias(), test_user) + ` + _, err = ddb.RunScript(scriptusernograntwrite) + So(err, ShouldBeNil) + opt := &mtw.Option{ + GoroutineCount: 2, + BatchSize: 1, + Throttle: 1, + PartitionCol: "datev", + Database: DBdfsPath, + TableName: DfsTableName1, + UserID: "mark", + Password: setup.Password, + Address: setup.Address, + } + mtt, err := mtw.NewMultiGoroutineTable(opt) + So(err, ShouldBeNil) + err = mtt.Insert(time.Date(2022, time.Month(1), 1, 1, 1, 0, 0, time.UTC), int64(1)) + So(err, ShouldBeNil) + mtt.WaitForGoroutineCompletion() + errorInfo := mtt.GetStatus().ErrMsg + So(errorInfo, ShouldResemble, "client error response. 
Not granted to write data to table "+DBdfsPath+"/"+DfsTableName1) + _, err = ddb.RunScript(`rpc(getControllerAlias(), deleteUser, "mark")`) + So(err, ShouldBeNil) + _, err = ddb.RunScript(`dropDatabase("` + DBdfsPath + `")`) + So(err, ShouldBeNil) + }) + Convey("test_multithreadTableWriterTest_Memory_Table_mutilthread_unspecified_partitioncolexception", func() { + scriptMemoryTable := "t = table(1000:0, `id`x, [LONG, LONG]);share t as shareTable;" + _, err = ddb.RunScript(scriptMemoryTable) + So(err, ShouldBeNil) + opt := &mtw.Option{ + GoroutineCount: 2, + BatchSize: 1, + Throttle: 1, + PartitionCol: "", + Database: "", + TableName: "shareTable", + UserID: setup.UserName, + Password: setup.Password, + Address: setup.Address, + } + _, err := mtw.NewMultiGoroutineTable(opt) + So(err, ShouldNotBeNil) + _, err = ddb.RunScript("undef(`shareTable, SHARED)") + So(err, ShouldBeNil) + }) + Convey("test_multithreadTableWriterTest_DFS_Table_mutilthread_specified_not_partitioncolexception", func() { + scriptDFSHASH := ` + if(existsDatabase("` + DBdfsPath + `")){ + dropDatabase("` + DBdfsPath + `") + } + datetest=table(1000:0,["datev", "id"],[DATE,LONG]) + db=database("` + DBdfsPath + `",HASH, [MONTH,10]) + pt=db.createPartitionedTable(datetest,"` + DfsTableName1 + `",'datev') + ` + _, err = ddb.RunScript(scriptDFSHASH) + So(err, ShouldBeNil) + opt := &mtw.Option{ + GoroutineCount: 2, + BatchSize: 1, + Throttle: 1, + PartitionCol: "mt", + Database: DBdfsPath, + TableName: DfsTableName1, + UserID: setup.UserName, + Password: setup.Password, + Address: setup.Address, + } + _, err := mtw.NewMultiGoroutineTable(opt) + So(err, ShouldNotBeNil) + }) + Convey("test_multithreadTableWriterTest_DFS_Table_partitioncolnull_exception", func() { + scriptDFSHASH := ` + if(existsDatabase("` + DBdfsPath + `")){ + dropDatabase("` + DBdfsPath + `") + } + datetest=table(1000:0,["datev", "id"],[DATE,LONG]) + db=database("` + DBdfsPath + `",HASH, [MONTH,10]) + pt=db.createPartitionedTable(datetest,"` + DfsTableName1 + `",'datev') + ` + _, err = ddb.RunScript(scriptDFSHASH) + So(err, ShouldBeNil) + opt := &mtw.Option{ + GoroutineCount: 2, + BatchSize: 1, + Throttle: 1, + PartitionCol: "", + Database: DBdfsPath, + TableName: DfsTableName1, + UserID: setup.UserName, + Password: setup.Password, + Address: setup.Address, + } + _, err := mtw.NewMultiGoroutineTable(opt) + So(err, ShouldNotBeNil) + }) + Convey("test_multithreadTableWriterTest_DFS_Table_GoroutineCount_>1_partitioncolnot_partitioncolumn_exception", func() { + scriptDFSHASH := ` + if(existsDatabase("` + DBdfsPath + `")){ + dropDatabase("` + DBdfsPath + `") + } + datetest=table(1000:0,["datev", "id"],[DATE,LONG]) + db=database("` + DBdfsPath + `",HASH, [MONTH,10]) + pt=db.createPartitionedTable(datetest,"` + DfsTableName1 + `",'datev') + ` + _, err = ddb.RunScript(scriptDFSHASH) + So(err, ShouldBeNil) + opt := &mtw.Option{ + GoroutineCount: 3, + BatchSize: 1, + Throttle: 1, + PartitionCol: "id", + Database: DBdfsPath, + TableName: DfsTableName1, + UserID: setup.UserName, + Password: setup.Password, + Address: setup.Address, + } + _, err := mtw.NewMultiGoroutineTable(opt) + So(err, ShouldNotBeNil) + }) + Convey("test_multithreadTableWriterTest_insert_different_data_type_exception", func() { + scriptDFSHASH := ` + if(existsDatabase("` + DBdfsPath + `")){ + dropDatabase("` + DBdfsPath + `") + } + datetest=table(1000:0,["datev", "id"],[DATE,LONG]) + db=database("` + DBdfsPath + `",HASH, [MONTH,10]) + pt=db.createPartitionedTable(datetest,"` + DfsTableName1 + `",'datev') + 
` + _, err = ddb.RunScript(scriptDFSHASH) + So(err, ShouldBeNil) + opt := &mtw.Option{ + GoroutineCount: 2, + BatchSize: 1, + Throttle: 1, + PartitionCol: "datev", + Database: DBdfsPath, + TableName: DfsTableName1, + UserID: setup.UserName, + Password: setup.Password, + Address: setup.Address, + } + mtt, err := mtw.NewMultiGoroutineTable(opt) + So(err, ShouldBeNil) + err = mtt.Insert("bjsa", int64(1)) + So(err, ShouldNotBeNil) + mtt.WaitForGoroutineCompletion() + So(err.Error(), ShouldResemble, "the type of in must be time.Time when datatype is DtDate") + count, err := ddb.RunScript("exec count(*) from loadTable('" + DBdfsPath + "', '" + DfsTableName1 + "')") + So(err, ShouldBeNil) + dataNum := count.(*model.Scalar).Value() + So(dataNum, ShouldEqual, 0) + _, err = ddb.RunScript(`dropDatabase("` + DBdfsPath + `")`) + So(err, ShouldBeNil) + }) + Convey("test_multithreadTableWriterTest_Memory_Table_TableName_empty_exception", func() { + scriptMemoryTable := "t = table(1000:0, `id`x, [LONG, LONG]);share t as shareTable;" + _, err = ddb.RunScript(scriptMemoryTable) + So(err, ShouldBeNil) + opt := &mtw.Option{ + GoroutineCount: 2, + BatchSize: 1, + Throttle: 1, + PartitionCol: "id", + Database: "", + TableName: "", + UserID: setup.UserName, + Password: setup.Password, + Address: setup.Address, + } + _, err := mtw.NewMultiGoroutineTable(opt) + So(err, ShouldNotBeNil) + _, err = ddb.RunScript("undef(`shareTable, SHARED)") + So(err, ShouldBeNil) + }) + Convey("test_multiGoroutineTable_insert_column_less_than_expected_exception", func() { + scriptDFSHASH := ` + if(existsDatabase("` + DBdfsPath + `")){ + dropDatabase("` + DBdfsPath + `") + } + datetest=table(1000:0,["datev", "id"],[DATE,LONG]) + db=database("` + DBdfsPath + `",HASH, [MONTH,10]) + pt=db.createPartitionedTable(datetest,"` + DfsTableName1 + `",'datev') + ` + _, err = ddb.RunScript(scriptDFSHASH) + So(err, ShouldBeNil) + opt := &mtw.Option{ + GoroutineCount: 2, + BatchSize: 1, + Throttle: 1, + PartitionCol: "datev", + Database: DBdfsPath, + TableName: DfsTableName1, + UserID: setup.UserName, + Password: setup.Password, + Address: setup.Address, + } + mtt, err := mtw.NewMultiGoroutineTable(opt) + So(err, ShouldBeNil) + err = mtt.Insert(time.Date(2022, time.Month(1), 1, 1, 1, 0, 0, time.UTC)) + So(err, ShouldNotBeNil) + }) + Convey("test_multiGoroutineTable_insert_null_rows_exception", func() { + scriptDFSHASH := ` + if(existsDatabase("` + DBdfsPath + `")){ + dropDatabase("` + DBdfsPath + `") + } + datetest=table(1000:0,["datev", "id"],[DATE,LONG]) + db=database("` + DBdfsPath + `",HASH, [MONTH,10]) + pt=db.createPartitionedTable(datetest,"` + DfsTableName1 + `",'datev') + ` + _, err = ddb.RunScript(scriptDFSHASH) + So(err, ShouldBeNil) + opt := &mtw.Option{ + GoroutineCount: 2, + BatchSize: 1, + Throttle: 1, + PartitionCol: "datev", + Database: DBdfsPath, + TableName: DfsTableName1, + UserID: setup.UserName, + Password: setup.Password, + Address: setup.Address, + } + mtt, err := mtw.NewMultiGoroutineTable(opt) + So(err, ShouldBeNil) + err = mtt.Insert() + So(err, ShouldNotBeNil) + }) + Convey("test_multiGoroutineTable_insert_column_morethan_expected_exception", func() { + scriptDFSHASH := ` + if(existsDatabase("` + DBdfsPath + `")){ + dropDatabase("` + DBdfsPath + `") + } + datetest=table(1000:0,["datev", "id"],[DATE,LONG]) + db=database("` + DBdfsPath + `",HASH, [MONTH,10]) + pt=db.createPartitionedTable(datetest,"` + DfsTableName1 + `",'datev') + ` + _, err = ddb.RunScript(scriptDFSHASH) + So(err, ShouldBeNil) + opt := &mtw.Option{ + 
+                GoroutineCount: 2,
+                BatchSize:      1,
+                Throttle:       1,
+                PartitionCol:   "datev",
+                Database:       DBdfsPath,
+                TableName:      DfsTableName1,
+                UserID:         setup.UserName,
+                Password:       setup.Password,
+                Address:        setup.Address,
+            }
+            mtt, err := mtw.NewMultiGoroutineTable(opt)
+            So(err, ShouldBeNil)
+            err = mtt.Insert(time.Date(2022, time.Month(1), 1, 1, 1, 0, 0, time.UTC), int64(10), int32(45))
+            So(err, ShouldNotBeNil)
+        })
+        Convey("test_multithreadTableWriterTest_datatype_exception", func() {
+            scriptGoroutineCount := "t = table(1000:0, `date`id`values,[TIMESTAMP,SYMBOL,INT]);share t as t1;"
+            _, err = ddb.RunScript(scriptGoroutineCount)
+            So(err, ShouldBeNil)
+            opt := &mtw.Option{
+                GoroutineCount: 2,
+                BatchSize:      1,
+                Throttle:       1,
+                PartitionCol:   "id",
+                Database:       "",
+                TableName:      "t1",
+                UserID:         setup.UserName,
+                Password:       setup.Password,
+                Address:        setup.Address,
+            }
+            mtt, err := mtw.NewMultiGoroutineTable(opt)
+            So(err, ShouldBeNil)
+            tb := make([][]model.DataType, 0)
+            for i := 0; i < 3; i++ {
+                rowData := make([]model.DataType, 0)
+                dt1, _ := model.NewDataType(model.DtDate, time.Date(2022, time.Month(1), i, 1, 1, 0, 0, time.UTC))
+                rowData = append(rowData, dt1)
+                dt2, _ := model.NewDataType(model.DtString, "AAOL")
+                rowData = append(rowData, dt2)
+                dt3, _ := model.NewDataType(model.DtInt, int32(16+i))
+                rowData = append(rowData, dt3)
+                tb = append(tb, rowData)
+            }
+            err = mtt.InsertUnwrittenData(tb)
+            So(err, ShouldBeNil)
+            mtt.WaitForGoroutineCompletion()
+            errmsg := mtt.GetStatus().ErrMsg
+            So(errmsg, ShouldEqual, "failed to set DataType(date) into DataTypeList(timestamp)")
+            unSetRows := mtt.GetStatus().UnSentRows
+            FailedRows := mtt.GetStatus().FailedRows
+            So(FailedRows+unSetRows, ShouldEqual, 3)
+            IsExit := mtt.GetStatus().IsExit
+            So(IsExit, ShouldEqual, true)
+        })
+        Convey("TestMultiGoroutineTable_insert_dfs_value_value_ex", func() {
+            script := "Database = \"dfs://test_MultithreadedTableWriter\"\n" +
+                "if(exists(Database)){\n" +
+                "\tdropDatabase(Database)\t\n" +
+                "}\n" +
+                "db1=database(\"\", VALUE, 1969.12.01..1969.12.10)\n" +
+                "\tdb2=database(\"\", VALUE, 0..10)\n" +
+                "\tdb=database(Database, COMPO, [db1, db2], , \"OLAP\")\n" +
+                "t=table(1:0, `sym`tradeDate`tradePrice`vwap`volume`valueTrade, [SYMBOL, DATETIME, DOUBLE, DOUBLE, INT, DOUBLE])\n;share t as t1;" +
+                "\tcreatePartitionedTable(dbHandle=db, table=t, tableName=`pt, partitionColumns=[\"tradeDate\", \"volume\"])\n"
+            _, err = ddb.RunScript(script)
+            So(err, ShouldBeNil)
+            opt := &mtw.Option{
+                GoroutineCount: 1,
+                BatchSize:      1000,
+                Throttle:       20,
+                PartitionCol:   "sym",
+                Database:       "dfs://test_MultithreadedTableWriter",
+                TableName:      "pt",
+                UserID:         setup.UserName,
+                Password:       setup.Password,
+                Address:        setup.Address,
+            }
+            _, err = mtw.NewMultiGoroutineTable(opt)
+            So(err, ShouldNotBeNil)
+            _, err = ddb.RunScript("undef(`t1, SHARED)")
+            So(err, ShouldBeNil)
+            _, err = ddb.RunScript("dropDatabase(\"dfs://test_MultithreadedTableWriter\")")
+            So(err, ShouldBeNil)
+        })
+    })
+})
+}
+
+func TestMultiGoroutineTable_all_data_type(t *testing.T) {
+    Convey("test_multithreadTableWriterTest_all_data_type", t, func() {
+        ddb, err := api.NewSimpleDolphinDBClient(context.TODO(), setup.Address, setup.UserName, setup.Password)
+        So(err, ShouldBeNil)
+        scriptalldatatype := `
+        t = table(1000:0, ["boolv", "charv", "shortv", "intv", "longv", "datev", "monthv", "timestampv", "floatv", "doublev", "stringv", "sym", "uuidv", "int128v", "ipv"],
+        [BOOL, CHAR, SHORT, INT, LONG, DATE, MONTH, TIMESTAMP, FLOAT, DOUBLE, STRING, SYMBOL, UUID, INT128, IPADDR]);
+        share t as all_data_type`
+        _, err = ddb.RunScript(scriptalldatatype)
+        So(err, ShouldBeNil)
+        opt := &mtw.Option{
+            GoroutineCount: 2,
+            BatchSize:      1,
+            Throttle:       1,
+            PartitionCol:   "intv",
+            Database:       "",
+            TableName:      "all_data_type",
+            UserID:         setup.UserName,
+            Password:       setup.Password,
+            Address:        setup.Address,
+        }
+        mtt, err := mtw.NewMultiGoroutineTable(opt)
+        So(err, ShouldBeNil)
+        err = insertalldatatype(mtt)
+        So(err, ShouldBeNil)
+        mtt.WaitForGoroutineCompletion()
+        ErrMsg := mtt.GetStatus().ErrMsg
+        So(ErrMsg, ShouldEqual, "")
+        re, err := ddb.RunScript("select * from all_data_type")
+        So(err, ShouldBeNil)
+        reTable := re.(*model.Table)
+        reColNameList := reTable.GetColumnNames()
+        exColNameList := []string{"boolv", "charv", "shortv", "intv", "longv", "datev", "monthv", "timestampv", "floatv", "doublev", "stringv", "sym", "uuidv", "int128v", "ipv"}
+        So(reColNameList, ShouldResemble, exColNameList)
+        reboolv := reTable.GetColumnByName("boolv").Data.Value()
+        exboolv := []byte{1, 0, 1, 0, 0, 0}
+        CheckListEqual(reboolv, exboolv)
+        _, err = ddb.RunScript("undef(`all_data_type, SHARED)")
+        So(err, ShouldBeNil)
+        err = ddb.Close()
+        So(err, ShouldBeNil)
+    })
+}
+
+func TestMultiGoroutineTable_GoroutineCount(t *testing.T) {
+    Convey("test_multithreadTableWriterTest_GoroutineCount", t, func() {
+        ddb, err := api.NewSimpleDolphinDBClient(context.TODO(), setup.Address, setup.UserName, setup.Password)
+        So(err, ShouldBeNil)
+        scriptGoroutineCount := "t = table(1000:0, `date`id`values,[TIMESTAMP,SYMBOL,INT]);share t as t1;"
+        _, err = ddb.RunScript(scriptGoroutineCount)
+        So(err, ShouldBeNil)
+        opt := &mtw.Option{
+            GoroutineCount: 2,
+            BatchSize:      1,
+            Throttle:       1,
+            PartitionCol:   "id",
+            Database:       "",
+            TableName:      "t1",
+            UserID:         setup.UserName,
+            Password:       setup.Password,
+            Address:        setup.Address,
+        }
+        mtt, err := mtw.NewMultiGoroutineTable(opt)
+        So(err, ShouldBeNil)
+        err = mtt.Insert(time.Date(2022, time.Month(1), 1, 1, 1, 0, 0, time.UTC), "AAOL", int32(45))
+        So(err, ShouldBeNil)
+        err = mtt.Insert(time.Date(1969, time.Month(1), 1, 1, 1, 0, 0, time.UTC), "ONSL", int32(45))
+        So(err, ShouldBeNil)
+        tb := make([][]model.DataType, 0)
+        for i := 0; i < 3; i++ {
+            rowData := make([]model.DataType, 0)
+            dt1, _ := model.NewDataType(model.DtTimestamp, time.Date(2022, time.Month(1), i, 1, 1, 0, 0, time.UTC))
+            rowData = append(rowData, dt1)
+            dt2, _ := model.NewDataType(model.DtString, "AAOL")
+            rowData = append(rowData, dt2)
+            dt3, _ := model.NewDataType(model.DtInt, int32(16+i))
+            rowData = append(rowData, dt3)
+            tb = append(tb, rowData)
+        }
+        err = mtt.InsertUnwrittenData(tb)
+        So(err, ShouldBeNil)
+        time.Sleep(3 * time.Second)
+        re, err := ddb.RunScript("select * from t1")
+        So(err, ShouldBeNil)
+        reTable := re.(*model.Table)
+        So(reTable.Rows()+mtt.GetStatus().UnSentRows+mtt.GetStatus().FailedRows, ShouldEqual, 5)
+        tb = make([][]model.DataType, 0)
+        for i := 0; i < 3; i++ {
+            rowData := make([]model.DataType, 0)
+            dt1, _ := model.NewDataType(model.DtTimestamp, time.Date(2022, time.Month(1), i, 1, 1, 0, 0, time.UTC))
+            rowData = append(rowData, dt1)
+            dt2, _ := model.NewDataType(model.DtString, "ONSL")
+            rowData = append(rowData, dt2)
+            dt3, _ := model.NewDataType(model.DtInt, int32(16+i))
+            rowData = append(rowData, dt3)
+            tb = append(tb, rowData)
+        }
+        err = mtt.InsertUnwrittenData(tb)
+        So(err, ShouldBeNil)
+        mtt.WaitForGoroutineCompletion()
+        re, err = ddb.RunScript("select * from t1")
+        So(err, ShouldBeNil)
+        reTable = re.(*model.Table)
+        So(reTable.Rows(), ShouldEqual, 8)
+        _, err = ddb.RunScript("undef(`t1,SHARED)")
+        So(err, ShouldBeNil)
+        err = ddb.Close()
+        So(err, ShouldBeNil)
+    })
+}
+
+func TestMultiGoroutineTable_null(t *testing.T) {
+    Convey("test_multiGoroutineTable_prepare", t, func() {
+        ddb, err := api.NewSimpleDolphinDBClient(context.TODO(), setup.Address, setup.UserName, setup.Password)
+        So(err, ShouldBeNil)
+        Convey("test_multithreadTableWriterTest_insert_all_null", func() {
+            scriptGoroutineCount := "t = table(1000:0, `boolv`charv`shortv`longv`datev`monthv`secondv`datetimev`timestampv`nanotimev`nanotimestampv`floatv`doublev`symbolv`stringv`uuidv`ipaddrv`int128v`intv`arrv`blobv," +
+                "[BOOL,CHAR,SHORT,LONG,DATE,MONTH,SECOND,DATETIME,TIMESTAMP,NANOTIME,NANOTIMESTAMP,FLOAT,DOUBLE,SYMBOL,STRING,UUID, IPADDR, INT128,INT,INT,BLOB]);" +
+                "share t as t1;"
+            _, err = ddb.RunScript(scriptGoroutineCount)
+            So(err, ShouldBeNil)
+            opt := &mtw.Option{
+                GoroutineCount: 1,
+                BatchSize:      1,
+                Throttle:       1,
+                PartitionCol:   "boolv",
+                Database:       "",
+                TableName:      "t1",
+                UserID:         setup.UserName,
+                Password:       setup.Password,
+                Address:        setup.Address,
+            }
+            mtt, err := mtw.NewMultiGoroutineTable(opt)
+            So(err, ShouldBeNil)
+            err = mtt.Insert(nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil)
+            So(err, ShouldBeNil)
+            mtt.WaitForGoroutineCompletion()
+            re, err := ddb.RunScript("select * from t1")
+            So(err, ShouldBeNil)
+            reTable := re.(*model.Table)
+            So(reTable.Rows(), ShouldEqual, 1)
+            _, err = ddb.RunScript("undef(`t1,SHARED)")
+            So(err, ShouldBeNil)
+        })
+
+        Convey("test_multithreadTableWriterTest_insert_parted_null", func() {
+            scriptGoroutineCount := "t = table(1000:0, `boolv`charv`shortv`longv`datev`monthv`secondv`datetimev`timestampv`nanotimev`nanotimestampv`floatv`doublev`symbolv`stringv`uuidv`ipaddrv`int128v`intv`arrv`blobv," +
+                "[BOOL,CHAR,SHORT,LONG,DATE,MONTH,SECOND,DATETIME,TIMESTAMP,NANOTIME,NANOTIMESTAMP,FLOAT,DOUBLE,SYMBOL,STRING,UUID, IPADDR, INT128,INT,INT,BLOB]);" +
+                "share t as t1;"
+            _, err = ddb.RunScript(scriptGoroutineCount)
+            So(err, ShouldBeNil)
+            opt := &mtw.Option{
+                GoroutineCount: 1,
+                BatchSize:      1,
+                Throttle:       1,
+                PartitionCol:   "boolv",
+                Database:       "",
+                TableName:      "t1",
+                UserID:         setup.UserName,
+                Password:       setup.Password,
+                Address:        setup.Address,
+            }
+            mtt, err := mtw.NewMultiGoroutineTable(opt)
+            So(err, ShouldBeNil)
+            err = mtt.Insert(nil, nil, int16(1), int64(4), nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil)
+            So(err, ShouldBeNil)
+            mtt.WaitForGoroutineCompletion()
+            re, err := ddb.RunScript("select * from t1")
+            So(err, ShouldBeNil)
+            reTable := re.(*model.Table)
+            So(reTable.Rows(), ShouldEqual, 1)
+            _, err = ddb.RunScript("undef(`t1,SHARED)")
+            So(err, ShouldBeNil)
+        })
+        err = ddb.Close()
+        So(err, ShouldBeNil)
+    })
+}
+
+func TestMultiGoroutineTable_getStatus_write_successful(t *testing.T) {
+    Convey("test_multithreadTableWriterTest_getStatus_write_successful", t, func() {
+        ddb, err := api.NewSimpleDolphinDBClient(context.TODO(), setup.Address, setup.UserName, setup.Password)
+        So(err, ShouldBeNil)
+        scriptGoroutineCount := "t = streamTable(1000:0, `intv`datev,[INT,DATE]);" + "share t as t1;"
+        _, err = ddb.RunScript(scriptGoroutineCount)
+        So(err, ShouldBeNil)
+        opt := &mtw.Option{
+            GoroutineCount: 1,
+            BatchSize:      1,
+            Throttle:       1,
+            PartitionCol:   "datev",
+            Database:       "",
+            TableName:      "t1",
+            UserID:         setup.UserName,
+            Password:       setup.Password,
+            Address:        setup.Address,
+        }
+        mtt, err := mtw.NewMultiGoroutineTable(opt)
+        So(err, ShouldBeNil)
+        for i := 0; i < 15; i++ {
+            err = mtt.Insert(int32(i), time.Date(1969, time.Month(12), 20+i, 1, 1, 0, 0, time.UTC))
+            So(err, ShouldBeNil)
+        }
+        mtt.WaitForGoroutineCompletion()
+        status := mtt.GetStatus()
+        So(status.FailedRows, ShouldEqual, 0)
+        So(status.ErrMsg, ShouldEqual, "")
+        So(status.IsExit, ShouldBeTrue)
+        So(status.SentRows, ShouldEqual, 15)
+        _, err = ddb.RunScript("undef(`t1,SHARED)")
+        So(err, ShouldBeNil)
+        err = ddb.Close()
+        So(err, ShouldBeNil)
+    })
+}
+
+func TestMultithreadTableWriterTest_getStatus_write_successful_normalData(t *testing.T) {
+    Convey("test_multithreadTableWriterTest_getStatus_write_successful_normalData", t, func() {
+        ddb, err := api.NewSimpleDolphinDBClient(context.TODO(), setup.Address, setup.UserName, setup.Password)
+        So(err, ShouldBeNil)
+        scriptGoroutineCount := "t = streamTable(1000:0, `intv`datev,[INT,DATE]);" + "share t as t1;"
+        _, err = ddb.RunScript(scriptGoroutineCount)
+        So(err, ShouldBeNil)
+        opt := &mtw.Option{
+            GoroutineCount: 1,
+            BatchSize:      100000,
+            Throttle:       1000,
+            PartitionCol:   "datev",
+            Database:       "",
+            TableName:      "t1",
+            UserID:         setup.UserName,
+            Password:       setup.Password,
+            Address:        setup.Address,
+        }
+        mtt, err := mtw.NewMultiGoroutineTable(opt)
+        So(err, ShouldBeNil)
+        for i := 0; i < 15; i++ {
+            err = mtt.Insert(int32(i), time.Date(1969, time.Month(12), 20+i, 1, 1, 0, 0, time.UTC))
+            So(err, ShouldBeNil)
+        }
+        status := mtt.GetStatus()
+        So(status.FailedRows, ShouldEqual, 0)
+        So(status.ErrMsg, ShouldEqual, "")
+        So(status.IsExit, ShouldBeFalse)
+        So(status.SentRows, ShouldEqual, 0)
+        mtt.WaitForGoroutineCompletion()
+        status = mtt.GetStatus()
+        So(status.FailedRows, ShouldEqual, 0)
+        So(status.ErrMsg, ShouldEqual, "")
+        So(status.IsExit, ShouldBeTrue)
+        So(status.SentRows, ShouldEqual, 15)
+        _, err = ddb.RunScript("undef(`t1,SHARED)")
+        So(err, ShouldBeNil)
+        err = ddb.Close()
+        So(err, ShouldBeNil)
+    })
+}
+
+func TestMultiGoroutineTable_insert_bool(t *testing.T) {
+    Convey("TestMultiGoroutineTable_insert_bool", t, func() {
+        ddb, err := api.NewSimpleDolphinDBClient(context.TODO(), setup.Address, setup.UserName, setup.Password)
+        So(err, ShouldBeNil)
+        scriptGoroutineCount := "t = streamTable(1000:0, `bool`id," +
+            "[BOOL,INT]);" +
+            "share t as t1;"
+        _, err = ddb.RunScript(scriptGoroutineCount)
+        So(err, ShouldBeNil)
+        opt := &mtw.Option{
+            GoroutineCount: 1,
+            BatchSize:      1,
+            Throttle:       1,
+            PartitionCol:   "bool",
+            Database:       "",
+            TableName:      "t1",
+            UserID:         setup.UserName,
+            Password:       setup.Password,
+            Address:        setup.Address,
+        }
+        mtt, err := mtw.NewMultiGoroutineTable(opt)
+        So(err, ShouldBeNil)
+        err = mtt.Insert(byte(1), int32(16))
+        So(err, ShouldBeNil)
+        err = mtt.Insert(byte(0), int32(16))
+        So(err, ShouldBeNil)
+        err = mtt.Insert(nil, int32(16))
+        So(err, ShouldBeNil)
+        mtt.WaitForGoroutineCompletion()
+        re, err := ddb.RunScript("select * from t1")
+        So(err, ShouldBeNil)
+        reTable := re.(*model.Table)
+        So(reTable.Rows(), ShouldEqual, 3)
+        So(reTable.GetColumnByName("id").String(), ShouldEqual, "vector([16, 16, 16])")
+        So(reTable.GetColumnByName("bool").Data.Value()[0], ShouldEqual, true)
+        So(reTable.GetColumnByName("bool").Data.Value()[1], ShouldEqual, false)
+        So(reTable.GetColumnByName("bool").Get(2).IsNull(), ShouldEqual, true)
+        _, err = ddb.RunScript("undef(`t1,SHARED)")
+        So(err, ShouldBeNil)
+        err = ddb.Close()
+        So(err, ShouldBeNil)
+    })
+}
+func TestMultiGoroutineTable_insert_byte_int32_int64_int16(t *testing.T) {
+    Convey("TestMultiGoroutineTable_insert_byte", t, func() {
+        ddb, err := api.NewSimpleDolphinDBClient(context.TODO(), setup.Address, setup.UserName, setup.Password)
+        So(err, ShouldBeNil)
+        scriptGoroutineCount := "t = streamTable(1000:0, `char`int`long`short`id," +
+            "[CHAR,INT,LONG,SHORT,INT]);" +
+            "share t as t1;"
+        _, err = ddb.RunScript(scriptGoroutineCount)
+        So(err, ShouldBeNil)
+        opt := &mtw.Option{
+            GoroutineCount: 1,
+            BatchSize:      1,
+            Throttle:       1,
+            PartitionCol:   "id",
+            Database:       "",
+            TableName:      "t1",
+            UserID:         setup.UserName,
+            Password:       setup.Password,
+            Address:        setup.Address,
+        }
+        mtt, err := mtw.NewMultiGoroutineTable(opt)
+        So(err, ShouldBeNil)
+        err = mtt.Insert(byte(1), int32(1), int64(1), int16(1), int32(1))
+        So(err, ShouldBeNil)
+        err = mtt.Insert(nil, int32(1), int64(1), int16(1), int32(1))
+        So(err, ShouldBeNil)
+        mtt.WaitForGoroutineCompletion()
+        re, err := ddb.RunScript("select * from t1")
+        So(err, ShouldBeNil)
+        reTable := re.(*model.Table)
+        So(reTable.Rows(), ShouldEqual, 2)
+        So(reTable.GetColumnByName("id").String(), ShouldEqual, "vector([1, 1])")
+        So(reTable.GetColumnByName("long").String(), ShouldEqual, "vector([1, 1])")
+        So(reTable.GetColumnByName("short").String(), ShouldEqual, "vector([1, 1])")
+        So(reTable.GetColumnByName("char").Data.Value()[0], ShouldEqual, 1)
+        So(reTable.GetColumnByName("char").Get(1).IsNull(), ShouldEqual, true)
+        _, err = ddb.RunScript("undef(`t1,SHARED)")
+        So(err, ShouldBeNil)
+        err = ddb.Close()
+        So(err, ShouldBeNil)
+    })
+}
+
+func TestMultiGoroutineTable_insert_float32_float64(t *testing.T) {
+    Convey("TestMultiGoroutineTable_insert_double", t, func() {
+        ddb, err := api.NewSimpleDolphinDBClient(context.TODO(), setup.Address, setup.UserName, setup.Password)
+        So(err, ShouldBeNil)
+        defer ddb.Close()
+        scriptGoroutineCount := "t = streamTable(1000:0, `floatv`doublev`id," +
+            "[FLOAT,DOUBLE,INT]);" +
+            "share t as t1;"
+        _, err = ddb.RunScript(scriptGoroutineCount)
+        So(err, ShouldBeNil)
+        opt := &mtw.Option{
+            GoroutineCount: 1,
+            BatchSize:      1,
+            Throttle:       1,
+            PartitionCol:   "id",
+            Database:       "",
+            TableName:      "t1",
+            UserID:         setup.UserName,
+            Password:       setup.Password,
+            Address:        setup.Address,
+        }
+        mtt, err := mtw.NewMultiGoroutineTable(opt)
+        So(err, ShouldBeNil)
+        err = mtt.Insert(float32(2.5), float64(5.6), int32(10))
+        So(err, ShouldBeNil)
+        err = mtt.Insert(nil, nil, int32(1))
+        So(err, ShouldBeNil)
+        mtt.WaitForGoroutineCompletion()
+        re, err := ddb.RunScript("select * from t1")
+        So(err, ShouldBeNil)
+        reTable := re.(*model.Table)
+        So(reTable.Rows(), ShouldEqual, 2)
+        So(reTable.GetColumnByName("id").String(), ShouldEqual, "vector([10, 1])")
+        So(reTable.GetColumnByName("floatv").Data.Value()[0], ShouldEqual, float32(2.5))
+        So(reTable.GetColumnByName("floatv").Get(1).IsNull(), ShouldEqual, true)
+        So(reTable.GetColumnByName("doublev").Data.Value()[0], ShouldEqual, float64(5.6))
+        So(reTable.GetColumnByName("doublev").Get(1).IsNull(), ShouldEqual, true)
+        _, err = ddb.RunScript("undef(`t1,SHARED)")
+        So(err, ShouldBeNil)
+    })
+}
+
+func TestMultiGoroutineTable_streamTable_insert_timetype(t *testing.T) {
+    Convey("TestMultiGoroutineTable_streamTable_insert_timetype", t, func() {
+        ddb, err := api.NewSimpleDolphinDBClient(context.TODO(), setup.Address, setup.UserName, setup.Password)
+        So(err, ShouldBeNil)
+        defer ddb.Close()
+        scriptGoroutineCount := "t = streamTable(1000:0, `datev`monthv`secondv`minutev`datetimev`timestampv`datehourv`timev`nanotimev`nanotimestampv," +
+            "[DATE, MONTH, SECOND, MINUTE, DATETIME, TIMESTAMP, DATEHOUR, TIME, NANOTIME, NANOTIMESTAMP]);" +
+            "share t as t1;"
+        _, err = ddb.RunScript(scriptGoroutineCount)
+        So(err, ShouldBeNil)
+        opt := &mtw.Option{
+            GoroutineCount: 1,
+            BatchSize:      5,
+            Throttle:       1,
+            PartitionCol:   "datev",
+            Database:       "",
+            TableName:      "t1",
+            UserID:         setup.UserName,
+            Password:       setup.Password,
+            Address:        setup.Address,
+        }
+        mtt, err := mtw.NewMultiGoroutineTable(opt)
+        So(err, ShouldBeNil)
+        err = mtt.Insert(time.Date(1969, time.Month(12), 1, 1, 1, 0, 0, time.UTC),
+            time.Date(1970, time.Month(12), 1, 12, 23, 45, 144145868, time.UTC),
+            time.Date(1970, time.Month(12), 1, 12, 23, 45, 144145868, time.UTC),
+            time.Date(1969, time.Month(12), 31, 23, 59, 59, 144145868, time.UTC),
+            time.Date(1968, time.Month(11), 1, 23, 59, 59, 154140487, time.UTC),
+            time.Date(1970, time.Month(12), 1, 12, 23, 45, 489548541, time.UTC),
+            time.Date(1970, time.Month(1), 1, 12, 23, 0, 495321123, time.UTC),
+            time.Date(1968, time.Month(11), 1, 23, 59, 59, 154140487, time.UTC),
+            time.Date(1968, time.Month(11), 1, 23, 59, 59, 154140487, time.UTC),
+            time.Date(1968, time.Month(11), 1, 23, 59, 59, 154140487, time.UTC))
+        So(err, ShouldBeNil)
+        err = mtt.Insert(nil, nil, nil, nil, nil, nil, nil, nil, nil, nil)
+        So(err, ShouldBeNil)
+        err = mtt.Insert(time.Date(1969, time.Month(12), 1, 1, 1, 0, 0, time.UTC),
+            nil,
+            time.Date(1970, time.Month(12), 1, 12, 23, 45, 144145868, time.UTC),
+            nil,
+            time.Date(1968, time.Month(11), 1, 23, 59, 59, 154140487, time.UTC),
+            time.Date(1970, time.Month(12), 1, 12, 23, 45, 489485541, time.UTC),
+            nil,
+            time.Date(1968, time.Month(11), 1, 23, 59, 59, 154140487, time.UTC),
+            time.Date(1968, time.Month(11), 1, 23, 59, 59, 154140487, time.UTC),
+            nil)
+        So(err, ShouldBeNil)
+        mtt.WaitForGoroutineCompletion()
+        re, err := ddb.RunScript("select * from t1")
+        So(err, ShouldBeNil)
+        reTable := re.(*model.Table)
+        So(reTable.Rows(), ShouldEqual, 3)
+        So(reTable.GetColumnByName("datev").Get(0).String(), ShouldEqual, "1969.12.01")
+        So(reTable.GetColumnByName("monthv").Get(0).String(), ShouldEqual, "1970.12M")
+        So(reTable.GetColumnByName("secondv").Get(0).String(), ShouldEqual, "12:23:45")
+        So(reTable.GetColumnByName("minutev").Get(0).String(), ShouldEqual, "23:59m")
+        So(reTable.GetColumnByName("datetimev").Get(0).String(), ShouldEqual, "1968.11.01T23:59:59")
+        So(reTable.GetColumnByName("timestampv").Get(0).String(), ShouldEqual, "1970.12.01T12:23:45.489")
+        So(reTable.GetColumnByName("datehourv").Get(0).String(), ShouldEqual, "1970.01.01T12")
+        So(reTable.GetColumnByName("timev").Get(0).String(), ShouldEqual, "23:59:59.154")
+        So(reTable.GetColumnByName("nanotimev").Get(0).String(), ShouldEqual, "23:59:59.154140487")
+        So(reTable.GetColumnByName("nanotimestampv").Get(0).String(), ShouldEqual, "1968.11.01T23:59:59.154140487")
+        So(reTable.GetColumnByName("datev").Get(1).IsNull(), ShouldEqual, true)
+        So(reTable.GetColumnByName("monthv").Get(1).IsNull(), ShouldEqual, true)
+        So(reTable.GetColumnByName("secondv").Get(1).IsNull(), ShouldEqual, true)
+        So(reTable.GetColumnByName("minutev").Get(1).IsNull(), ShouldEqual, true)
+        So(reTable.GetColumnByName("datetimev").Get(1).IsNull(), ShouldEqual, true)
+        So(reTable.GetColumnByName("timestampv").Get(1).IsNull(), ShouldEqual, true)
+        So(reTable.GetColumnByName("datehourv").Get(1).IsNull(), ShouldEqual, true)
+        So(reTable.GetColumnByName("timev").Get(1).IsNull(), ShouldEqual, true)
+        So(reTable.GetColumnByName("nanotimev").Get(1).IsNull(), ShouldEqual, true)
+        So(reTable.GetColumnByName("nanotimestampv").Get(1).IsNull(), ShouldEqual, true)
+        So(reTable.GetColumnByName("datev").Get(2).String(), ShouldEqual, "1969.12.01")
+        So(reTable.GetColumnByName("monthv").Get(2).IsNull(), ShouldEqual, true)
+        So(reTable.GetColumnByName("secondv").Get(2).String(), ShouldEqual, "12:23:45")
+        So(reTable.GetColumnByName("minutev").Get(2).IsNull(), ShouldEqual, true)
+        So(reTable.GetColumnByName("datetimev").Get(2).String(), ShouldEqual, "1968.11.01T23:59:59")
+        So(reTable.GetColumnByName("timestampv").Get(2).String(), ShouldEqual, "1970.12.01T12:23:45.489")
+        So(reTable.GetColumnByName("datehourv").Get(2).IsNull(), ShouldEqual, true)
+        So(reTable.GetColumnByName("timev").Get(2).String(), ShouldEqual, "23:59:59.154")
+        So(reTable.GetColumnByName("nanotimev").Get(2).String(), ShouldEqual, "23:59:59.154140487")
+        So(reTable.GetColumnByName("nanotimestampv").Get(2).IsNull(), ShouldEqual, true)
+        _, err = ddb.RunScript("undef(`t1,SHARED)")
+        So(err, ShouldBeNil)
+    })
+}
+
+func TestMultiGoroutineTable_memTable_insert_timetype(t *testing.T) {
+    Convey("TestMultiGoroutineTable_memTable_insert_timetype", t, func() {
+        ddb, err := api.NewSimpleDolphinDBClient(context.TODO(), setup.Address, setup.UserName, setup.Password)
+        So(err, ShouldBeNil)
+        defer ddb.Close()
+        scriptGoroutineCount := "t = table(1000:0, `datev`monthv`secondv`minutev`datetimev`timestampv`datehourv`timev`nanotimev`nanotimestampv," +
+            "[DATE, MONTH, SECOND, MINUTE, DATETIME, TIMESTAMP, DATEHOUR, TIME, NANOTIME, NANOTIMESTAMP]);" +
+            "share t as t1;"
+        _, err = ddb.RunScript(scriptGoroutineCount)
+        So(err, ShouldBeNil)
+        opt := &mtw.Option{
+            GoroutineCount: 1,
+            BatchSize:      5,
+            Throttle:       1,
+            PartitionCol:   "datev",
+            Database:       "",
+            TableName:      "t1",
+            UserID:         setup.UserName,
+            Password:       setup.Password,
+            Address:        setup.Address,
+        }
+        mtt, err := mtw.NewMultiGoroutineTable(opt)
+        So(err, ShouldBeNil)
+        err = mtt.Insert(time.Date(1969, time.Month(12), 1, 1, 1, 0, 0, time.UTC),
+            time.Date(1970, time.Month(12), 1, 12, 23, 45, 489457541, time.UTC),
+            time.Date(1970, time.Month(12), 1, 12, 23, 45, 489457541, time.UTC),
+            time.Date(1969, time.Month(12), 31, 23, 59, 59, 489457541, time.UTC),
+            time.Date(1968, time.Month(11), 1, 23, 59, 59, 154140487, time.UTC),
+            time.Date(1970, time.Month(12), 1, 12, 23, 45, 489457541, time.UTC),
+            time.Date(1970, time.Month(1), 1, 12, 23, 0, 495321123, time.UTC),
+            time.Date(1968, time.Month(11), 1, 23, 59, 59, 154140487, time.UTC),
+            time.Date(1968, time.Month(11), 1, 23, 59, 59, 154140487, time.UTC),
+            time.Date(1968, time.Month(11), 1, 23, 59, 59, 154140487, time.UTC))
+        So(err, ShouldBeNil)
+        err = mtt.Insert(nil, nil, nil, nil, nil, nil, nil, nil, nil, nil)
+        So(err, ShouldBeNil)
+        err = mtt.Insert(time.Date(1969, time.Month(12), 1, 1, 1, 0, 0, time.UTC),
+            nil,
+            time.Date(1970, time.Month(12), 1, 12, 23, 45, 489457541, time.UTC),
+            nil,
+            time.Date(1968, time.Month(11), 1, 23, 59, 59, 154140487, time.UTC),
+            time.Date(1970, time.Month(12), 1, 12, 23, 45, 489457541, time.UTC),
+            nil,
+            time.Date(1968, time.Month(11), 1, 23, 59, 59, 154140487, time.UTC),
+            time.Date(1968, time.Month(11), 1, 23, 59, 59, 154140487, time.UTC),
+            nil)
+        So(err, ShouldBeNil)
+        mtt.WaitForGoroutineCompletion()
+        re, err := ddb.RunScript("select * from t1")
+        So(err, ShouldBeNil)
+        reTable := re.(*model.Table)
+        So(reTable.Rows(), ShouldEqual, 3)
+        So(reTable.GetColumnByName("datev").Get(0).String(), ShouldEqual, "1969.12.01")
+        So(reTable.GetColumnByName("monthv").Get(0).String(), ShouldEqual, "1970.12M")
+        So(reTable.GetColumnByName("secondv").Get(0).String(), ShouldEqual, "12:23:45")
+        So(reTable.GetColumnByName("minutev").Get(0).String(), ShouldEqual, "23:59m")
+        So(reTable.GetColumnByName("datetimev").Get(0).String(), ShouldEqual, "1968.11.01T23:59:59")
+        So(reTable.GetColumnByName("timestampv").Get(0).String(), ShouldEqual, "1970.12.01T12:23:45.489")
+        So(reTable.GetColumnByName("datehourv").Get(0).String(), ShouldEqual, "1970.01.01T12")
+        So(reTable.GetColumnByName("timev").Get(0).String(), ShouldEqual, "23:59:59.154")
+        So(reTable.GetColumnByName("nanotimev").Get(0).String(), ShouldEqual, "23:59:59.154140487")
+        So(reTable.GetColumnByName("nanotimestampv").Get(0).String(), ShouldEqual, "1968.11.01T23:59:59.154140487")
+        So(reTable.GetColumnByName("datev").Get(1).IsNull(), ShouldEqual, true)
+        So(reTable.GetColumnByName("monthv").Get(1).IsNull(), ShouldEqual, true)
+        So(reTable.GetColumnByName("secondv").Get(1).IsNull(), ShouldEqual, true)
+        So(reTable.GetColumnByName("minutev").Get(1).IsNull(), ShouldEqual, true)
+        So(reTable.GetColumnByName("datetimev").Get(1).IsNull(), ShouldEqual, true)
+        So(reTable.GetColumnByName("timestampv").Get(1).IsNull(), ShouldEqual, true)
+        So(reTable.GetColumnByName("datehourv").Get(1).IsNull(), ShouldEqual, true)
+        So(reTable.GetColumnByName("timev").Get(1).IsNull(), ShouldEqual, true)
+        So(reTable.GetColumnByName("nanotimev").Get(1).IsNull(), ShouldEqual, true)
+        So(reTable.GetColumnByName("nanotimestampv").Get(1).IsNull(), ShouldEqual, true)
+        So(reTable.GetColumnByName("datev").Get(2).String(), ShouldEqual, "1969.12.01")
+        So(reTable.GetColumnByName("monthv").Get(2).IsNull(), ShouldEqual, true)
+        So(reTable.GetColumnByName("secondv").Get(2).String(), ShouldEqual, "12:23:45")
+        So(reTable.GetColumnByName("minutev").Get(2).IsNull(), ShouldEqual, true)
+        So(reTable.GetColumnByName("datetimev").Get(2).String(), ShouldEqual, "1968.11.01T23:59:59")
+        So(reTable.GetColumnByName("timestampv").Get(2).String(), ShouldEqual, "1970.12.01T12:23:45.489")
+        So(reTable.GetColumnByName("datehourv").Get(2).IsNull(), ShouldEqual, true)
+        So(reTable.GetColumnByName("timev").Get(2).String(), ShouldEqual, "23:59:59.154")
+        So(reTable.GetColumnByName("nanotimev").Get(2).String(), ShouldEqual, "23:59:59.154140487")
+        So(reTable.GetColumnByName("nanotimestampv").Get(2).IsNull(), ShouldEqual, true)
+        _, err = ddb.RunScript("undef(`t1,SHARED)")
+        So(err, ShouldBeNil)
+    })
+}
+
+func TestMultiGoroutineTable_dfsTable_insert_timetype(t *testing.T) {
+    Convey("TestMultiGoroutineTable_dfsTable_insert_timetype", t, func() {
+        ddb, err := api.NewSimpleDolphinDBClient(context.TODO(), setup.Address, setup.UserName, setup.Password)
+        So(err, ShouldBeNil)
+        defer ddb.Close()
+        scriptdfshashtable := `
+        if(existsDatabase("` + DBdfsPath + `")){
+            dropDatabase("` + DBdfsPath + `")
+        }
+        t=table(1000:0, ["datev", "monthv", "secondv", "minutev", "datetimev", "timestampv", "datehourv", "timev", "nanotimev", "nanotimestampv"], [DATE, MONTH, SECOND, MINUTE, DATETIME, TIMESTAMP, DATEHOUR, TIME, NANOTIME, NANOTIMESTAMP]);
+        db=database("` + DBdfsPath + `", HASH, [MONTH, 10])
+        pt=db.createPartitionedTable(t, "` + DfsTableName1 + `", 'datev')`
+        _, err = ddb.RunScript(scriptdfshashtable)
+        So(err, ShouldBeNil)
+        opt := &mtw.Option{
+            GoroutineCount: 1,
+            BatchSize:      5,
+            Throttle:       1,
+            PartitionCol:   "datev",
+            Database:       DBdfsPath,
+            TableName:      DfsTableName1,
+            UserID:         setup.UserName,
+            Password:       setup.Password,
+            Address:        setup.Address,
+        }
+        mtt, err := mtw.NewMultiGoroutineTable(opt)
+        So(err, ShouldBeNil)
+        err = mtt.Insert(time.Date(1969, time.Month(12), 1, 1, 1, 0, 0, time.UTC),
+            time.Date(1970, time.Month(12), 1, 12, 23, 45, 145868, time.UTC),
+            time.Date(1970, time.Month(12), 1, 12, 23, 45, 145868, time.UTC),
+            time.Date(1969, time.Month(12), 31, 23, 59, 59, 112225671, time.UTC),
+            time.Date(1968, time.Month(11), 1, 23, 59, 59, 154140487, time.UTC),
+            time.Date(1970, time.Month(12), 1, 12, 23, 45, 489541124, time.UTC),
+            time.Date(1970, time.Month(1), 1, 12, 23, 0, 495321123, time.UTC),
+            time.Date(1968, time.Month(11), 1, 23, 59, 59, 154140487, time.UTC),
+            time.Date(1968, time.Month(11), 1, 23, 59, 59, 154140487, time.UTC),
+            time.Date(1968, time.Month(11), 1, 23, 59, 59, 154140487, time.UTC))
+        So(err, ShouldBeNil)
+        err = mtt.Insert(time.Date(1969, time.Month(12), 1, 1, 1, 0, 0, time.UTC), nil, time.Date(1968, time.Month(11), 1, 23, 59, 59, 154140487, time.UTC), nil, nil, nil, nil, nil, nil, nil)
+        So(err, ShouldBeNil)
+        err = mtt.Insert(time.Date(1969, time.Month(12), 1, 1, 1, 0, 0, time.UTC),
+            nil,
+            time.Date(1970, time.Month(12), 1, 12, 23, 45, 145485868, time.UTC),
+            nil,
+            time.Date(1968, time.Month(11), 1, 23, 59, 59, 154140487, time.UTC),
+            time.Date(1970, time.Month(12), 1, 12, 23, 45, 489558941, time.UTC),
+            nil,
+            time.Date(1968, time.Month(11), 1, 23, 59, 59, 154140487, time.UTC),
+            time.Date(1968, time.Month(11), 1, 23, 59, 59, 154140487, time.UTC),
+            nil)
+        So(err, ShouldBeNil)
+        mtt.WaitForGoroutineCompletion()
+        re, err := ddb.RunScript("select * from loadTable('" + DBdfsPath + "', '" + DfsTableName1 + "')")
+        So(err, ShouldBeNil)
+        reTable := re.(*model.Table)
+        So(reTable.Rows(), ShouldEqual, 3)
+        So(reTable.GetColumnByName("datev").Get(0).String(), ShouldEqual, "1969.12.01")
+        So(reTable.GetColumnByName("monthv").Get(0).String(), ShouldEqual, "1970.12M")
+        So(reTable.GetColumnByName("secondv").Get(0).String(), ShouldEqual, "12:23:45")
+        So(reTable.GetColumnByName("minutev").Get(0).String(), ShouldEqual, "23:59m")
+        So(reTable.GetColumnByName("datetimev").Get(0).String(), ShouldEqual, "1968.11.01T23:59:59")
+        So(reTable.GetColumnByName("timestampv").Get(0).String(), ShouldEqual, "1970.12.01T12:23:45.489")
+        So(reTable.GetColumnByName("datehourv").Get(0).String(), ShouldEqual, "1970.01.01T12")
+        So(reTable.GetColumnByName("timev").Get(0).String(), ShouldEqual, "23:59:59.154")
+        So(reTable.GetColumnByName("nanotimev").Get(0).String(), ShouldEqual, "23:59:59.154140487")
+        So(reTable.GetColumnByName("nanotimestampv").Get(0).String(), ShouldEqual, "1968.11.01T23:59:59.154140487")
+        So(reTable.GetColumnByName("datev").Get(1).String(), ShouldEqual, "1969.12.01")
+        So(reTable.GetColumnByName("monthv").Get(1).IsNull(), ShouldEqual, true)
+        So(reTable.GetColumnByName("secondv").Get(1).String(), ShouldEqual, "23:59:59")
+        So(reTable.GetColumnByName("minutev").Get(1).IsNull(), ShouldEqual, true)
+        So(reTable.GetColumnByName("datetimev").Get(1).IsNull(), ShouldEqual, true)
+        So(reTable.GetColumnByName("timestampv").Get(1).IsNull(), ShouldEqual, true)
+        So(reTable.GetColumnByName("datehourv").Get(1).IsNull(), ShouldEqual, true)
+        So(reTable.GetColumnByName("timev").Get(1).IsNull(), ShouldEqual, true)
+        So(reTable.GetColumnByName("nanotimev").Get(1).IsNull(), ShouldEqual, true)
+        So(reTable.GetColumnByName("nanotimestampv").Get(1).IsNull(), ShouldEqual, true)
+        So(reTable.GetColumnByName("datev").Get(2).String(), ShouldEqual, "1969.12.01")
+        So(reTable.GetColumnByName("monthv").Get(2).IsNull(), ShouldEqual, true)
+        So(reTable.GetColumnByName("secondv").Get(2).String(), ShouldEqual, "12:23:45")
+        So(reTable.GetColumnByName("minutev").Get(2).IsNull(), ShouldEqual, true)
+        So(reTable.GetColumnByName("datetimev").Get(2).String(), ShouldEqual, "1968.11.01T23:59:59")
+        So(reTable.GetColumnByName("timestampv").Get(2).String(), ShouldEqual, "1970.12.01T12:23:45.489")
+        So(reTable.GetColumnByName("datehourv").Get(2).IsNull(), ShouldEqual, true)
+        So(reTable.GetColumnByName("timev").Get(2).String(), ShouldEqual, "23:59:59.154")
+        So(reTable.GetColumnByName("nanotimev").Get(2).String(), ShouldEqual, "23:59:59.154140487")
+        So(reTable.GetColumnByName("nanotimestampv").Get(2).IsNull(), ShouldEqual, true)
+        _, err = ddb.RunScript("dropDatabase('" + DBdfsPath + "')")
+        So(err, ShouldBeNil)
+    })
+}
+
+func TestMultiGoroutineTable_dimensionTable_insert_timetype(t *testing.T) {
+    Convey("TestMultiGoroutineTable_dimensionTable_insert_timetype", t, func() {
+        ddb, err := api.NewSimpleDolphinDBClient(context.TODO(), setup.Address, setup.UserName, setup.Password)
+        So(err, ShouldBeNil)
+        defer ddb.Close()
+        scriptdfshashtable := `
+        if(existsDatabase("` + DBdfsPath + `")){
+            dropDatabase("` + DBdfsPath + `")
+        }
+        t = table(1000:0, ["datev", "monthv", "secondv", "minutev", "datetimev", "timestampv", "datehourv", "timev", "nanotimev", "nanotimestampv"], [DATE, MONTH, SECOND, MINUTE, DATETIME, TIMESTAMP, DATEHOUR, TIME, NANOTIME, NANOTIMESTAMP]);
+        db=database("` + DBdfsPath + `", HASH, [MONTH, 10])
+        pt=db.createTable(t, "` + DfsTableName1 + `")`
+        _, err = ddb.RunScript(scriptdfshashtable)
+        So(err, ShouldBeNil)
+        opt := &mtw.Option{
+            GoroutineCount: 1,
+            BatchSize:      5,
+            Throttle:       1,
+            PartitionCol:   "",
+            Database:       DBdfsPath,
+            TableName:      DfsTableName1,
+            UserID:         setup.UserName,
+            Password:       setup.Password,
+            Address:        setup.Address,
+        }
+        mtt, err := mtw.NewMultiGoroutineTable(opt)
+        So(err, ShouldBeNil)
+        err = mtt.Insert(time.Date(1969, time.Month(12), 1, 1, 1, 0, 0, time.UTC),
+            time.Date(1970, time.Month(12), 1, 12, 23, 45, 145861458, time.UTC),
+            time.Date(1970, time.Month(12), 1, 12, 23, 45, 145864878, time.UTC),
+            time.Date(1969, time.Month(12), 31, 23, 59, 59, 111148745, time.UTC),
+            time.Date(1968, time.Month(11), 1, 23, 59, 59, 154140487, time.UTC),
+            time.Date(1970, time.Month(12), 1, 12, 23, 45, 489541487, time.UTC),
+            time.Date(1970, time.Month(1), 1, 12, 23, 0, 495321123, time.UTC),
+            time.Date(1968, time.Month(11), 1, 23, 59, 59, 154140487, time.UTC),
+            time.Date(1968, time.Month(11), 1, 23, 59, 59, 154140487, time.UTC),
+            time.Date(1968, time.Month(11), 1, 23, 59, 59, 154140487, time.UTC))
+        So(err, ShouldBeNil)
+        err = mtt.Insert(time.Date(1969, time.Month(12), 1, 1, 1, 0, 0, time.UTC), nil, time.Date(1968, time.Month(11), 1, 23, 59, 59, 154140487, time.UTC), nil, nil, nil, nil, nil, nil, nil)
+        So(err, ShouldBeNil)
+        err = mtt.Insert(time.Date(1969, time.Month(12), 1, 1, 1, 0, 0, time.UTC),
+            nil,
+            time.Date(1970, time.Month(12), 1, 12, 23, 45, 145887968, time.UTC),
+            nil,
+            time.Date(1968, time.Month(11), 1, 23, 59, 59, 154140487, time.UTC),
+            time.Date(1970, time.Month(12), 1, 12, 23, 45, 489148541, time.UTC),
+            nil,
+            time.Date(1968, time.Month(11), 1, 23, 59, 59, 154140487, time.UTC),
+            time.Date(1968, time.Month(11), 1, 23, 59, 59, 154140487, time.UTC),
+            nil)
+        So(err, ShouldBeNil)
+        mtt.WaitForGoroutineCompletion()
+        re, err := ddb.RunScript("select * from loadTable('" + DBdfsPath + "', '" + DfsTableName1 + "')")
+        So(err, ShouldBeNil)
+        reTable := re.(*model.Table)
+        So(reTable.Rows(), ShouldEqual, 3)
+        So(reTable.GetColumnByName("datev").Get(0).String(), ShouldEqual, "1969.12.01")
+        So(reTable.GetColumnByName("monthv").Get(0).String(), ShouldEqual, "1970.12M")
+        So(reTable.GetColumnByName("secondv").Get(0).String(), ShouldEqual, "12:23:45")
+        So(reTable.GetColumnByName("minutev").Get(0).String(), ShouldEqual, "23:59m")
+        So(reTable.GetColumnByName("datetimev").Get(0).String(), ShouldEqual, "1968.11.01T23:59:59")
+        So(reTable.GetColumnByName("timestampv").Get(0).String(), ShouldEqual, "1970.12.01T12:23:45.489")
+        So(reTable.GetColumnByName("datehourv").Get(0).String(), ShouldEqual, "1970.01.01T12")
+        So(reTable.GetColumnByName("timev").Get(0).String(), ShouldEqual, "23:59:59.154")
+        So(reTable.GetColumnByName("nanotimev").Get(0).String(), ShouldEqual, "23:59:59.154140487")
+        So(reTable.GetColumnByName("nanotimestampv").Get(0).String(), ShouldEqual, "1968.11.01T23:59:59.154140487")
+        So(reTable.GetColumnByName("datev").Get(1).String(), ShouldEqual, "1969.12.01")
+        So(reTable.GetColumnByName("monthv").Get(1).IsNull(), ShouldEqual, true)
+        So(reTable.GetColumnByName("secondv").Get(1).String(), ShouldEqual, "23:59:59")
+        So(reTable.GetColumnByName("minutev").Get(1).IsNull(), ShouldEqual, true)
+        So(reTable.GetColumnByName("datetimev").Get(1).IsNull(), ShouldEqual, true)
+        So(reTable.GetColumnByName("timestampv").Get(1).IsNull(), ShouldEqual, true)
+        So(reTable.GetColumnByName("datehourv").Get(1).IsNull(), ShouldEqual, true)
+        So(reTable.GetColumnByName("timev").Get(1).IsNull(), ShouldEqual, true)
+        So(reTable.GetColumnByName("nanotimev").Get(1).IsNull(), ShouldEqual, true)
+        So(reTable.GetColumnByName("nanotimestampv").Get(1).IsNull(), ShouldEqual, true)
+        So(reTable.GetColumnByName("datev").Get(2).String(), ShouldEqual, "1969.12.01")
+        So(reTable.GetColumnByName("monthv").Get(2).IsNull(), ShouldEqual, true)
+        So(reTable.GetColumnByName("secondv").Get(2).String(), ShouldEqual, "12:23:45")
+        So(reTable.GetColumnByName("minutev").Get(2).IsNull(), ShouldEqual, true)
+        So(reTable.GetColumnByName("datetimev").Get(2).String(), ShouldEqual, "1968.11.01T23:59:59")
+        So(reTable.GetColumnByName("timestampv").Get(2).String(), ShouldEqual, "1970.12.01T12:23:45.489")
+        So(reTable.GetColumnByName("datehourv").Get(2).IsNull(), ShouldEqual, true)
+        So(reTable.GetColumnByName("timev").Get(2).String(), ShouldEqual, "23:59:59.154")
+        So(reTable.GetColumnByName("nanotimev").Get(2).String(), ShouldEqual, "23:59:59.154140487")
+        So(reTable.GetColumnByName("nanotimestampv").Get(2).IsNull(), ShouldEqual, true)
+        _, err = ddb.RunScript("dropDatabase('" + DBdfsPath + "')")
+        So(err, ShouldBeNil)
+    })
+}
+
+func TestMultiGoroutineTable_memTable_insert_localTime(t *testing.T) {
+    Convey("TestMultiGoroutineTable_memTable_insert_localTime", t, func() {
+        ddb, err := api.NewSimpleDolphinDBClient(context.TODO(), setup.Address, setup.UserName, setup.Password)
+        So(err, ShouldBeNil)
+        defer ddb.Close()
+        scriptGoroutineCount := "t = table(1000:0, `datev`monthv`secondv`minutev`datetimev`timestampv`datehourv`timev`nanotimev`nanotimestampv," +
+            "[DATE, MONTH, SECOND, MINUTE, DATETIME, TIMESTAMP, DATEHOUR, TIME, NANOTIME, NANOTIMESTAMP]);" +
+            "share t as t1;"
+        _, err = ddb.RunScript(scriptGoroutineCount)
+        So(err, ShouldBeNil)
+        opt := &mtw.Option{
+            GoroutineCount: 1,
+            BatchSize:      5,
+            Throttle:       1,
+            PartitionCol:   "datev",
+            Database:       "",
+            TableName:      "t1",
+            UserID:         setup.UserName,
+            Password:       setup.Password,
+            Address:        setup.Address,
+        }
+        mtt, err := mtw.NewMultiGoroutineTable(opt)
+        So(err, ShouldBeNil)
+        err = mtt.Insert(time.Date(1969, time.Month(12), 1, 1, 1, 0, 0, time.UTC).Local(),
+            time.Date(1970, time.Month(12), 1, 12, 23, 45, 489457541, time.UTC).Local(),
+            time.Date(1970, time.Month(12), 1, 12, 23, 45, 489457541, time.UTC).Local(),
+            time.Date(1969, time.Month(12), 31, 23, 59, 59, 489457541, time.UTC).Local(),
+            time.Date(1968, time.Month(11), 1, 23, 59, 59, 154140487, time.UTC).Local(),
+            time.Date(1970, time.Month(12), 1, 12, 23, 45, 489457541, time.UTC).Local(),
+            time.Date(1970, time.Month(1), 1, 12, 23, 0, 495321123, time.UTC).Local(),
+            time.Date(1968, time.Month(11), 1, 23, 59, 59, 154140487, time.UTC).Local(),
+            time.Date(1968, time.Month(11), 1, 23, 59, 59, 154140487, time.UTC).Local(),
+            time.Date(1968, time.Month(11), 1, 23, 59, 59, 154140487, time.UTC).Local())
+        So(err, ShouldBeNil)
+        err = mtt.Insert(nil, nil, nil, nil, nil, nil, nil, nil, nil, nil)
+        So(err, ShouldBeNil)
+        err = mtt.Insert(time.Date(1969, time.Month(12), 1, 1, 1, 0, 0, time.UTC).Local(),
+            nil,
+            time.Date(1970, time.Month(12), 1, 12, 23, 45, 489457541, time.UTC).Local(),
+            nil,
+            time.Date(1968, time.Month(11), 1, 23, 59, 59, 154140487, time.UTC).Local(),
+            time.Date(1970, time.Month(12), 1, 12, 23, 45, 489457541, time.UTC).Local(),
+            nil,
+            time.Date(1968, time.Month(11), 1, 23, 59, 59, 154140487, time.UTC).Local(),
+            time.Date(1968, time.Month(11), 1, 23, 59, 59, 154140487, time.UTC).Local(),
+            nil)
+        So(err, ShouldBeNil)
+        mtt.WaitForGoroutineCompletion()
+        re, err := ddb.RunScript("select * from t1")
+        So(err, ShouldBeNil)
+        reTable := re.(*model.Table)
+        So(reTable.Rows(), ShouldEqual, 3)
+        So(reTable.GetColumnByName("datev").Get(0).String(), ShouldEqual, "1969.12.01")
+        So(reTable.GetColumnByName("monthv").Get(0).String(), ShouldEqual, "1970.12M")
+        So(reTable.GetColumnByName("secondv").Get(0).String(), ShouldEqual, "12:23:45")
+        So(reTable.GetColumnByName("minutev").Get(0).String(), ShouldEqual, "23:59m")
+        So(reTable.GetColumnByName("datetimev").Get(0).String(), ShouldEqual, "1968.11.01T23:59:59")
+        So(reTable.GetColumnByName("timestampv").Get(0).String(), ShouldEqual, "1970.12.01T12:23:45.489")
+        So(reTable.GetColumnByName("datehourv").Get(0).String(), ShouldEqual, "1970.01.01T12")
+        So(reTable.GetColumnByName("timev").Get(0).String(), ShouldEqual, "23:59:59.154")
+        So(reTable.GetColumnByName("nanotimev").Get(0).String(), ShouldEqual, "23:59:59.154140487")
+        So(reTable.GetColumnByName("nanotimestampv").Get(0).String(), ShouldEqual, "1968.11.01T23:59:59.154140487")
+        So(reTable.GetColumnByName("datev").Get(1).IsNull(), ShouldEqual, true)
+        So(reTable.GetColumnByName("monthv").Get(1).IsNull(), ShouldEqual, true)
+        So(reTable.GetColumnByName("secondv").Get(1).IsNull(), ShouldEqual, true)
+        So(reTable.GetColumnByName("minutev").Get(1).IsNull(), ShouldEqual, true)
+        So(reTable.GetColumnByName("datetimev").Get(1).IsNull(), ShouldEqual, true)
+        So(reTable.GetColumnByName("timestampv").Get(1).IsNull(), ShouldEqual, true)
+        So(reTable.GetColumnByName("datehourv").Get(1).IsNull(), ShouldEqual, true)
+        So(reTable.GetColumnByName("timev").Get(1).IsNull(), ShouldEqual, true)
+        So(reTable.GetColumnByName("nanotimev").Get(1).IsNull(), ShouldEqual, true)
+        So(reTable.GetColumnByName("nanotimestampv").Get(1).IsNull(), ShouldEqual, true)
+        So(reTable.GetColumnByName("datev").Get(2).String(), ShouldEqual, "1969.12.01")
+        So(reTable.GetColumnByName("monthv").Get(2).IsNull(), ShouldEqual, true)
+        So(reTable.GetColumnByName("secondv").Get(2).String(), ShouldEqual, "12:23:45")
+        So(reTable.GetColumnByName("minutev").Get(2).IsNull(), ShouldEqual, true)
+        So(reTable.GetColumnByName("datetimev").Get(2).String(), ShouldEqual, "1968.11.01T23:59:59")
+        So(reTable.GetColumnByName("timestampv").Get(2).String(), ShouldEqual, "1970.12.01T12:23:45.489")
+        So(reTable.GetColumnByName("datehourv").Get(2).IsNull(), ShouldEqual, true)
+        So(reTable.GetColumnByName("timev").Get(2).String(), ShouldEqual, "23:59:59.154")
+        So(reTable.GetColumnByName("nanotimev").Get(2).String(), ShouldEqual, "23:59:59.154140487")
+        So(reTable.GetColumnByName("nanotimestampv").Get(2).IsNull(), ShouldEqual, true)
+        _, err = ddb.RunScript("undef(`t1,SHARED)")
+        So(err, ShouldBeNil)
+    })
+}
+
+func TestMultiGoroutineTable_insert_dfs_part_null(t *testing.T) {
+    Convey("TestMultiGoroutineTable_insert_dfs_part_null", t, func() {
+        ddb, err := api.NewSimpleDolphinDBClient(context.TODO(), setup.Address, setup.UserName, setup.Password)
+        So(err, ShouldBeNil)
+        defer ddb.Close()
+        scriptGoroutineCount := "Database = \"dfs://test_MultithreadedTableWriter\"\n" +
+            "if(exists(Database)){\n" +
+            "\tdropDatabase(Database)\t\n" +
+            "}\n" +
+            "db = database(Database, VALUE,`A`B`C`D);\n" + "t = table(1000:0, `boolv`charv`shortv`longv`datev`monthv`secondv`datetimev`timestampv`nanotimev`nanotimestampv`floatv`doublev`symbolv`stringv`uuidv`ipaddrv`int128v`id," +
+            "[BOOL,CHAR,SHORT,LONG,DATE,MONTH,SECOND,DATETIME,TIMESTAMP,NANOTIME,NANOTIMESTAMP,FLOAT,DOUBLE,SYMBOL,STRING,UUID, IPADDR, INT128,INT]);" +
+            "pt = db.createTable(t,`pt);"
+        _, err = ddb.RunScript(scriptGoroutineCount)
+        So(err, ShouldBeNil)
+        opt := &mtw.Option{
+            GoroutineCount: 1,
+            BatchSize:      5,
+            Throttle:       1,
+            PartitionCol:   "boolv",
+            Database:       "dfs://test_MultithreadedTableWriter",
+            TableName:      "pt",
+            UserID:         setup.UserName,
+            Password:       setup.Password,
+            Address:        setup.Address,
+        }
+        mtt, err := mtw.NewMultiGoroutineTable(opt)
+        So(err, ShouldBeNil)
+        err = mtt.Insert(byte(1), nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil, nil)
+        So(err, ShouldBeNil)
+        mtt.WaitForGoroutineCompletion()
+        re, err := ddb.RunScript("select * from loadTable('dfs://test_MultithreadedTableWriter',`pt);")
+        So(err, ShouldBeNil)
+        reTable := re.(*model.Table)
+        So(reTable.Rows(), ShouldEqual, 1)
+        unSentRows := mtt.GetStatus().UnSentRows
+        So(unSentRows, ShouldEqual, 0)
+        sentRows := mtt.GetStatus().SentRows
+        So(sentRows, ShouldEqual, 1)
+        So(reTable.GetColumnByName("boolv").String(), ShouldEqual, "vector([true])")
+        So(reTable.GetColumnByName("charv").String(), ShouldEqual, "vector([])")
+        So(reTable.GetColumnByName("shortv").String(), ShouldEqual, "vector([])")
+        So(reTable.GetColumnByName("longv").String(), ShouldEqual, "vector([])")
+        So(reTable.GetColumnByName("datev").String(), ShouldEqual, "vector([])")
+        So(reTable.GetColumnByName("monthv").String(), ShouldEqual, "vector([])")
+        So(reTable.GetColumnByName("secondv").String(), ShouldEqual, "vector([])")
+        So(reTable.GetColumnByName("datetimev").String(), ShouldEqual, "vector([])")
+        So(reTable.GetColumnByName("timestampv").String(), ShouldEqual, "vector([])")
+        So(reTable.GetColumnByName("nanotimev").String(), ShouldEqual, "vector([])")
+        So(reTable.GetColumnByName("nanotimestampv").String(), ShouldEqual, "vector([])")
+        So(reTable.GetColumnByName("floatv").String(), ShouldEqual, "vector([])")
+        So(reTable.GetColumnByName("doublev").String(), ShouldEqual, "vector([])")
+        So(reTable.GetColumnByName("symbolv").String(), ShouldEqual, "vector([])")
+        So(reTable.GetColumnByName("stringv").String(), ShouldEqual, "vector([])")
+        So(reTable.GetColumnByName("uuidv").String(), ShouldEqual, "vector([00000000-0000-0000-0000-000000000000])")
+        So(reTable.GetColumnByName("ipaddrv").String(), ShouldEqual, "vector([0.0.0.0])")
+        So(reTable.GetColumnByName("int128v").String(), ShouldEqual, "vector([00000000000000000000000000000000])")
+        So(reTable.GetColumnByName("id").String(), ShouldEqual, "vector([])")
+        _, err = ddb.RunScript("dropDatabase(\"dfs://test_MultithreadedTableWriter\")")
+        So(err, ShouldBeNil)
+    })
+}
+
+func TestMultiGoroutineTable_insert_empty_arrayVector(t *testing.T) {
+    Convey("TestMultiGoroutineTable_insert_empty_arrayVector", t, func() {
+        ddb, err := api.NewSimpleDolphinDBClient(context.TODO(), setup.Address, setup.UserName, setup.Password)
+        So(err, ShouldBeNil)
+        defer ddb.Close()
+        scriptGoroutineCount := "t = table(1000:0, `intv`arrayv," +
+            "[INT,INT[]]);" +
+            "share t as t1;"
+        _, err = ddb.RunScript(scriptGoroutineCount)
+        So(err, ShouldBeNil)
+        opt := &mtw.Option{
+            GoroutineCount: 1,
+            BatchSize:      1,
+            Throttle:       1,
+            PartitionCol:   "intv",
+            Database:       "",
+            TableName:      "t1",
+            UserID:         setup.UserName,
+            Password:       setup.Password,
+            Address:        setup.Address,
+        }
+        mtt, err := mtw.NewMultiGoroutineTable(opt)
+        So(err, ShouldBeNil)
+        err = mtt.Insert(int32(10), []int32{})
+        So(err, ShouldBeNil)
+        mtt.WaitForGoroutineCompletion()
+        re, err := ddb.RunScript("select * from t1;")
+        So(err, ShouldBeNil)
+        reTable := re.(*model.Table)
+        So(reTable.Rows(), ShouldEqual, 1)
+        reIDv := reTable.GetColumnByName("intv")
+        reArrayv := reTable.GetColumnByName("arrayv")
+        So(reIDv.String(), ShouldEqual, "vector([10])")
+        So(reArrayv.String(), ShouldEqual, "vector([[]])")
+        _, err = ddb.RunScript("undef(`t1,SHARED)")
+        So(err, ShouldBeNil)
+    })
+}
+
+func TestMultiGoroutineTable_insert_arrayVector_different_length(t *testing.T) {
+    Convey("TestMultiGoroutineTable_insert_arrayVector_different_length", t, func() {
+        ddb, err := api.NewSimpleDolphinDBClient(context.TODO(), setup.Address, setup.UserName, setup.Password)
+        So(err, ShouldBeNil)
+        defer ddb.Close()
+        scriptGoroutineCount := "t = table(1000:0, `intv`arrayv`arrayv1`arrayv2," +
+            "[INT,INT[],BOOL[],BOOL[]]);" +
+            "share t as t1;"
+        _, err = ddb.RunScript(scriptGoroutineCount)
+        So(err, ShouldBeNil)
+        opt := &mtw.Option{
+            GoroutineCount: 1,
+            BatchSize:      1,
+            Throttle:       1,
+            PartitionCol:   "intv",
+            Database:       "",
+            TableName:      "t1",
+            UserID:         setup.UserName,
+            Password:       setup.Password,
+            Address:        setup.Address,
+        }
+        mtt, err := mtw.NewMultiGoroutineTable(opt)
+        So(err, ShouldBeNil)
+        err = mtt.Insert(int32(10), []int32{1, 3}, []byte{1, 0, model.NullBool}, []byte{1, 0})
+        So(err, ShouldBeNil)
+        err = mtt.Insert(int32(10), []int32{}, []byte{}, []byte{1, 0})
+        So(err, ShouldBeNil)
+        err = mtt.Insert(int32(10), []int32{1, 2, model.NullInt}, []byte{1, 0, model.NullBool}, []byte{1, 0})
+        So(err, ShouldBeNil)
+        mtt.WaitForGoroutineCompletion()
+        re, err := ddb.RunScript("select * from t1;")
+        So(err, ShouldBeNil)
+        reTable := re.(*model.Table)
+        So(reTable.Rows(), ShouldEqual, 3)
+        strmsg := mtt.GetStatus().String()
+        So(strmsg, ShouldContainSubstring, "sentRows : 3")
+        So(reTable.GetColumnByName("intv").String(), ShouldEqual, "vector([10, 10, 10])")
+        So(reTable.GetColumnByName("arrayv").GetVectorValue(0).String(), ShouldEqual, "vector([1, 3])")
+        So(reTable.GetColumnByName("arrayv").GetVectorValue(1).String(), ShouldEqual, "vector([])")
+        So(reTable.GetColumnByName("arrayv").GetVectorValue(2).String(), ShouldEqual, "vector([1, 2, ])")
+        _, err = ddb.RunScript("undef(`t1,SHARED)")
+        So(err, ShouldBeNil)
+    })
+}
+
+func TestMultiGoroutineTable_insert_arrayVector_char(t *testing.T) {
+    Convey("TestMultiGoroutineTable_insert_arrayVector_char", t, func() {
+        ddb, err := api.NewSimpleDolphinDBClient(context.TODO(), setup.Address, setup.UserName, setup.Password)
+        So(err, ShouldBeNil)
+        defer ddb.Close()
+        scriptGoroutineCount := "t = table(1000:0, `intv`charArr," +
+            "[INT,CHAR[]]);" +
+            "share t as t1;"
+        _, err = ddb.RunScript(scriptGoroutineCount)
+        So(err, ShouldBeNil)
+        opt := &mtw.Option{
+            GoroutineCount: 1,
+            BatchSize:      1,
+            Throttle:       1,
+            PartitionCol:   "intv",
+            Database:       "",
+            TableName:      "t1",
+            UserID:         setup.UserName,
+            Password:       setup.Password,
+            Address:        setup.Address,
+        }
+        mtt, err := mtw.NewMultiGoroutineTable(opt)
+        So(err, ShouldBeNil)
+        err = mtt.Insert(int32(10), []byte{})
+        So(err, ShouldBeNil)
+        err = mtt.Insert(int32(10), []byte{model.NullChar, 4})
+        So(err, ShouldBeNil)
+        mtt.WaitForGoroutineCompletion()
+        re, err := ddb.RunScript("select * from t1;")
+        So(err, ShouldBeNil)
+        reTable := re.(*model.Table)
+        So(reTable.Rows(), ShouldEqual, 2)
+        reIDv := reTable.GetColumnByName("intv")
+        reArrayv := reTable.GetColumnByName("charArr")
+        So(reIDv.String(), ShouldEqual, "vector([10, 10])")
+        So(reArrayv.String(), ShouldEqual, "vector([[], [, 4]])")
+        _, err = ddb.RunScript("undef(`t1,SHARED)")
+        So(err, ShouldBeNil)
+    })
+}
+
+func TestMultiGoroutineTable_insert_arrayVector_int(t *testing.T) {
+    Convey("TestMultiGoroutineTable_insert_arrayVector_int", t, func() {
+        ddb, err := api.NewSimpleDolphinDBClient(context.TODO(), setup.Address, setup.UserName, setup.Password)
+        So(err, ShouldBeNil)
+        defer ddb.Close()
+        scriptGoroutineCount := "t = table(1000:0, `intv`Arr," +
+            "[INT,INT[]]);" +
+            "share t as t1;"
+        _, err = ddb.RunScript(scriptGoroutineCount)
+        So(err, ShouldBeNil)
+        opt := &mtw.Option{
+            GoroutineCount: 1,
+            BatchSize:      1,
+            Throttle:       1,
+            PartitionCol:   "intv",
+            Database:       "",
+            TableName:      "t1",
+            UserID:         setup.UserName,
+            Password:       setup.Password,
+            Address:        setup.Address,
+        }
+        mtt, err := mtw.NewMultiGoroutineTable(opt)
+        So(err, ShouldBeNil)
+        err = mtt.Insert(int32(10), []int32{})
+        So(err, ShouldBeNil)
+        err = mtt.Insert(int32(3), []int32{model.NullInt, 4})
+        So(err, ShouldBeNil)
+        mtt.WaitForGoroutineCompletion()
+        re, err := ddb.RunScript("select * from t1;")
+        So(err, ShouldBeNil)
+        reTable := re.(*model.Table)
+        So(reTable.Rows(), ShouldEqual, 2)
+        reIDv := reTable.GetColumnByName("intv")
+        reArrayv := reTable.GetColumnByName("Arr")
+        So(reIDv.String(), ShouldEqual, "vector([10, 3])")
+        So(reArrayv.String(), ShouldEqual, "vector([[], [, 4]])")
+        _, err = ddb.RunScript("undef(`t1,SHARED)")
+        So(err, ShouldBeNil)
+    })
+}
+
+func TestMultiGoroutineTable_insert_arrayVector_bool(t *testing.T) {
+    Convey("TestMultiGoroutineTable_insert_arrayVector_bool", t, func() {
+        ddb, err := api.NewSimpleDolphinDBClient(context.TODO(), setup.Address, setup.UserName, setup.Password)
+        So(err, ShouldBeNil)
+        defer ddb.Close()
+        scriptGoroutineCount := "t = table(1000:0, `intv`Arr," +
+            "[INT,BOOL[]]);" +
+            "share t as t1;"
+        _, err = ddb.RunScript(scriptGoroutineCount)
+        So(err, ShouldBeNil)
+        opt := &mtw.Option{
+            GoroutineCount: 1,
+            BatchSize:      1,
+            Throttle:       1,
+            PartitionCol:   "intv",
+            Database:       "",
+            TableName:      "t1",
+            UserID:         setup.UserName,
+            Password:       setup.Password,
+            Address:        setup.Address,
+        }
+        mtt, err := mtw.NewMultiGoroutineTable(opt)
+        So(err, ShouldBeNil)
+        err = mtt.Insert(int32(10), []byte{})
+        So(err, ShouldBeNil)
+        err = mtt.Insert(int32(3), []byte{model.NullBool, 1})
+        So(err, ShouldBeNil)
+        mtt.WaitForGoroutineCompletion()
+        re, err := ddb.RunScript("select * from t1;")
+        So(err, ShouldBeNil)
+        reTable := re.(*model.Table)
+        So(reTable.Rows(), ShouldEqual, 2)
+        reIDv := reTable.GetColumnByName("intv")
+        reArrayv := reTable.GetColumnByName("Arr")
+        So(reIDv.String(), ShouldEqual, "vector([10, 3])")
+        So(reArrayv.String(), ShouldEqual, "vector([[], [, true]])")
+        _, err = ddb.RunScript("undef(`t1,SHARED)")
+        So(err, ShouldBeNil)
+    })
+}
+
+func TestMultiGoroutineTable_insert_arrayVector_long(t *testing.T) {
+    Convey("TestMultiGoroutineTable_insert_arrayVector_long", t, func() {
+        ddb, err := api.NewSimpleDolphinDBClient(context.TODO(), setup.Address, setup.UserName, setup.Password)
+        So(err, ShouldBeNil)
+        defer ddb.Close()
+        scriptGoroutineCount := "t = table(1000:0, `intv`Arr," +
+            "[INT,LONG[]]);" +
+            "share t as t1;"
+        _, err = ddb.RunScript(scriptGoroutineCount)
+        So(err, ShouldBeNil)
+        opt := &mtw.Option{
+            GoroutineCount: 1,
+            BatchSize:      1,
+            Throttle:       1,
+            PartitionCol:   "intv",
+            Database:       "",
+            TableName:      "t1",
+            UserID:         setup.UserName,
+            Password:       setup.Password,
+            Address:        setup.Address,
+        }
+        mtt, err := mtw.NewMultiGoroutineTable(opt)
+        So(err, ShouldBeNil)
+        err = mtt.Insert(int32(10), []int64{})
+        So(err, ShouldBeNil)
+        err = mtt.Insert(int32(3), []int64{model.NullLong, 45})
+        So(err, ShouldBeNil)
+        mtt.WaitForGoroutineCompletion()
+        So(mtt.GetStatus().FailedRows, ShouldEqual, 0)
+        So(mtt.GetStatus().UnSentRows, ShouldEqual, 0)
+        re, err := ddb.RunScript("select * from t1;")
+        So(err, ShouldBeNil)
+        reTable := re.(*model.Table)
+        So(reTable.Rows(), ShouldEqual, 2)
+        reIDv := reTable.GetColumnByName("intv")
+        reArrayv := reTable.GetColumnByName("Arr")
+        So(reIDv.String(), ShouldEqual, "vector([10, 3])")
+        So(reArrayv.String(), ShouldEqual, "vector([[], [, 45]])")
+        _, err = ddb.RunScript("undef(`t1,SHARED)")
+        So(err, ShouldBeNil)
+    })
+}
+
+func TestMultiGoroutineTable_insert_arrayVector_short(t *testing.T) {
+    Convey("TestMultiGoroutineTable_insert_arrayVector_short", t, func() {
+        ddb, err := api.NewSimpleDolphinDBClient(context.TODO(), setup.Address, setup.UserName, setup.Password)
+        So(err, ShouldBeNil)
+        defer ddb.Close()
+        scriptGoroutineCount := "t = table(1000:0, `intv`Arr," +
+            "[INT,SHORT[]]);" +
+            "share t as t1;"
+        _, err = ddb.RunScript(scriptGoroutineCount)
+        So(err, ShouldBeNil)
+        opt := &mtw.Option{
+            GoroutineCount: 1,
+            BatchSize:      1,
+            Throttle:       1,
+            PartitionCol:   "intv",
+            Database:       "",
+            TableName:      "t1",
+            UserID:         setup.UserName,
+            Password:       setup.Password,
+            Address:        setup.Address,
+        }
+        mtt, err := mtw.NewMultiGoroutineTable(opt)
+        So(err, ShouldBeNil)
+        err = mtt.Insert(int32(10), []int16{})
+        So(err, ShouldBeNil)
+        err = mtt.Insert(int32(3), []int16{model.NullShort, 15})
+        So(err, ShouldBeNil)
+        mtt.WaitForGoroutineCompletion()
+        So(mtt.GetStatus().FailedRows, ShouldEqual, 0)
+        So(mtt.GetStatus().UnSentRows, ShouldEqual, 0)
+        re, err := ddb.RunScript("select * from t1;")
+        So(err, ShouldBeNil)
+        reTable := re.(*model.Table)
+        So(reTable.Rows(), ShouldEqual, 2)
+        reIDv := reTable.GetColumnByName("intv")
+        reArrayv := reTable.GetColumnByName("Arr")
+        So(reIDv.String(), ShouldEqual, "vector([10, 3])")
+        So(reArrayv.String(), ShouldEqual, "vector([[], [, 15]])")
+        _, err = ddb.RunScript("undef(`t1,SHARED)")
+        So(err, ShouldBeNil)
+    })
+}
+
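+// The arrayVector tests above and below all follow the same flow; a minimal
+// sketch of it, written as a comment (names and option values mirror the calls
+// these tests make, with error handling elided):
+//
+//	opt := &mtw.Option{
+//		GoroutineCount: 1, BatchSize: 1, Throttle: 1,
+//		PartitionCol: "intv", Database: "", TableName: "t1",
+//		UserID: setup.UserName, Password: setup.Password, Address: setup.Address,
+//	}
+//	writer, _ := mtw.NewMultiGoroutineTable(opt)
+//	// NULL elements inside an array vector are written with the model
+//	// sentinels, e.g. model.NullInt for INT[] and model.NullFloat for FLOAT[].
+//	_ = writer.Insert(int32(3), []float32{model.NullFloat, 2.6})
+//	writer.WaitForGoroutineCompletion()
+//	status := writer.GetStatus() // inspect ErrMsg, SentRows, FailedRows, UnSentRows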
+func TestMultiGoroutineTable_insert_arrayVector_float(t *testing.T) { + Convey("TestMultiGoroutineTable_insert_arrayVector_float", t, func() { + ddb, err := api.NewSimpleDolphinDBClient(context.TODO(), setup.Address, setup.UserName, setup.Password) + So(err, ShouldBeNil) + defer ddb.Close() + scriptGoroutineCount := "t = table(1000:0, `intv`Arr," + + "[INT,FLOAT[]]);" + + "share t as t1;" + _, err = ddb.RunScript(scriptGoroutineCount) + So(err, ShouldBeNil) + opt := &mtw.Option{ + GoroutineCount: 1, + BatchSize: 1, + Throttle: 1, + PartitionCol: "intv", + Database: "", + TableName: "t1", + UserID: setup.UserName, + Password: setup.Password, + Address: setup.Address, + } + mtt, err := mtw.NewMultiGoroutineTable(opt) + So(err, ShouldBeNil) + err = mtt.Insert(int32(10), []float32{}) + So(err, ShouldBeNil) + err = mtt.Insert(int32(3), []float32{model.NullFloat, 2.6}) + So(err, ShouldBeNil) + mtt.WaitForGoroutineCompletion() + re, err := ddb.RunScript("select * from t1;") + So(err, ShouldBeNil) + reTable := re.(*model.Table) + So(reTable.Rows(), ShouldEqual, 2) + reIDv := reTable.GetColumnByName("intv") + reArrayv := reTable.GetColumnByName("Arr") + So(reIDv.String(), ShouldEqual, "vector([10, 3])") + So(reArrayv.String(), ShouldEqual, "vector([[], [, 2.6]])") + _, err = ddb.RunScript("undef(`t1,SHARED)") + So(err, ShouldBeNil) + }) +} + +func TestMultiGoroutineTable_insert_arrayVector_double(t *testing.T) { + Convey("TestMultiGoroutineTable_insert_arrayVector_double", t, func() { + ddb, err := api.NewSimpleDolphinDBClient(context.TODO(), setup.Address, setup.UserName, setup.Password) + So(err, ShouldBeNil) + defer ddb.Close() + scriptGoroutineCount := "t = table(1000:0, `intv`Arr," + + "[INT,DOUBLE[]]);" + + "share t as t1;" + _, err = ddb.RunScript(scriptGoroutineCount) + So(err, ShouldBeNil) + opt := &mtw.Option{ + GoroutineCount: 1, + BatchSize: 1, + Throttle: 1, + PartitionCol: "intv", + Database: "", + TableName: "t1", + UserID: setup.UserName, + Password: setup.Password, + Address: setup.Address, + } + mtt, err := mtw.NewMultiGoroutineTable(opt) + So(err, ShouldBeNil) + err = mtt.Insert(int32(10), []float64{}) + So(err, ShouldBeNil) + err = mtt.Insert(int32(3), []float64{model.NullDouble, 2.6}) + So(err, ShouldBeNil) + mtt.WaitForGoroutineCompletion() + re, err := ddb.RunScript("select * from t1;") + So(err, ShouldBeNil) + reTable := re.(*model.Table) + So(reTable.Rows(), ShouldEqual, 2) + reIDv := reTable.GetColumnByName("intv") + reArrayv := reTable.GetColumnByName("Arr") + So(reIDv.String(), ShouldEqual, "vector([10, 3])") + So(reArrayv.String(), ShouldEqual, "vector([[], [, 2.6]])") + _, err = ddb.RunScript("undef(`t1,SHARED)") + So(err, ShouldBeNil) + So(err, ShouldBeNil) + }) +} + +func TestMultiGoroutineTable_insert_arrayVector_date_month(t *testing.T) { + Convey("TestMultiGoroutineTable_insert_arrayVector_date_month", t, func() { + ddb, err := api.NewSimpleDolphinDBClient(context.TODO(), setup.Address, setup.UserName, setup.Password) + So(err, ShouldBeNil) + defer ddb.Close() + scriptGoroutineCount := "t = table(1000:0, `intv`Arr1`Arr2," + + "[INT, DATE[], MONTH[]]);" + + "share t as t1;" + _, err = ddb.RunScript(scriptGoroutineCount) + So(err, ShouldBeNil) + opt := &mtw.Option{ + GoroutineCount: 1, + BatchSize: 1, + Throttle: 1, + PartitionCol: "Arr1", + Database: "", + TableName: "t1", + UserID: setup.UserName, + Password: setup.Password, + Address: setup.Address, + } + mtt, err := mtw.NewMultiGoroutineTable(opt) + So(err, ShouldBeNil) + err = mtt.Insert(int32(10), 
+		So(err, ShouldBeNil)
+		err = mtt.Insert(int32(3), []time.Time{model.NullTime, time.Date(1969, time.Month(12), 5, 23, 56, 59, 456789123, time.UTC), model.NullTime}, []time.Time{model.NullTime, time.Date(1969, time.Month(12), 5, 23, 56, 59, 456789123, time.UTC)})
+		So(err, ShouldBeNil)
+		mtt.WaitForGoroutineCompletion()
+		re, err := ddb.RunScript("select * from t1;")
+		So(err, ShouldBeNil)
+		reTable := re.(*model.Table)
+		So(reTable.Rows(), ShouldEqual, 2)
+		reIDv := reTable.GetColumnByName("intv")
+		reArray1v := reTable.GetColumnByName("Arr1")
+		reArray2v := reTable.GetColumnByName("Arr2")
+		So(reIDv.String(), ShouldEqual, "vector([10, 3])")
+		So(reArray1v.String(), ShouldEqual, "vector([[], [, 1969.12.05, ]])")
+		So(reArray2v.String(), ShouldEqual, "vector([[], [, 1969.12M]])")
+		_, err = ddb.RunScript("undef(`t1,SHARED)")
+		So(err, ShouldBeNil)
+	})
+}
+
+func TestMultiGoroutineTable_insert_arrayVector_time_minute_second(t *testing.T) {
+	Convey("TestMultiGoroutineTable_insert_arrayVector_time_minute_second", t, func() {
+		ddb, err := api.NewSimpleDolphinDBClient(context.TODO(), setup.Address, setup.UserName, setup.Password)
+		So(err, ShouldBeNil)
+		defer ddb.Close()
+		scriptGoroutineCount := "t = table(1000:0, `intv`Arr1`Arr2`Arr3," +
+			"[INT, TIME[], MINUTE[], SECOND[]]);" +
+			"share t as t1;"
+		_, err = ddb.RunScript(scriptGoroutineCount)
+		So(err, ShouldBeNil)
+		opt := &mtw.Option{
+			GoroutineCount: 1,
+			BatchSize:      1,
+			Throttle:       1,
+			PartitionCol:   "Arr1",
+			Database:       "",
+			TableName:      "t1",
+			UserID:         setup.UserName,
+			Password:       setup.Password,
+			Address:        setup.Address,
+		}
+		mtt, err := mtw.NewMultiGoroutineTable(opt)
+		So(err, ShouldBeNil)
+		err = mtt.Insert(int32(10), []time.Time{}, []time.Time{}, []time.Time{})
+		So(err, ShouldBeNil)
+		err = mtt.Insert(int32(3), []time.Time{model.NullTime, time.Date(1969, time.Month(12), 5, 23, 56, 59, 456789123, time.UTC), model.NullTime}, []time.Time{model.NullTime, time.Date(1969, time.Month(12), 5, 23, 56, 59, 456789123, time.UTC)}, []time.Time{model.NullTime, time.Date(1969, time.Month(12), 5, 23, 56, 59, 456789123, time.UTC)})
+		So(err, ShouldBeNil)
+		mtt.WaitForGoroutineCompletion()
+		re, err := ddb.RunScript("select * from t1;")
+		So(err, ShouldBeNil)
+		reTable := re.(*model.Table)
+		So(reTable.Rows(), ShouldEqual, 2)
+		reIDv := reTable.GetColumnByName("intv")
+		reArray1v := reTable.GetColumnByName("Arr1")
+		reArray2v := reTable.GetColumnByName("Arr2")
+		reArray3v := reTable.GetColumnByName("Arr3")
+		So(reIDv.String(), ShouldEqual, "vector([10, 3])")
+		So(reArray1v.String(), ShouldEqual, "vector([[], [, 23:56:59.456, ]])")
+		So(reArray2v.String(), ShouldEqual, "vector([[], [, 23:56m]])")
+		So(reArray3v.String(), ShouldEqual, "vector([[], [, 23:56:59]])")
+		_, err = ddb.RunScript("undef(`t1,SHARED)")
+		So(err, ShouldBeNil)
+	})
+}
+
+func TestMultiGoroutineTable_insert_arrayVector_datetime_timestamp_nanotime_nanotimestamp(t *testing.T) {
+	Convey("TestMultiGoroutineTable_insert_arrayVector_datetime_timestamp_nanotime_nanotimestamp", t, func() {
+		ddb, err := api.NewSimpleDolphinDBClient(context.TODO(), setup.Address, setup.UserName, setup.Password)
+		So(err, ShouldBeNil)
+		defer ddb.Close()
+		scriptGoroutineCount := "t = table(1000:0, `intv`Arr1`Arr2`Arr3`Arr4," +
+			"[INT, DATETIME[], TIMESTAMP[], NANOTIME[], NANOTIMESTAMP[]]);" +
+			"share t as t1;"
+		_, err = ddb.RunScript(scriptGoroutineCount)
+		So(err, ShouldBeNil)
+		opt := &mtw.Option{
+			GoroutineCount: 1,
+			BatchSize:      1,
+			Throttle:       1,
+			PartitionCol:   "Arr1",
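+			// Database stays empty in these cases: the writer targets the
+			// shared in-memory table t1 (see `share t as t1` above), not a
+			// dfs database.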
+ Database: "", + TableName: "t1", + UserID: setup.UserName, + Password: setup.Password, + Address: setup.Address, + } + mtt, err := mtw.NewMultiGoroutineTable(opt) + So(err, ShouldBeNil) + err = mtt.Insert(int32(10), []time.Time{}, []time.Time{}, []time.Time{}, []time.Time{model.NullTime, time.Date(1970, time.Month(02), 5, 23, 56, 59, 999999999, time.UTC), model.NullTime}) + So(err, ShouldBeNil) + err = mtt.Insert(int32(3), []time.Time{model.NullTime, time.Date(1969, time.Month(12), 5, 23, 56, 59, 456789123, time.UTC), model.NullTime}, []time.Time{model.NullTime, time.Date(1969, time.Month(12), 5, 23, 56, 59, 456789123, time.UTC)}, []time.Time{model.NullTime, time.Date(1969, time.Month(12), 5, 23, 56, 59, 456789123, time.UTC)}, []time.Time{}) + So(err, ShouldBeNil) + mtt.WaitForGoroutineCompletion() + re, err := ddb.RunScript("select * from t1;") + So(err, ShouldBeNil) + reTable := re.(*model.Table) + So(reTable.Rows(), ShouldEqual, 2) + reIDv := reTable.GetColumnByName("intv") + reArray1v := reTable.GetColumnByName("Arr1") + reArray2v := reTable.GetColumnByName("Arr2") + reArray3v := reTable.GetColumnByName("Arr3") + reArray4v := reTable.GetColumnByName("Arr4") + So(reIDv.String(), ShouldEqual, "vector([10, 3])") + So(reArray1v.String(), ShouldEqual, "vector([[], [, 1969.12.05T23:56:59, ]])") + So(reArray2v.String(), ShouldEqual, "vector([[], [, 1969.12.05T23:56:59.456]])") + So(reArray3v.String(), ShouldEqual, "vector([[], [, 23:56:59.456789123]])") + So(reArray4v.String(), ShouldEqual, "vector([[, 1970.02.05T23:56:59.999999999, ], []])") + _, err = ddb.RunScript("undef(`t1,SHARED)") + So(err, ShouldBeNil) + }) +} + +func TestMultiGoroutineTable_insert_arrayVector_otherType(t *testing.T) { + Convey("TestMultiGoroutineTable_insert_arrayVector_otherType", t, func() { + ddb, err := api.NewSimpleDolphinDBClient(context.TODO(), setup.Address, setup.UserName, setup.Password) + So(err, ShouldBeNil) + defer ddb.Close() + scriptGoroutineCount := "t = table(1000:0, `uuidv`int128v`ipaddrv," + + "[UUID[],INT128[],IPADDR[]]);" + + "share t as t1" + _, err = ddb.RunScript(scriptGoroutineCount) + So(err, ShouldBeNil) + opt := &mtw.Option{ + GoroutineCount: 1, + BatchSize: 1, + Throttle: 1, + PartitionCol: "uuidv", + Database: "", + TableName: "t1", + UserID: setup.UserName, + Password: setup.Password, + Address: setup.Address, + } + mtt, err := mtw.NewMultiGoroutineTable(opt) + So(err, ShouldBeNil) + err = mtt.Insert([]string{"5d212a78-cc48-e3b1-4235-b4d91473ee87", "5d212a78-cc48-e3b1-4235-b4d91473ee87", model.NullUUID}, []string{"e1671797c52e15f763380b45e841ec32", model.NullInt128, "e1671797c52e15f763380b45e841ec32"}, []string{"192.168.1.13", "192.168.1.84", model.NullIP}) + So(err, ShouldBeNil) + err = mtt.Insert(nil, nil, nil) + So(err, ShouldBeNil) + mtt.WaitForGoroutineCompletion() + re, err := ddb.RunScript("select * from t1;") + So(err, ShouldBeNil) + reTable := re.(*model.Table) + So(reTable.Rows(), ShouldEqual, 2) + reArray1v := reTable.GetColumnByName("uuidv") + reArray2v := reTable.GetColumnByName("int128v") + reArray3v := reTable.GetColumnByName("ipaddrv") + So(reArray1v.String(), ShouldEqual, "vector([[5d212a78-cc48-e3b1-4235-b4d91473ee87, 5d212a78-cc48-e3b1-4235-b4d91473ee87, 00000000-0000-0000-0000-000000000000], [00000000-0000-0000-0000-000000000000]])") + So(reArray2v.String(), ShouldEqual, "vector([[e1671797c52e15f763380b45e841ec32, 00000000000000000000000000000000, e1671797c52e15f763380b45e841ec32], [00000000000000000000000000000000]])") + So(reArray3v.String(), ShouldEqual, 
"vector([[192.168.1.13, 192.168.1.84, 0.0.0.0], [0.0.0.0]])") + _, err = ddb.RunScript("undef(`t1,SHARED)") + So(err, ShouldBeNil) + }) +} + +func TestMultiGoroutineTable_insert_blob(t *testing.T) { + Convey("TestMultiGoroutineTable_insert_blob", t, func() { + ddb, err := api.NewSimpleDolphinDBClient(context.TODO(), setup.Address, setup.UserName, setup.Password) + So(err, ShouldBeNil) + defer ddb.Close() + scriptGoroutineCount := "t = streamTable(1000:0, `intv`blobv," + + "[INT, BLOB]);" + + "share t as t1" + _, err = ddb.RunScript(scriptGoroutineCount) + So(err, ShouldBeNil) + opt := &mtw.Option{ + GoroutineCount: 1, + BatchSize: 1, + Throttle: 1, + PartitionCol: "intv", + Database: "", + TableName: "t1", + UserID: setup.UserName, + Password: setup.Password, + Address: setup.Address, + } + mtt, err := mtw.NewMultiGoroutineTable(opt) + So(err, ShouldBeNil) + err = mtt.Insert(int32(10), []byte("aaaaadsfasdfaa")) + So(err, ShouldBeNil) + mtt.WaitForGoroutineCompletion() + re, err := ddb.RunScript("select * from t1;") + So(err, ShouldBeNil) + reTable := re.(*model.Table) + So(reTable.Rows(), ShouldEqual, 1) + reArray1v := reTable.GetColumnByName("blobv") + So(reArray1v.String(), ShouldEqual, "vector([aaaaadsfasdfaa])") + _, err = ddb.RunScript("undef(`t1,SHARED)") + So(err, ShouldBeNil) + }) +} + +func TestMultiGoroutineTable_insert_wrong_type(t *testing.T) { + Convey("TestMultiGoroutineTable_insert_arrayVector_otherType", t, func() { + ddb, err := api.NewSimpleDolphinDBClient(context.TODO(), setup.Address, setup.UserName, setup.Password) + So(err, ShouldBeNil) + defer ddb.Close() + scriptGoroutineCount := "t = streamTable(1000:0, `intv`doublev," + + "[INT,DOUBLE]);" + + "share t as t1" + _, err = ddb.RunScript(scriptGoroutineCount) + So(err, ShouldBeNil) + opt := &mtw.Option{ + GoroutineCount: 1, + BatchSize: 1, + Throttle: 1, + PartitionCol: "intv", + Database: "", + TableName: "t1", + UserID: setup.UserName, + Password: setup.Password, + Address: setup.Address, + } + mtt, err := mtw.NewMultiGoroutineTable(opt) + So(err, ShouldBeNil) + tb := make([][]model.DataType, 0) + for i := 0; i < 1; i++ { + rowData := make([]model.DataType, 0) + dt1, _ := model.NewDataType(model.DtInt, int32(16+i)) + rowData = append(rowData, dt1) + dt2, _ := model.NewDataType(model.DtInt, int32(i)) + rowData = append(rowData, dt2) + tb = append(tb, rowData) + } + err = mtt.InsertUnwrittenData(tb) + So(err, ShouldBeNil) + mtt.WaitForGoroutineCompletion() + So(mtt.GetStatus().ErrMsg, ShouldContainSubstring, "failed to set DataType(int) into DataTypeList(double)") + _, err = ddb.RunScript("select * from t1;") + So(err, ShouldBeNil) + status := mtt.GetStatus() + So(len(tb), ShouldEqual, status.UnSentRows+status.SentRows+status.FailedRows) + unwrittenData := mtt.GetUnwrittenData() + So(unwrittenData[0][0].Value(), ShouldEqual, int32(16)) + So(unwrittenData[0][1].Value(), ShouldEqual, int32(0)) + _, err = ddb.RunScript("undef(`t1,SHARED)") + So(err, ShouldBeNil) + }) +} + +func TestMultiGoroutineTable_insert_uuid_int128_ipaddr(t *testing.T) { + Convey("TestMultiGoroutineTable_insert_uuid_int128_ipaddr", t, func() { + ddb, err := api.NewSimpleDolphinDBClient(context.TODO(), setup.Address, setup.UserName, setup.Password) + So(err, ShouldBeNil) + defer ddb.Close() + script1 := "t = streamTable(1000:0, `uuidv`ipaddrv`int128v," + + "[UUID, IPADDR, INT128]);" + + "share t as t1;" + _, err = ddb.RunScript(script1) + So(err, ShouldBeNil) + opt := &mtw.Option{ + GoroutineCount: 1, + BatchSize: 1, + Throttle: 1, + PartitionCol: 
"uuidv", + Database: "", + TableName: "t1", + UserID: setup.UserName, + Password: setup.Password, + Address: setup.Address, + } + mtt, err := mtw.NewMultiGoroutineTable(opt) + So(err, ShouldBeNil) + tb := make([][]model.DataType, 0) + for i := 0; i < 3; i++ { + rowData := make([]model.DataType, 0) + dt1, _ := model.NewDataType(model.DtUUID, "00000000-0004-e72c-0000-000000007eb1") + rowData = append(rowData, dt1) + dt2, _ := model.NewDataType(model.DtIP, "192.168.100.20") + rowData = append(rowData, dt2) + dt3, _ := model.NewDataType(model.DtInt128, "e1671797c52e15f763380b45e841ec32") + rowData = append(rowData, dt3) + tb = append(tb, rowData) + } + err = mtt.InsertUnwrittenData(tb) + So(err, ShouldBeNil) + mtt.WaitForGoroutineCompletion() + re, err := ddb.RunScript("select * from t1;") + So(err, ShouldBeNil) + reTable := re.(*model.Table) + So(reTable.GetColumnByName("uuidv").String(), ShouldEqual, "vector([00000000-0004-e72c-0000-000000007eb1, 00000000-0004-e72c-0000-000000007eb1, 00000000-0004-e72c-0000-000000007eb1])") + So(reTable.GetColumnByName("int128v").String(), ShouldEqual, "vector([e1671797c52e15f763380b45e841ec32, e1671797c52e15f763380b45e841ec32, e1671797c52e15f763380b45e841ec32])") + So(reTable.GetColumnByName("ipaddrv").String(), ShouldEqual, "vector([192.168.100.20, 192.168.100.20, 192.168.100.20])") + status := mtt.GetStatus() + So(len(tb), ShouldEqual, status.UnSentRows+status.SentRows) + _, err = ddb.RunScript("undef(`t1,SHARED)") + So(err, ShouldBeNil) + }) +} + +func TestMultiGoroutineTable_keytable(t *testing.T) { + Convey("TestMultiGoroutineTable_keytable", t, func() { + ddb, err := api.NewSimpleDolphinDBClient(context.TODO(), setup.Address, setup.UserName, setup.Password) + So(err, ShouldBeNil) + defer ddb.Close() + script := "t=keyedStreamTable(`sym,1:0, `sym`tradeDate`tradePrice`vwap`volume`valueTrade," + + "[SYMBOL, DATETIME, DOUBLE, FLOAT, INT, DOUBLE])\n ;share t as t1;" + _, err = ddb.RunScript(script) + So(err, ShouldBeNil) + opt := &mtw.Option{ + GoroutineCount: 1, + BatchSize: 10, + Throttle: 1, + PartitionCol: "tradeDate", + Database: "", + TableName: "t1", + UserID: setup.UserName, + Password: setup.Password, + Address: setup.Address, + } + mtt, err := mtw.NewMultiGoroutineTable(opt) + So(err, ShouldBeNil) + for i := 0; i < 10000; i++ { + err = mtt.Insert("AAPL"+strconv.Itoa(i%2), + time.Date(1969, time.Month(12), i%10+1, 23, i%10, 50, 000, time.UTC), + float64(22.5)+float64(i), float32(14.6)+float32(i), int32(i%10), float64(i)) + AssertNil(err) + } + mtt.WaitForGoroutineCompletion() + re, err := ddb.RunScript("select * from t1") + So(err, ShouldBeNil) + reTable := re.(*model.Table) + So(reTable.Rows(), ShouldEqual, 2) + status := mtt.GetStatus() + So(status.SentRows, ShouldEqual, 10000) + So(status.UnSentRows, ShouldEqual, 0) + _, err = ddb.RunScript("undef(`t1,SHARED)") + So(err, ShouldBeNil) + }) +} + +func TestMultiGoroutineTable_insert_dt_multipleThreadCount(t *testing.T) { + Convey("TestMultiGoroutineTable_insert_dt_multipleThreadCount", t, func() { + ddb, err := api.NewSimpleDolphinDBClient(context.TODO(), setup.Address, setup.UserName, setup.Password) + So(err, ShouldBeNil) + defer ddb.Close() + script := "Database = \"dfs://test_MultithreadedTableWriter\"\n" + + "if(exists(Database)){\n" + + "\tdropDatabase(Database)\t\n" + + "}\n" + + "db=database(Database, VALUE, 2012.01.01..2012.01.30)\n" + + "t=table(1:0, `sym`tradeDate`tradePrice`vwap`volume`valueTrade, [SYMBOL, DATETIME, DOUBLE, DOUBLE, INT, DOUBLE])\n ;share t as t1;" + + 
"\tcreateTable(dbHandle=db, table=t, tableName=`pt)\n" + _, err = ddb.RunScript(script) + So(err, ShouldBeNil) + opt := &mtw.Option{ + GoroutineCount: 10, + BatchSize: 10, + Throttle: 1, + PartitionCol: "tradeDate", + Database: "dfs://test_MultithreadedTableWriter", + TableName: "pt", + UserID: setup.UserName, + Password: setup.Password, + Address: setup.Address, + } + _, err = mtw.NewMultiGoroutineTable(opt) + So(err.Error(), ShouldContainSubstring, "the parameter GoroutineCount must be 1 for a dimension table") + _, err = ddb.RunScript("undef(`t1,SHARED)") + So(err, ShouldBeNil) + _, err = ddb.RunScript("dropDatabase(\"dfs://test_MultithreadedTableWriter\")") + So(err, ShouldBeNil) + }) +} + +func TestMultiGoroutineTable_insert_tsdb_dt_multipleThreadCount(t *testing.T) { + Convey("TestMultiGoroutineTable_insert_tsdb_dt_multipleThreadCount", t, func() { + ddb, err := api.NewSimpleDolphinDBClient(context.TODO(), setup.Address, setup.UserName, setup.Password) + So(err, ShouldBeNil) + defer ddb.Close() + script := "Database = \"dfs://test_MultithreadedTableWriter\"\n" + + "if(exists(Database)){\n" + + "\tdropDatabase(Database)\t\n" + + "}\n" + + "db=database(Database, VALUE, 2012.01.01..2012.01.30,,'TSDB')\n" + + "t=table(1:0, `sym`tradeDate`tradePrice`vwap`volume`valueTrade, [SYMBOL, DATETIME, DOUBLE, DOUBLE, INT, DOUBLE])\n ;share t as t1;" + + "\tcreateTable(dbHandle=db, table=t, tableName=`pt,sortColumns=`sym)\n" + _, err = ddb.RunScript(script) + So(err, ShouldBeNil) + opt := &mtw.Option{ + GoroutineCount: 10, + BatchSize: 10, + Throttle: 1, + PartitionCol: "tradeDate", + Database: "dfs://test_MultithreadedTableWriter", + TableName: "pt", + UserID: setup.UserName, + Password: setup.Password, + Address: setup.Address, + } + _, err = mtw.NewMultiGoroutineTable(opt) + So(err.Error(), ShouldContainSubstring, "the parameter GoroutineCount must be 1 for a dimension table") + _, err = ddb.RunScript("undef(`t1,SHARED)") + So(err, ShouldBeNil) + _, err = ddb.RunScript("dropDatabase(\"dfs://test_MultithreadedTableWriter\")") + So(err, ShouldBeNil) + }) +} +func TestMultiGoroutineTable_insert_dt_multipleThread_groutine(t *testing.T) { + Convey("TestMultiGoroutineTable_insert_dt_multipleThread_groutine", t, func() { + ddb, err := api.NewSimpleDolphinDBClient(context.TODO(), setup.Address, setup.UserName, setup.Password) + So(err, ShouldBeNil) + defer ddb.Close() + script := "Database = \"dfs://test_MultithreadedTableWriter\"\n" + + "if(exists(Database)){\n" + + "\tdropDatabase(Database)\t\n" + + "}\n" + + "db=database(Database, VALUE, 2012.01.01..2012.01.30)\n" + + "t=table(1:0, `sym`tradeDate`tradePrice`vwap`volume`valueTrade, [SYMBOL, DATETIME, DOUBLE, DOUBLE, INT, DOUBLE])\n ;share t as t1;" + + "\tcreateTable(dbHandle=db, table=t, tableName=`pt)\n" + _, err = ddb.RunScript(script) + So(err, ShouldBeNil) + opt := &mtw.Option{ + GoroutineCount: 1, + BatchSize: 10, + Throttle: 1, + PartitionCol: "tradeDate", + Database: "dfs://test_MultithreadedTableWriter", + TableName: "pt", + UserID: setup.UserName, + Password: setup.Password, + Address: setup.Address, + } + mtt, err := mtw.NewMultiGoroutineTable(opt) + So(err, ShouldBeNil) + threadTime := 10 + n := 1000 + waitGroup.Add(threadTime) + for i := 0; i < threadTime; i++ { + go threadinsertData(mtt, n) + insertDataTotable(n, "t1") + } + waitGroup.Wait() + mtt.WaitForGoroutineCompletion() + re1, err := ddb.RunScript("select * from loadTable('dfs://test_MultithreadedTableWriter',`pt) order by sym,tradeDate,tradePrice,vwap,volume,valueTrade") + So(err, 
ShouldBeNil) + re2, err := ddb.RunScript("select * from t1 order by sym,tradeDate,tradePrice,vwap,volume,valueTrade") + So(err, ShouldBeNil) + reTable1 := re1.(*model.Table) + reTable2 := re2.(*model.Table) + for i := 0; i < len(reTable1.GetColumnNames()); i++ { + So(reTable1.GetColumnByIndex(i).String(), ShouldEqual, reTable2.GetColumnByIndex(i).String()) + } + _, err = ddb.RunScript("undef(`t1,SHARED)") + So(err, ShouldBeNil) + _, err = ddb.RunScript("dropDatabase(\"dfs://test_MultithreadedTableWriter\")") + So(err, ShouldBeNil) + }) +} + +func TestMultiGoroutineTable_insert_dt_multipleThread_tsdb_groutine(t *testing.T) { + Convey("TestMultiGoroutineTable_insert_dt_multipleThread_tsdb_groutine", t, func() { + ddb, err := api.NewSimpleDolphinDBClient(context.TODO(), setup.Address, setup.UserName, setup.Password) + So(err, ShouldBeNil) + defer ddb.Close() + script := "Database = \"dfs://test_MultithreadedTableWriter\"\n" + + "if(exists(Database)){\n" + + "\tdropDatabase(Database)\t\n" + + "}\n" + + "db=database(Database, VALUE, 2012.01.01..2012.01.30,,'TSDB')\n" + + "t=table(1:0, `sym`tradeDate`tradePrice`vwap`volume`valueTrade, [SYMBOL, DATETIME, DOUBLE, DOUBLE, INT, DOUBLE])\n ;share t as t1;" + + "\tcreateTable(dbHandle=db, table=t, tableName=`pt, sortColumns=`sym)\n" + _, err = ddb.RunScript(script) + So(err, ShouldBeNil) + opt := &mtw.Option{ + GoroutineCount: 1, + BatchSize: 10, + Throttle: 1, + PartitionCol: "tradeDate", + Database: "dfs://test_MultithreadedTableWriter", + TableName: "pt", + UserID: setup.UserName, + Password: setup.Password, + Address: setup.Address, + } + mtt, err := mtw.NewMultiGoroutineTable(opt) + So(err, ShouldBeNil) + threadTime := 10 + n := 1000 + waitGroup.Add(threadTime) + for i := 0; i < threadTime; i++ { + go threadinsertData(mtt, n) + insertDataTotable(n, "t1") + } + waitGroup.Wait() + mtt.WaitForGoroutineCompletion() + re1, err := ddb.RunScript("select * from loadTable('dfs://test_MultithreadedTableWriter',`pt) order by sym,tradeDate,tradePrice,vwap,volume,valueTrade") + So(err, ShouldBeNil) + re2, err := ddb.RunScript("select * from t1 order by sym,tradeDate,tradePrice,vwap,volume,valueTrade") + So(err, ShouldBeNil) + reTable1 := re1.(*model.Table) + reTable2 := re2.(*model.Table) + for i := 0; i < len(reTable1.GetColumnNames()); i++ { + So(reTable1.GetColumnByIndex(i).String(), ShouldEqual, reTable2.GetColumnByIndex(i).String()) + } + _, err = ddb.RunScript("undef(`t1,SHARED)") + So(err, ShouldBeNil) + _, err = ddb.RunScript("dropDatabase(\"dfs://test_MultithreadedTableWriter\")") + So(err, ShouldBeNil) + }) +} + +func TestMultiGoroutineTable_insert_dt_oneThread(t *testing.T) { + Convey("TestMultiGoroutineTable_insert_dt_oneThread", t, func() { + ddb, err := api.NewSimpleDolphinDBClient(context.TODO(), setup.Address, setup.UserName, setup.Password) + So(err, ShouldBeNil) + defer ddb.Close() + script := "Database = \"dfs://test_MultithreadedTableWriter\"\n" + + "if(exists(Database)){\n" + + "\tdropDatabase(Database)\t\n" + + "}\n" + + "db=database(Database, VALUE, 2012.01.01..2012.01.30)\n" + + "t=table(1:0, `sym`tradeDate`tradePrice`vwap`volume`valueTrade, [SYMBOL, DATETIME, DOUBLE, DOUBLE, INT, DOUBLE])\n ;share t as t1;" + + "\tcreateTable(dbHandle=db, table=t, tableName=`pt)\n" + _, err = ddb.RunScript(script) + So(err, ShouldBeNil) + opt := &mtw.Option{ + GoroutineCount: 1, + BatchSize: 10, + Throttle: 1, + PartitionCol: "tradeDate", + Database: "dfs://test_MultithreadedTableWriter", + TableName: "pt", + UserID: setup.UserName, + Password: 
setup.Password, + Address: setup.Address, + } + mtt, err := mtw.NewMultiGoroutineTable(opt) + So(err, ShouldBeNil) + threadTime := 1 + n := 1000 + waitGroup.Add(threadTime) + for i := 0; i < threadTime; i++ { + go threadinsertData(mtt, n) + insertDataTotable(n, "t1") + } + waitGroup.Wait() + mtt.WaitForGoroutineCompletion() + re1, err := ddb.RunScript("select * from loadTable('dfs://test_MultithreadedTableWriter',`pt) order by sym,tradeDate,tradePrice,vwap,volume,valueTrade") + So(err, ShouldBeNil) + re2, err := ddb.RunScript("select * from t1 order by sym,tradeDate,tradePrice,vwap,volume,valueTrade") + So(err, ShouldBeNil) + reTable1 := re1.(*model.Table) + reTable2 := re2.(*model.Table) + for i := 0; i < len(reTable1.GetColumnNames()); i++ { + So(reTable1.GetColumnByIndex(i).String(), ShouldEqual, reTable2.GetColumnByIndex(i).String()) + } + _, err = ddb.RunScript("undef(`t1,SHARED)") + So(err, ShouldBeNil) + _, err = ddb.RunScript("dropDatabase(\"dfs://test_MultithreadedTableWriter\")") + So(err, ShouldBeNil) + }) +} + +func TestMultiGoroutineTable_insert_dfs_value(t *testing.T) { + Convey("TestMultiGoroutineTable_insert_dfs_value", t, func() { + ddb, err := api.NewSimpleDolphinDBClient(context.TODO(), setup.Address, setup.UserName, setup.Password) + So(err, ShouldBeNil) + defer ddb.Close() + script := "Database = \"dfs://test_MultithreadedTableWriter\"\n" + + "if(exists(Database)){\n" + + "\tdropDatabase(Database)\t\n" + + "}\n" + + "db=database(Database, VALUE, month(2012.01.01)+0..1)\n" + + "t=table(1:0, `sym`tradeDate`tradePrice`vwap`volume`valueTrade, [SYMBOL, DATETIME, DOUBLE, DOUBLE, INT, DOUBLE])\n ;share t as t1;" + + "\tcreatePartitionedTable(dbHandle=db, table=t, tableName=`pt, partitionColumns=[\"tradeDate\"])\n" + _, err = ddb.RunScript(script) + So(err, ShouldBeNil) + opt := &mtw.Option{ + GoroutineCount: 1, + BatchSize: 10, + Throttle: 20, + PartitionCol: "tradeDate", + Database: "dfs://test_MultithreadedTableWriter", + TableName: "pt", + UserID: setup.UserName, + Password: setup.Password, + Address: setup.Address, + } + mtt, err := mtw.NewMultiGoroutineTable(opt) + So(err, ShouldBeNil) + threadTime := 10 + n := 1000 + waitGroup.Add(threadTime) + for i := 0; i < threadTime; i++ { + go threadinsertData(mtt, n) + insertDataTotable(n, "t1") + } + waitGroup.Wait() + mtt.WaitForGoroutineCompletion() + re1, err := ddb.RunScript("select * from loadTable('dfs://test_MultithreadedTableWriter',`pt) order by sym,tradeDate,tradePrice,vwap,volume,valueTrade") + So(err, ShouldBeNil) + re2, err := ddb.RunScript("select * from t1 order by sym,tradeDate,tradePrice,vwap,volume,valueTrade") + So(err, ShouldBeNil) + reTable1 := re1.(*model.Table) + reTable2 := re2.(*model.Table) + for i := 0; i < len(reTable1.GetColumnNames()); i++ { + So(reTable1.GetColumnByIndex(i).String(), ShouldEqual, reTable2.GetColumnByIndex(i).String()) + } + _, err = ddb.RunScript("undef(`t1,SHARED)") + So(err, ShouldBeNil) + _, err = ddb.RunScript("dropDatabase(\"dfs://test_MultithreadedTableWriter\")") + So(err, ShouldBeNil) + }) +} + +func TestMultiGoroutineTable_insert_dfs_hash(t *testing.T) { + Convey("TestMultiGoroutineTable_insert_dfs_hash", t, func() { + ddb, err := api.NewSimpleDolphinDBClient(context.TODO(), setup.Address, setup.UserName, setup.Password) + So(err, ShouldBeNil) + defer ddb.Close() + script := "Database = \"dfs://test_MultithreadedTableWriter\"\n" + + "if(exists(Database)){\n" + + "\tdropDatabase(Database)\t\n" + + "}\n" + + "db=database(Database, HASH, [SYMBOL,3])\n" + + "t=table(1:0, 
`sym`tradeDate`tradePrice`vwap`volume`valueTrade, [SYMBOL, DATETIME, DOUBLE, DOUBLE, INT, DOUBLE])\n ;share t as t1;" + + "\tcreatePartitionedTable(dbHandle=db, table=t, tableName=`pt, partitionColumns=[\"sym\"])\n" + _, err = ddb.RunScript(script) + So(err, ShouldBeNil) + opt := &mtw.Option{ + GoroutineCount: 1, + BatchSize: 10, + Throttle: 2, + PartitionCol: "sym", + Database: "dfs://test_MultithreadedTableWriter", + TableName: "pt", + UserID: setup.UserName, + Password: setup.Password, + Address: setup.Address, + } + mtt, err := mtw.NewMultiGoroutineTable(opt) + So(err, ShouldBeNil) + threadTime := 10 + n := 1000 + waitGroup.Add(threadTime) + for i := 0; i < threadTime; i++ { + go threadinsertData(mtt, n) + insertDataTotable(n, "t1") + } + waitGroup.Wait() + mtt.WaitForGoroutineCompletion() + re1, err := ddb.RunScript("select * from loadTable('dfs://test_MultithreadedTableWriter',`pt) order by sym,tradeDate,tradePrice,vwap,volume,valueTrade") + So(err, ShouldBeNil) + re2, err := ddb.RunScript("select * from t1 order by sym,tradeDate,tradePrice,vwap,volume,valueTrade") + So(err, ShouldBeNil) + reTable1 := re1.(*model.Table) + reTable2 := re2.(*model.Table) + for i := 0; i < len(reTable1.GetColumnNames()); i++ { + So(reTable1.GetColumnByIndex(i).String(), ShouldEqual, reTable2.GetColumnByIndex(i).String()) + } + _, err = ddb.RunScript("undef(`t1,SHARED)") + So(err, ShouldBeNil) + _, err = ddb.RunScript("dropDatabase(\"dfs://test_MultithreadedTableWriter\")") + So(err, ShouldBeNil) + }) +} + +func TestMultiGoroutineTable_insert_dfs_list(t *testing.T) { + Convey("TestMultiGoroutineTable_insert_dfs_list", t, func() { + ddb, err := api.NewSimpleDolphinDBClient(context.TODO(), setup.Address, setup.UserName, setup.Password) + So(err, ShouldBeNil) + defer ddb.Close() + script := "Database = \"dfs://test_MultithreadedTableWriter\"\n" + + "if(exists(Database)){\n" + + "\tdropDatabase(Database)\t\n" + + "}\n" + + "db=database(Database, LIST, [`AAPL0`AAPL1`AAPL2, `AAPL3`AAPL4`AAPL5, `AAPL6`AAPL7`AAPL8`AAPL9])\n" + + "t=table(1:0, `sym`tradeDate`tradePrice`vwap`volume`valueTrade, [SYMBOL, DATETIME, DOUBLE, DOUBLE, INT, DOUBLE])\n ;share t as t1;" + + "\tcreatePartitionedTable(dbHandle=db, table=t, tableName=`pt, partitionColumns=[\"sym\"])\n" + _, err = ddb.RunScript(script) + So(err, ShouldBeNil) + opt := &mtw.Option{ + GoroutineCount: 1, + BatchSize: 10, + Throttle: 20, + PartitionCol: "sym", + Database: "dfs://test_MultithreadedTableWriter", + TableName: "pt", + UserID: setup.UserName, + Password: setup.Password, + Address: setup.Address, + } + mtt, err := mtw.NewMultiGoroutineTable(opt) + So(err, ShouldBeNil) + threadTime := 10 + n := 1000 + waitGroup.Add(threadTime) + for i := 0; i < threadTime; i++ { + go threadinsertData(mtt, n) + insertDataTotable(n, "t1") + } + waitGroup.Wait() + mtt.WaitForGoroutineCompletion() + re1, err := ddb.RunScript("select * from loadTable('dfs://test_MultithreadedTableWriter',`pt) order by sym,tradeDate,tradePrice,vwap,volume,valueTrade") + So(err, ShouldBeNil) + re2, err := ddb.RunScript("select * from t1 order by sym,tradeDate,tradePrice,vwap,volume,valueTrade") + So(err, ShouldBeNil) + reTable1 := re1.(*model.Table) + reTable2 := re2.(*model.Table) + for i := 0; i < len(reTable1.GetColumnNames()); i++ { + So(reTable1.GetColumnByIndex(i).String(), ShouldEqual, reTable2.GetColumnByIndex(i).String()) + } + _, err = ddb.RunScript("undef(`t1, SHARED)") + So(err, ShouldBeNil) + _, err = ddb.RunScript("dropDatabase(\"dfs://test_MultithreadedTableWriter\")") + So(err, 
ShouldBeNil) + }) +} + +func TestMultiGoroutineTable_insert_dfs_value_value(t *testing.T) { + Convey("TestMultiGoroutineTable_insert_dfs_value_value", t, func() { + ddb, err := api.NewSimpleDolphinDBClient(context.TODO(), setup.Address, setup.UserName, setup.Password) + So(err, ShouldBeNil) + defer ddb.Close() + script := "Database = \"dfs://test_MultithreadedTableWriter\"\n" + + "if(exists(Database)){\n" + + "\tdropDatabase(Database)\t\n" + + "}\n" + + "db1=database(\"\", VALUE, 1969.12.01..1969.12.10)\n" + + "\tdb2=database(\"\", VALUE, 0..10)\n" + + "\tdb=database(Database, COMPO, [db1, db2], , \"OLAP\")\n" + + "t=table(1:0, `sym`tradeDate`tradePrice`vwap`volume`valueTrade, [SYMBOL, DATETIME, DOUBLE, DOUBLE, INT, DOUBLE])\n;share t as t1;" + + "\tcreatePartitionedTable(dbHandle=db, table=t, tableName=`pt, partitionColumns=[\"tradeDate\", \"volume\"])\n" + _, err = ddb.RunScript(script) + So(err, ShouldBeNil) + opt := &mtw.Option{ + GoroutineCount: 1, + BatchSize: 1000, + Throttle: 20, + PartitionCol: "tradeDate", + Database: "dfs://test_MultithreadedTableWriter", + TableName: "pt", + UserID: setup.UserName, + Password: setup.Password, + Address: setup.Address, + } + mtt, err := mtw.NewMultiGoroutineTable(opt) + So(err, ShouldBeNil) + threadTime := 10 + n := 1000 + waitGroup.Add(threadTime) + for i := 0; i < threadTime; i++ { + go threadinsertData(mtt, n) + insertDataTotable(n, "t1") + } + waitGroup.Wait() + mtt.WaitForGoroutineCompletion() + re1, err := ddb.RunScript("select * from loadTable('dfs://test_MultithreadedTableWriter',`pt) order by sym,tradeDate,tradePrice,vwap,volume,valueTrade") + So(err, ShouldBeNil) + re2, err := ddb.RunScript("select * from t1 order by sym,tradeDate,tradePrice,vwap,volume,valueTrade") + So(err, ShouldBeNil) + reTable1 := re1.(*model.Table) + reTable2 := re2.(*model.Table) + for i := 0; i < len(reTable1.GetColumnNames()); i++ { + So(reTable1.GetColumnByIndex(i).String(), ShouldEqual, reTable2.GetColumnByIndex(i).String()) + } + _, err = ddb.RunScript("undef(`t1, SHARED)") + So(err, ShouldBeNil) + _, err = ddb.RunScript("dropDatabase(\"dfs://test_MultithreadedTableWriter\")") + So(err, ShouldBeNil) + }) +} + +func TestMultiGoroutineTable_insert_dfs_value_range(t *testing.T) { + Convey("TestMultiGoroutineTable_insert_dfs_value_range", t, func() { + ddb, err := api.NewSimpleDolphinDBClient(context.TODO(), setup.Address, setup.UserName, setup.Password) + So(err, ShouldBeNil) + defer ddb.Close() + script := "Database = \"dfs://test_MultithreadedTableWriter\"\n" + + "if(exists(Database)){\n" + + "\tdropDatabase(Database)\t\n" + + "}\n" + + "db1=database(\"\", VALUE, 1969.12.01..1969.12.10)\n" + + "\tdb2=database(\"\", RANGE,0 5 10)\n" + + "\tdb=database(Database, COMPO, [db1, db2], , \"OLAP\")\n" + + "t=table(1:0, `sym`tradeDate`tradePrice`vwap`volume`valueTrade, [SYMBOL, DATETIME, DOUBLE, DOUBLE, INT, DOUBLE])\n;share t as t1;" + + "\tcreatePartitionedTable(dbHandle=db, table=t, tableName=`pt, partitionColumns=[\"tradeDate\", \"volume\"])\n" + _, err = ddb.RunScript(script) + So(err, ShouldBeNil) + opt := &mtw.Option{ + GoroutineCount: 1, + BatchSize: 1000, + Throttle: 20, + PartitionCol: "tradeDate", + Database: "dfs://test_MultithreadedTableWriter", + TableName: "pt", + UserID: setup.UserName, + Password: setup.Password, + Address: setup.Address, + } + mtt, err := mtw.NewMultiGoroutineTable(opt) + So(err, ShouldBeNil) + threadTime := 10 + n := 1000 + waitGroup.Add(threadTime) + for i := 0; i < threadTime; i++ { + go threadinsertData(mtt, n) + 
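+			// Each iteration starts a writer goroutine that pushes n rows
+			// through the MultiGoroutineTable while the test goroutine writes
+			// the same test rows into the shared table t1 (helpers from the
+			// test utilities), so `pt` and t1 can be compared column by
+			// column below.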
insertDataTotable(n, "t1")
+		}
+		waitGroup.Wait()
+		mtt.WaitForGoroutineCompletion()
+		re1, err := ddb.RunScript("select * from loadTable('dfs://test_MultithreadedTableWriter',`pt) order by sym,tradeDate,tradePrice,vwap,volume,valueTrade")
+		So(err, ShouldBeNil)
+		re2, err := ddb.RunScript("select * from t1 order by sym,tradeDate,tradePrice,vwap,volume,valueTrade")
+		So(err, ShouldBeNil)
+		reTable1 := re1.(*model.Table)
+		reTable2 := re2.(*model.Table)
+		for i := 0; i < len(reTable1.GetColumnNames()); i++ {
+			So(reTable1.GetColumnByIndex(i).String(), ShouldEqual, reTable2.GetColumnByIndex(i).String())
+		}
+		_, err = ddb.RunScript("undef(`t1, SHARED)")
+		So(err, ShouldBeNil)
+		_, err = ddb.RunScript("dropDatabase(\"dfs://test_MultithreadedTableWriter\")")
+		So(err, ShouldBeNil)
+	})
+}
+
+func TestMultiGoroutineTable_insert_dfs_range_value(t *testing.T) {
+	Convey("TestMultiGoroutineTable_insert_dfs_range_value", t, func() {
+		ddb, err := api.NewSimpleDolphinDBClient(context.TODO(), setup.Address, setup.UserName, setup.Password)
+		So(err, ShouldBeNil)
+		defer ddb.Close()
+		script := "Database = \"dfs://test_MultithreadedTableWriter\"\n" +
+			"if(exists(Database)){\n" +
+			"\tdropDatabase(Database)\t\n" +
+			"}\n" +
+			"db1=database(\"\", VALUE, 1969.12.01..1969.12.10)\n" +
+			"\tdb2=database(\"\", RANGE,0 5 10)\n" +
+			"\tdb=database(Database, COMPO, [db2, db1], , \"OLAP\")\n" +
+			"t=table(1:0, `sym`tradeDate`tradePrice`vwap`volume`valueTrade, [SYMBOL, DATETIME, DOUBLE, DOUBLE, INT, DOUBLE])\n;share t as t1;" +
+			"\tcreatePartitionedTable(dbHandle=db, table=t, tableName=`pt, partitionColumns=[\"volume\", \"tradeDate\"])\n"
+		_, err = ddb.RunScript(script)
+		So(err, ShouldBeNil)
+		opt := &mtw.Option{
+			GoroutineCount: 1,
+			BatchSize:      1000,
+			Throttle:       20,
+			PartitionCol:   "volume",
+			Database:       "dfs://test_MultithreadedTableWriter",
+			TableName:      "pt",
+			UserID:         setup.UserName,
+			Password:       setup.Password,
+			Address:        setup.Address,
+		}
+		mtt, err := mtw.NewMultiGoroutineTable(opt)
+		So(err, ShouldBeNil)
+		threadTime := 10
+		n := 1000
+		waitGroup.Add(threadTime)
+		for i := 0; i < threadTime; i++ {
+			go threadinsertData(mtt, n)
+			insertDataTotable(n, "t1")
+		}
+		waitGroup.Wait()
+		mtt.WaitForGoroutineCompletion()
+		re1, err := ddb.RunScript("select * from loadTable('dfs://test_MultithreadedTableWriter',`pt) order by sym,tradeDate,tradePrice,vwap,volume,valueTrade")
+		So(err, ShouldBeNil)
+		re2, err := ddb.RunScript("select * from t1 order by sym,tradeDate,tradePrice,vwap,volume,valueTrade")
+		So(err, ShouldBeNil)
+		reTable1 := re1.(*model.Table)
+		reTable2 := re2.(*model.Table)
+		for i := 0; i < len(reTable1.GetColumnNames()); i++ {
+			So(reTable1.GetColumnByIndex(i).String(), ShouldEqual, reTable2.GetColumnByIndex(i).String())
+		}
+		_, err = ddb.RunScript("undef(`t1, SHARED)")
+		So(err, ShouldBeNil)
+		_, err = ddb.RunScript("dropDatabase(\"dfs://test_MultithreadedTableWriter\")")
+		So(err, ShouldBeNil)
+	})
+}
+
+func TestMultiGoroutineTable_insert_dfs_range_range(t *testing.T) {
+	Convey("TestMultiGoroutineTable_insert_dfs_range_range", t, func() {
+		ddb, err := api.NewSimpleDolphinDBClient(context.TODO(), setup.Address, setup.UserName, setup.Password)
+		So(err, ShouldBeNil)
+		defer ddb.Close()
+		script := "Database = \"dfs://test_MultithreadedTableWriter\"\n" +
+			"if(exists(Database)){\n" +
+			"\tdropDatabase(Database)\t\n" +
+			"}\n" +
+			"db1=database(\"\", RANGE, 1969.12.01 1969.12.05 1969.12.11)\n" +
+			"\tdb2=database(\"\", RANGE,0 5 11)\n" +
+			"\tdb=database(Database, COMPO, [db2, db1], , 
\"OLAP\")\n" + + "t=table(1:0, `sym`tradeDate`tradePrice`vwap`volume`valueTrade, [SYMBOL, DATETIME, DOUBLE, DOUBLE, INT, DOUBLE])\n;share t as t1;" + + "\tcreatePartitionedTable(dbHandle=db, table=t, tableName=`pt, partitionColumns=[\"volume\", \"tradeDate\"])\n" + _, err = ddb.RunScript(script) + So(err, ShouldBeNil) + opt := &mtw.Option{ + GoroutineCount: 1, + BatchSize: 1000, + Throttle: 20, + PartitionCol: "volume", + Database: "dfs://test_MultithreadedTableWriter", + TableName: "pt", + UserID: setup.UserName, + Password: setup.Password, + Address: setup.Address, + } + mtt, err := mtw.NewMultiGoroutineTable(opt) + So(err, ShouldBeNil) + threadTime := 10 + n := 1000 + waitGroup.Add(threadTime) + for i := 0; i < threadTime; i++ { + go threadinsertData(mtt, n) + insertDataTotable(n, "t1") + } + waitGroup.Wait() + mtt.WaitForGoroutineCompletion() + re1, err := ddb.RunScript("select * from loadTable('dfs://test_MultithreadedTableWriter',`pt) order by sym,tradeDate,tradePrice,vwap,volume,valueTrade") + So(err, ShouldBeNil) + re2, err := ddb.RunScript("select * from t1 order by sym,tradeDate,tradePrice,vwap,volume,valueTrade") + So(err, ShouldBeNil) + reTable1 := re1.(*model.Table) + reTable2 := re2.(*model.Table) + for i := 0; i < len(reTable1.GetColumnNames()); i++ { + So(reTable1.GetColumnByIndex(i).String(), ShouldEqual, reTable2.GetColumnByIndex(i).String()) + } + _, err = ddb.RunScript("undef(`t1, SHARED)") + So(err, ShouldBeNil) + _, err = ddb.RunScript("dropDatabase(\"dfs://test_MultithreadedTableWriter\")") + So(err, ShouldBeNil) + }) +} + +func TestMultiGoroutineTable_insert_dfs_range_hash(t *testing.T) { + Convey("TestMultiGoroutineTable_insert_dfs_range_hash", t, func() { + ddb, err := api.NewSimpleDolphinDBClient(context.TODO(), setup.Address, setup.UserName, setup.Password) + So(err, ShouldBeNil) + defer ddb.Close() + script := "Database = \"dfs://test_MultithreadedTableWriter\"\n" + + "if(exists(Database)){\n" + + "\tdropDatabase(Database)\t\n" + + "}\n" + + "db1=database(\"\", RANGE, 1969.12.01 1969.12.05 1969.12.11)\n" + + "\tdb2=database(\"\", HASH,[INT,3])\n" + + "\tdb=database(Database, COMPO, [db1, db2], , \"OLAP\")\n" + + "t=table(1:0, `sym`tradeDate`tradePrice`vwap`volume`valueTrade, [SYMBOL, DATETIME, DOUBLE, DOUBLE, INT, DOUBLE])\n;share t as t1;" + + "\tcreatePartitionedTable(dbHandle=db, table=t, tableName=`pt, partitionColumns=[\"tradeDate\", \"volume\"])\n" + _, err = ddb.RunScript(script) + So(err, ShouldBeNil) + opt := &mtw.Option{ + GoroutineCount: 1, + BatchSize: 1000, + Throttle: 20, + PartitionCol: "tradeDate", + Database: "dfs://test_MultithreadedTableWriter", + TableName: "pt", + UserID: setup.UserName, + Password: setup.Password, + Address: setup.Address, + } + mtt, err := mtw.NewMultiGoroutineTable(opt) + So(err, ShouldBeNil) + threadTime := 10 + n := 1000 + waitGroup.Add(threadTime) + for i := 0; i < threadTime; i++ { + go threadinsertData(mtt, n) + insertDataTotable(n, "t1") + } + waitGroup.Wait() + mtt.WaitForGoroutineCompletion() + re1, err := ddb.RunScript("select * from loadTable('dfs://test_MultithreadedTableWriter',`pt) order by sym,tradeDate,tradePrice,vwap,volume,valueTrade") + So(err, ShouldBeNil) + re2, err := ddb.RunScript("select * from t1 order by sym,tradeDate,tradePrice,vwap,volume,valueTrade") + So(err, ShouldBeNil) + reTable1 := re1.(*model.Table) + reTable2 := re2.(*model.Table) + for i := 0; i < len(reTable1.GetColumnNames()); i++ { + So(reTable1.GetColumnByIndex(i).String(), ShouldEqual, reTable2.GetColumnByIndex(i).String()) + } + 
_, err = ddb.RunScript("undef(`t1, SHARED)") + So(err, ShouldBeNil) + _, err = ddb.RunScript("dropDatabase(\"dfs://test_MultithreadedTableWriter\")") + So(err, ShouldBeNil) + }) +} + +func TestMultiGoroutineTable_insert_dfs_hash_range(t *testing.T) { + Convey("TestMultiGoroutineTable_insert_dfs_hash_range", t, func() { + ddb, err := api.NewSimpleDolphinDBClient(context.TODO(), setup.Address, setup.UserName, setup.Password) + So(err, ShouldBeNil) + defer ddb.Close() + script := "Database = \"dfs://test_MultithreadedTableWriter\"\n" + + "if(exists(Database)){\n" + + "\tdropDatabase(Database)\t\n" + + "}\n" + + "db1=database(\"\", RANGE, 1969.12.01 1969.12.05 1969.12.11)\n" + + "\tdb2=database(\"\", HASH,[INT,3])\n" + + "\tdb=database(Database, COMPO, [db2, db1], , \"OLAP\")\n" + + "t=table(1:0, `sym`tradeDate`tradePrice`vwap`volume`valueTrade, [SYMBOL, DATETIME, DOUBLE, DOUBLE, INT, DOUBLE])\n;share t as t1;" + + "\tcreatePartitionedTable(dbHandle=db, table=t, tableName=`pt, partitionColumns=[\"volume\", \"tradeDate\"])\n" + _, err = ddb.RunScript(script) + So(err, ShouldBeNil) + opt := &mtw.Option{ + GoroutineCount: 1, + BatchSize: 1000, + Throttle: 20, + PartitionCol: "volume", + Database: "dfs://test_MultithreadedTableWriter", + TableName: "pt", + UserID: setup.UserName, + Password: setup.Password, + Address: setup.Address, + } + mtt, err := mtw.NewMultiGoroutineTable(opt) + So(err, ShouldBeNil) + threadTime := 10 + n := 1000 + waitGroup.Add(threadTime) + for i := 0; i < threadTime; i++ { + go threadinsertData(mtt, n) + insertDataTotable(n, "t1") + } + waitGroup.Wait() + mtt.WaitForGoroutineCompletion() + re1, err := ddb.RunScript("select * from loadTable('dfs://test_MultithreadedTableWriter',`pt) order by sym,tradeDate,tradePrice,vwap,volume,valueTrade") + So(err, ShouldBeNil) + re2, err := ddb.RunScript("select * from t1 order by sym,tradeDate,tradePrice,vwap,volume,valueTrade") + So(err, ShouldBeNil) + reTable1 := re1.(*model.Table) + reTable2 := re2.(*model.Table) + for i := 0; i < len(reTable1.GetColumnNames()); i++ { + So(reTable1.GetColumnByIndex(i).String(), ShouldEqual, reTable2.GetColumnByIndex(i).String()) + } + _, err = ddb.RunScript("undef(`t1, SHARED)") + So(err, ShouldBeNil) + _, err = ddb.RunScript("dropDatabase(\"dfs://test_MultithreadedTableWriter\")") + So(err, ShouldBeNil) + }) +} + +func TestMultiGoroutineTable_insert_dfs_hash_hash_chunkGranularity_database(t *testing.T) { + Convey("TestMultiGoroutineTable_insert_dfs_hash_hash", t, func() { + ddb, err := api.NewSimpleDolphinDBClient(context.TODO(), setup.Address, setup.UserName, setup.Password) + So(err, ShouldBeNil) + defer ddb.Close() + script := "Database = \"dfs://test_MultithreadedTableWriter\"\n" + + "if(exists(Database)){\n" + + "\tdropDatabase(Database)\t\n" + + "}\n" + + "db1=database(\"\", HASH, [DATEHOUR,3])\n" + + "\tdb2=database(\"\", HASH,[SYMBOL,3])\n" + + "\tdb=database(Database, COMPO, [db1, db2], , \"OLAP\", chunkGranularity=\"DATABASE\")\n" + + "t=table(1:0, `sym`tradeDate`tradePrice`vwap`volume`valueTrade, [SYMBOL, DATETIME, DOUBLE, DOUBLE, INT, DOUBLE]);\n share t as t1;" + + "\tcreatePartitionedTable(dbHandle=db, table=t, tableName=`pt, partitionColumns=[\"tradeDate\",\"sym\"],compressMethods={tradeDate:\"delta\", volume:\"delta\"})\n" + _, err = ddb.RunScript(script) + So(err, ShouldBeNil) + opt := &mtw.Option{ + GoroutineCount: 1, + BatchSize: 1000, + Throttle: 20, + PartitionCol: "tradeDate", + Database: "dfs://test_MultithreadedTableWriter", + TableName: "pt", + UserID: setup.UserName, + 
Password:       setup.Password,
+			Address:        setup.Address,
+		}
+		mtt, err := mtw.NewMultiGoroutineTable(opt)
+		So(err, ShouldBeNil)
+		threadTime := 10
+		n := 1000
+		waitGroup.Add(threadTime)
+		for i := 0; i < threadTime; i++ {
+			go threadinsertData(mtt, n)
+			insertDataTotable(n, "t1")
+		}
+		waitGroup.Wait()
+		mtt.WaitForGoroutineCompletion()
+		re1, err := ddb.RunScript("select * from loadTable('dfs://test_MultithreadedTableWriter',`pt) order by sym,tradeDate,tradePrice,vwap,volume,valueTrade")
+		So(err, ShouldBeNil)
+		re2, err := ddb.RunScript("select * from t1 order by sym,tradeDate,tradePrice,vwap,volume,valueTrade")
+		So(err, ShouldBeNil)
+		reTable1 := re1.(*model.Table)
+		reTable2 := re2.(*model.Table)
+		for i := 0; i < len(reTable1.GetColumnNames()); i++ {
+			So(reTable1.GetColumnByIndex(i).String(), ShouldEqual, reTable2.GetColumnByIndex(i).String())
+		}
+		_, err = ddb.RunScript("undef(`t1, SHARED)")
+		So(err, ShouldBeNil)
+		_, err = ddb.RunScript("dropDatabase(\"dfs://test_MultithreadedTableWriter\")")
+		So(err, ShouldBeNil)
+	})
+}
+
+func TestMultiGoroutineTable_insert_dfs_hash_value_chunkGranularity_database(t *testing.T) {
+	Convey("TestMultiGoroutineTable_insert_dfs_hash_value_chunkGranularity_database", t, func() {
+		ddb, err := api.NewSimpleDolphinDBClient(context.TODO(), setup.Address, setup.UserName, setup.Password)
+		So(err, ShouldBeNil)
+		defer ddb.Close()
+		script := "Database = \"dfs://test_MultithreadedTableWriter\"\n" +
+			"if(exists(Database)){\n" +
+			"\tdropDatabase(Database)\t\n" +
+			"}\n" +
+			"db1=database(\"\", HASH, [DATEHOUR,3])\n" +
+			"\tdb2=database(\"\", VALUE, 0..10)\n" +
+			"\tdb=database(Database, COMPO, [db1, db2], , \"OLAP\", chunkGranularity=\"DATABASE\")\n" +
+			"t=table(1:0, `sym`tradeDate`tradePrice`vwap`volume`valueTrade, [SYMBOL, DATETIME, DOUBLE, DOUBLE, INT, DOUBLE])\n;share t as t1;" +
+			"\tcreatePartitionedTable(dbHandle=db, table=t, tableName=`pt, partitionColumns=[\"tradeDate\",\"volume\"])\n"
+		_, err = ddb.RunScript(script)
+		So(err, ShouldBeNil)
+		opt := &mtw.Option{
+			GoroutineCount: 1,
+			BatchSize:      1000,
+			Throttle:       20,
+			PartitionCol:   "tradeDate",
+			Database:       "dfs://test_MultithreadedTableWriter",
+			TableName:      "pt",
+			UserID:         setup.UserName,
+			Password:       setup.Password,
+			Address:        setup.Address,
+		}
+		mtt, err := mtw.NewMultiGoroutineTable(opt)
+		So(err, ShouldBeNil)
+		threadTime := 10
+		n := 1000
+		waitGroup.Add(threadTime)
+		for i := 0; i < threadTime; i++ {
+			go threadinsertData(mtt, n)
+			insertDataTotable(n, "t1")
+		}
+		waitGroup.Wait()
+		mtt.WaitForGoroutineCompletion()
+		re1, err := ddb.RunScript("select * from loadTable('dfs://test_MultithreadedTableWriter',`pt) order by sym,tradeDate,tradePrice,vwap,volume,valueTrade")
+		So(err, ShouldBeNil)
+		re2, err := ddb.RunScript("select * from t1 order by sym,tradeDate,tradePrice,vwap,volume,valueTrade")
+		So(err, ShouldBeNil)
+		reTable1 := re1.(*model.Table)
+		reTable2 := re2.(*model.Table)
+		for i := 0; i < len(reTable1.GetColumnNames()); i++ {
+			So(reTable1.GetColumnByIndex(i).String(), ShouldEqual, reTable2.GetColumnByIndex(i).String())
+		}
+		_, err = ddb.RunScript("undef(`t1, SHARED)")
+		So(err, ShouldBeNil)
+		_, err = ddb.RunScript("dropDatabase(\"dfs://test_MultithreadedTableWriter\")")
+		So(err, ShouldBeNil)
+	})
+}
+
+func TestMultiGoroutineTable_insert_dfs_hash_range_chunkGranularity_database(t *testing.T) {
+	Convey("TestMultiGoroutineTable_insert_dfs_hash_range_chunkGranularity_database", t, func() {
+		ddb, err := api.NewSimpleDolphinDBClient(context.TODO(), setup.Address, setup.UserName, setup.Password)
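+		// Same harness as the cases above, now against a HASH(datehour) x
+		// RANGE(int) COMPO database created with chunkGranularity="DATABASE"
+		// (see the script below).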
+		So(err, ShouldBeNil)
+		defer ddb.Close()
+		script := "Database = \"dfs://test_MultithreadedTableWriter\"\n" +
+			"if(exists(Database)){\n" +
+			"\tdropDatabase(Database)\t\n" +
+			"}\n" +
+			"db1=database(\"\", HASH, [DATEHOUR,3])\n" +
+			"\tdb2=database(\"\", RANGE, 0 5 11)\n" +
+			"\tdb=database(Database, COMPO, [db1, db2], , \"OLAP\", chunkGranularity=\"DATABASE\")\n" +
+			"t=table(1:0, `sym`tradeDate`tradePrice`vwap`volume`valueTrade, [SYMBOL, DATETIME, DOUBLE, DOUBLE, INT, DOUBLE])\n;share t as t1;" +
+			"\tcreatePartitionedTable(dbHandle=db, table=t, tableName=`pt, partitionColumns=[\"tradeDate\",\"volume\"])\n"
+		_, err = ddb.RunScript(script)
+		So(err, ShouldBeNil)
+		opt := &mtw.Option{
+			GoroutineCount: 1,
+			BatchSize:      1000,
+			Throttle:       20,
+			PartitionCol:   "tradeDate",
+			Database:       "dfs://test_MultithreadedTableWriter",
+			TableName:      "pt",
+			UserID:         setup.UserName,
+			Password:       setup.Password,
+			Address:        setup.Address,
+		}
+		mtt, err := mtw.NewMultiGoroutineTable(opt)
+		So(err, ShouldBeNil)
+		threadTime := 10
+		n := 1000
+		waitGroup.Add(threadTime)
+		for i := 0; i < threadTime; i++ {
+			go threadinsertData(mtt, n)
+			insertDataTotable(n, "t1")
+		}
+		waitGroup.Wait()
+		mtt.WaitForGoroutineCompletion()
+		re1, err := ddb.RunScript("select * from loadTable('dfs://test_MultithreadedTableWriter',`pt) order by sym,tradeDate,tradePrice,vwap,volume,valueTrade")
+		So(err, ShouldBeNil)
+		re2, err := ddb.RunScript("select * from t1 order by sym,tradeDate,tradePrice,vwap,volume,valueTrade")
+		So(err, ShouldBeNil)
+		reTable1 := re1.(*model.Table)
+		reTable2 := re2.(*model.Table)
+		for i := 0; i < len(reTable1.GetColumnNames()); i++ {
+			So(reTable1.GetColumnByIndex(i).String(), ShouldEqual, reTable2.GetColumnByIndex(i).String())
+		}
+		_, err = ddb.RunScript("undef(`t1, SHARED)")
+		So(err, ShouldBeNil)
+		_, err = ddb.RunScript("dropDatabase(\"dfs://test_MultithreadedTableWriter\")")
+		So(err, ShouldBeNil)
+	})
+}
+
+func TestMultiGoroutineTable_insert_PartitionType_datehour_partitioncoldatetime(t *testing.T) {
+	Convey("TestMultiGoroutineTable_insert_PartitionType_datehour_partitioncoldatetime", t, func() {
+		ddb, err := api.NewSimpleDolphinDBClient(context.TODO(), setup.Address, setup.UserName, setup.Password)
+		So(err, ShouldBeNil)
+		defer ddb.Close()
+		script := "Database = \"dfs://test_MultithreadedTableWriter\"\n" +
+			"if(exists(Database)){\n" +
+			"\tdropDatabase(Database)\t\n" +
+			"}\n" +
+			"db1=database(\"\", VALUE, date(1969.12.01)+0..10)\n" +
+			"\tdb2=database(\"\", HASH, [SYMBOL, 2])\n" +
+			"\tdb=database(Database, COMPO, [db1, db2], , \"OLAP\", chunkGranularity=\"DATABASE\")\n" +
+			"t=table(1:0, `sym`tradeDate`tradePrice`vwap`volume`valueTrade, [SYMBOL, DATETIME, DOUBLE, DOUBLE, INT, DOUBLE])\n;share t as t1;" +
+			"\tcreatePartitionedTable(dbHandle=db, table=t, tableName=`pt, partitionColumns=[\"tradeDate\",\"sym\"])\n"
+		_, err = ddb.RunScript(script)
+		So(err, ShouldBeNil)
+		opt := &mtw.Option{
+			GoroutineCount: 1,
+			BatchSize:      1000,
+			Throttle:       20,
+			PartitionCol:   "tradeDate",
+			Database:       "dfs://test_MultithreadedTableWriter",
+			TableName:      "pt",
+			UserID:         setup.UserName,
+			Password:       setup.Password,
+			Address:        setup.Address,
+		}
+		mtt, err := mtw.NewMultiGoroutineTable(opt)
+		So(err, ShouldBeNil)
+		threadTime := 10
+		n := 1000
+		waitGroup.Add(threadTime)
+		for i := 0; i < threadTime; i++ {
+			go threadinsertData(mtt, n)
+			insertDataTotable(n, "t1")
+		}
+		waitGroup.Wait()
+		mtt.WaitForGoroutineCompletion()
+		re1, err := ddb.RunScript("select * from 
loadTable('dfs://test_MultithreadedTableWriter',`pt) order by sym,tradeDate,tradePrice,vwap,volume,valueTrade") + So(err, ShouldBeNil) + re2, err := ddb.RunScript("select * from t1 order by sym,tradeDate,tradePrice,vwap,volume,valueTrade") + So(err, ShouldBeNil) + reTable1 := re1.(*model.Table) + reTable2 := re2.(*model.Table) + for i := 0; i < len(reTable1.GetColumnNames()); i++ { + So(reTable1.GetColumnByIndex(i).String(), ShouldEqual, reTable2.GetColumnByIndex(i).String()) + } + _, err = ddb.RunScript("undef(`t1, SHARED)") + So(err, ShouldBeNil) + _, err = ddb.RunScript("dropDatabase(\"dfs://test_MultithreadedTableWriter\")") + So(err, ShouldBeNil) + }) +} + +func TestMultiGoroutineTable_insert_PartitionType_datehour_partitioncoltimestamp(t *testing.T) { + Convey("TestMultiGoroutineTable_insert_PartitionType_datehour_partitioncoltimestamp", t, func() { + ddb, err := api.NewSimpleDolphinDBClient(context.TODO(), setup.Address, setup.UserName, setup.Password) + So(err, ShouldBeNil) + defer ddb.Close() + script := "Database = \"dfs://test_MultithreadedTableWriter\"\n" + + "if(exists(Database)){\n" + + "\tdropDatabase(Database)\t\n" + + "}\n" + + "db1=database(\"\", VALUE, date(1969.12.01)+0..10)\n" + + "\tdb2=database(\"\", HASH, [SYMBOL, 2])\n" + + "\tdb=database(Database, COMPO, [db1, db2], , \"OLAP\", chunkGranularity=\"DATABASE\")\n" + + "t=table(1:0, `sym`tradeDate`tradePrice`vwap`volume`valueTrade, [SYMBOL, TIMESTAMP, DOUBLE, DOUBLE, INT, DOUBLE])\n;share t as t1;" + + "\tcreatePartitionedTable(dbHandle=db, table=t, tableName=`pt, partitionColumns=[\"tradeDate\",\"sym\"])\n" + _, err = ddb.RunScript(script) + So(err, ShouldBeNil) + opt := &mtw.Option{ + GoroutineCount: 1, + BatchSize: 1000, + Throttle: 20, + PartitionCol: "tradeDate", + Database: "dfs://test_MultithreadedTableWriter", + TableName: "pt", + UserID: setup.UserName, + Password: setup.Password, + Address: setup.Address, + } + mtt, err := mtw.NewMultiGoroutineTable(opt) + So(err, ShouldBeNil) + threadTime := 10 + n := 1000 + waitGroup.Add(threadTime) + for i := 0; i < threadTime; i++ { + go threadinsertData(mtt, n) + insertDataTotable(n, "t1") + } + waitGroup.Wait() + mtt.WaitForGoroutineCompletion() + re1, err := ddb.RunScript("select * from loadTable('dfs://test_MultithreadedTableWriter',`pt) order by sym,tradeDate,tradePrice,vwap,volume,valueTrade") + So(err, ShouldBeNil) + re2, err := ddb.RunScript("select * from t1 order by sym,tradeDate,tradePrice,vwap,volume,valueTrade") + So(err, ShouldBeNil) + reTable1 := re1.(*model.Table) + reTable2 := re2.(*model.Table) + for i := 0; i < len(reTable1.GetColumnNames()); i++ { + So(reTable1.GetColumnByIndex(i).String(), ShouldEqual, reTable2.GetColumnByIndex(i).String()) + } + _, err = ddb.RunScript("undef(`t1, SHARED)") + So(err, ShouldBeNil) + _, err = ddb.RunScript("dropDatabase(\"dfs://test_MultithreadedTableWriter\")") + So(err, ShouldBeNil) + }) +} + +func TestMultiGoroutineTable_insert_PartitionType_datehour_partitioncolnanotimestamp(t *testing.T) { + Convey("TestMultiGoroutineTable_insert_PartitionType_datehour_partitioncolnanotimestamp", t, func() { + ddb, err := api.NewSimpleDolphinDBClient(context.TODO(), setup.Address, setup.UserName, setup.Password) + So(err, ShouldBeNil) + defer ddb.Close() + script := "Database = \"dfs://test_MultithreadedTableWriter\"\n" + + "if(exists(Database)){\n" + + "\tdropDatabase(Database)\t\n" + + "}\n" + + "db1=database(\"\", VALUE, date(1969.12.01)+0..10)\n" + + "\tdb2=database(\"\", HASH, [SYMBOL, 2])\n" + + "\tdb=database(Database, 
COMPO, [db1, db2], , \"OLAP\", chunkGranularity=\"DATABASE\")\n" + + "t=table(1:0, `sym`tradeDate`tradePrice`vwap`volume`valueTrade, [SYMBOL, NANOTIMESTAMP, DOUBLE, DOUBLE, INT, DOUBLE])\n;share t as t1;" + + "\tcreatePartitionedTable(dbHandle=db, table=t, tableName=`pt, partitionColumns=[\"tradeDate\",\"sym\"])\n" + _, err = ddb.RunScript(script) + So(err, ShouldBeNil) + opt := &mtw.Option{ + GoroutineCount: 1, + BatchSize: 1000, + Throttle: 20, + PartitionCol: "tradeDate", + Database: "dfs://test_MultithreadedTableWriter", + TableName: "pt", + UserID: setup.UserName, + Password: setup.Password, + Address: setup.Address, + } + mtt, err := mtw.NewMultiGoroutineTable(opt) + So(err, ShouldBeNil) + threadTime := 10 + n := 1000 + waitGroup.Add(threadTime) + for i := 0; i < threadTime; i++ { + go threadinsertData(mtt, n) + insertDataTotable(n, "t1") + } + waitGroup.Wait() + mtt.WaitForGoroutineCompletion() + re1, err := ddb.RunScript("select * from loadTable('dfs://test_MultithreadedTableWriter',`pt) order by sym,tradeDate,tradePrice,vwap,volume,valueTrade") + So(err, ShouldBeNil) + re2, err := ddb.RunScript("select * from t1 order by sym,tradeDate,tradePrice,vwap,volume,valueTrade") + So(err, ShouldBeNil) + reTable1 := re1.(*model.Table) + reTable2 := re2.(*model.Table) + for i := 0; i < len(reTable1.GetColumnNames()); i++ { + So(reTable1.GetColumnByIndex(i).String(), ShouldEqual, reTable2.GetColumnByIndex(i).String()) + } + _, err = ddb.RunScript("undef(`t1, SHARED)") + So(err, ShouldBeNil) + _, err = ddb.RunScript("dropDatabase(\"dfs://test_MultithreadedTableWriter\")") + So(err, ShouldBeNil) + }) +} + +func TestMultiGoroutineTable_insert_dfs_PartitionType_partitiontype_date_partitioncoldatetime(t *testing.T) { + Convey("func TestMultiGoroutineTable_insert_dfs_PartitionType_partitiontype_date_partitioncoldatetime", t, func() { + ddb, err := api.NewSimpleDolphinDBClient(context.TODO(), setup.Address, setup.UserName, setup.Password) + So(err, ShouldBeNil) + defer ddb.Close() + script := "Database = \"dfs://test_MultithreadedTableWriter\"\n" + + "if(exists(Database)){\n" + + "\tdropDatabase(Database)\t\n" + + "}\n" + + "db=database(Database, VALUE, datehour(1969.12.01)+0..10)\n" + + "t=table(1:0, `sym`tradeDate`tradePrice`vwap`volume`valueTrade, [SYMBOL, DATETIME, DOUBLE, DOUBLE, INT, DOUBLE])\n;share t as t1;" + + "\tcreatePartitionedTable(dbHandle=db, table=t, tableName=`pt, partitionColumns=[\"tradeDate\"])\n" + _, err = ddb.RunScript(script) + So(err, ShouldBeNil) + opt := &mtw.Option{ + GoroutineCount: 1, + BatchSize: 1000, + Throttle: 20, + PartitionCol: "tradeDate", + Database: "dfs://test_MultithreadedTableWriter", + TableName: "pt", + UserID: setup.UserName, + Password: setup.Password, + Address: setup.Address, + } + mtt, err := mtw.NewMultiGoroutineTable(opt) + So(err, ShouldBeNil) + threadTime := 10 + n := 1000 + waitGroup.Add(threadTime) + for i := 0; i < threadTime; i++ { + go threadinsertData(mtt, n) + insertDataTotable(n, "t1") + } + waitGroup.Wait() + mtt.WaitForGoroutineCompletion() + re1, err := ddb.RunScript("select * from loadTable('dfs://test_MultithreadedTableWriter',`pt) order by sym,tradeDate,tradePrice,vwap,volume,valueTrade") + So(err, ShouldBeNil) + re2, err := ddb.RunScript("select * from t1 order by sym,tradeDate,tradePrice,vwap,volume,valueTrade") + So(err, ShouldBeNil) + reTable1 := re1.(*model.Table) + reTable2 := re2.(*model.Table) + for i := 0; i < len(reTable1.GetColumnNames()); i++ { + So(reTable1.GetColumnByIndex(i).String(), ShouldEqual, 
reTable2.GetColumnByIndex(i).String()) + } + _, err = ddb.RunScript("undef(`t1, SHARED)") + So(err, ShouldBeNil) + _, err = ddb.RunScript("dropDatabase(\"dfs://test_MultithreadedTableWriter\")") + So(err, ShouldBeNil) + }) +} + +func TestMultiGoroutineTable_insert_dfs_PartitionType_partitiontype_date_partitioncoltimestamp(t *testing.T) { + Convey("func TestMultiGoroutineTable_insert_dfs_PartitionType_partitiontype_date_partitioncoltimestamp", t, func() { + ddb, err := api.NewSimpleDolphinDBClient(context.TODO(), setup.Address, setup.UserName, setup.Password) + So(err, ShouldBeNil) + defer ddb.Close() + script := "Database = \"dfs://test_MultithreadedTableWriter\"\n" + + "if(exists(Database)){\n" + + "\tdropDatabase(Database)\t\n" + + "}\n" + + "db=database(Database, VALUE, datehour(1969.12.01)+0..10)\n" + + "t=table(1:0, `sym`tradeDate`tradePrice`vwap`volume`valueTrade, [SYMBOL, TIMESTAMP, DOUBLE, DOUBLE, INT, DOUBLE])\n;share t as t1;" + + "\tcreatePartitionedTable(dbHandle=db, table=t, tableName=`pt, partitionColumns=[\"tradeDate\"])\n" + _, err = ddb.RunScript(script) + So(err, ShouldBeNil) + opt := &mtw.Option{ + GoroutineCount: 1, + BatchSize: 1000, + Throttle: 20, + PartitionCol: "tradeDate", + Database: "dfs://test_MultithreadedTableWriter", + TableName: "pt", + UserID: setup.UserName, + Password: setup.Password, + Address: setup.Address, + } + mtt, err := mtw.NewMultiGoroutineTable(opt) + So(err, ShouldBeNil) + threadTime := 10 + n := 1000 + waitGroup.Add(threadTime) + for i := 0; i < threadTime; i++ { + go threadinsertData(mtt, n) + insertDataTotable(n, "t1") + } + waitGroup.Wait() + mtt.WaitForGoroutineCompletion() + re1, err := ddb.RunScript("select * from loadTable('dfs://test_MultithreadedTableWriter',`pt) order by sym,tradeDate,tradePrice,vwap,volume,valueTrade") + So(err, ShouldBeNil) + re2, err := ddb.RunScript("select * from t1 order by sym,tradeDate,tradePrice,vwap,volume,valueTrade") + So(err, ShouldBeNil) + reTable1 := re1.(*model.Table) + reTable2 := re2.(*model.Table) + for i := 0; i < len(reTable1.GetColumnNames()); i++ { + So(reTable1.GetColumnByIndex(i).String(), ShouldEqual, reTable2.GetColumnByIndex(i).String()) + } + _, err = ddb.RunScript("undef(`t1, SHARED)") + So(err, ShouldBeNil) + _, err = ddb.RunScript("dropDatabase(\"dfs://test_MultithreadedTableWriter\")") + So(err, ShouldBeNil) + }) +} + +func TestMultiGoroutineTable_insert_dfs_PartitionType_partitiontype_date_partitioncolnanotimestamp(t *testing.T) { + Convey("func TestMultiGoroutineTable_insert_dfs_PartitionType_partitiontype_date_partitioncolnanotimestamp", t, func() { + ddb, err := api.NewSimpleDolphinDBClient(context.TODO(), setup.Address, setup.UserName, setup.Password) + So(err, ShouldBeNil) + defer ddb.Close() + script := "Database = \"dfs://test_MultithreadedTableWriter\"\n" + + "if(exists(Database)){\n" + + "\tdropDatabase(Database)\t\n" + + "}\n" + + "db=database(Database, VALUE, datehour(1969.12.01)+0..10)\n" + + "t=table(1:0, `sym`tradeDate`tradePrice`vwap`volume`valueTrade, [SYMBOL, NANOTIMESTAMP, DOUBLE, DOUBLE, INT, DOUBLE])\n;share t as t1;" + + "\tcreatePartitionedTable(dbHandle=db, table=t, tableName=`pt, partitionColumns=[\"tradeDate\"])\n" + _, err = ddb.RunScript(script) + So(err, ShouldBeNil) + opt := &mtw.Option{ + GoroutineCount: 1, + BatchSize: 1000, + Throttle: 20, + PartitionCol: "tradeDate", + Database: "dfs://test_MultithreadedTableWriter", + TableName: "pt", + UserID: setup.UserName, + Password: setup.Password, + Address: setup.Address, + } + mtt, err := 
mtw.NewMultiGoroutineTable(opt) + So(err, ShouldBeNil) + threadTime := 10 + n := 1000 + waitGroup.Add(threadTime) + for i := 0; i < threadTime; i++ { + go threadinsertData(mtt, n) + insertDataTotable(n, "t1") + } + waitGroup.Wait() + mtt.WaitForGoroutineCompletion() + re1, err := ddb.RunScript("select * from loadTable('dfs://test_MultithreadedTableWriter',`pt) order by sym,tradeDate,tradePrice,vwap,volume,valueTrade") + So(err, ShouldBeNil) + re2, err := ddb.RunScript("select * from t1 order by sym,tradeDate,tradePrice,vwap,volume,valueTrade") + So(err, ShouldBeNil) + reTable1 := re1.(*model.Table) + reTable2 := re2.(*model.Table) + for i := 0; i < len(reTable1.GetColumnNames()); i++ { + So(reTable1.GetColumnByIndex(i).String(), ShouldEqual, reTable2.GetColumnByIndex(i).String()) + } + _, err = ddb.RunScript("undef(`t1, SHARED)") + So(err, ShouldBeNil) + _, err = ddb.RunScript("dropDatabase(\"dfs://test_MultithreadedTableWriter\")") + So(err, ShouldBeNil) + }) +} + +func TestMultiGoroutineTable_insert_dfs_PartitionType_partitiontype_date_partitioncoldate_range(t *testing.T) { + Convey("func TestMultiGoroutineTable_insert_dfs_PartitionType_partitiontype_date_partitioncoldate_range", t, func() { + ddb, err := api.NewSimpleDolphinDBClient(context.TODO(), setup.Address, setup.UserName, setup.Password) + So(err, ShouldBeNil) + defer ddb.Close() + script := "Database = \"dfs://test_MultithreadedTableWriter\"\n" + + "if(exists(Database)){\n" + + "\tdropDatabase(Database)\t\n" + + "}\n" + + "db=database(Database, RANGE, [1969.12.01, 1969.12.05, 1969.12.11])\n" + + "t=table(1:0, `sym`tradeDate`tradePrice`vwap`volume`valueTrade, [SYMBOL, DATE, DOUBLE, DOUBLE, INT, DOUBLE])\n;share t as t1;" + + "\tcreatePartitionedTable(dbHandle=db, table=t, tableName=`pt, partitionColumns=[\"tradeDate\"])\n" + _, err = ddb.RunScript(script) + So(err, ShouldBeNil) + opt := &mtw.Option{ + GoroutineCount: 1, + BatchSize: 1000, + Throttle: 20, + PartitionCol: "tradeDate", + Database: "dfs://test_MultithreadedTableWriter", + TableName: "pt", + UserID: setup.UserName, + Password: setup.Password, + Address: setup.Address, + } + mtt, err := mtw.NewMultiGoroutineTable(opt) + So(err, ShouldBeNil) + threadTime := 10 + n := 1000 + waitGroup.Add(threadTime) + for i := 0; i < threadTime; i++ { + go threadinsertData(mtt, n) + insertDataTotable(n, "t1") + } + waitGroup.Wait() + mtt.WaitForGoroutineCompletion() + re1, err := ddb.RunScript("select * from loadTable('dfs://test_MultithreadedTableWriter',`pt) order by sym,tradeDate,tradePrice,vwap,volume,valueTrade") + So(err, ShouldBeNil) + re2, err := ddb.RunScript("select * from t1 order by sym,tradeDate,tradePrice,vwap,volume,valueTrade") + So(err, ShouldBeNil) + reTable1 := re1.(*model.Table) + reTable2 := re2.(*model.Table) + for i := 0; i < len(reTable1.GetColumnNames()); i++ { + So(reTable1.GetColumnByIndex(i).String(), ShouldEqual, reTable2.GetColumnByIndex(i).String()) + } + _, err = ddb.RunScript("undef(`t1, SHARED)") + So(err, ShouldBeNil) + _, err = ddb.RunScript("dropDatabase(\"dfs://test_MultithreadedTableWriter\")") + So(err, ShouldBeNil) + }) +} + +func TestMultiGoroutineTable_insert_dfs_PartitionType_partitiontype_date_partitioncoldatetime_range(t *testing.T) { + Convey("func TestMultiGoroutineTable_insert_dfs_PartitionType_partitiontype_date_partitioncoldatetime_range", t, func() { + ddb, err := api.NewSimpleDolphinDBClient(context.TODO(), setup.Address, setup.UserName, setup.Password) + So(err, ShouldBeNil) + defer ddb.Close() + script := "Database = 
\"dfs://test_MultithreadedTableWriter\"\n" + + "if(exists(Database)){\n" + + "\tdropDatabase(Database)\t\n" + + "}\n" + + "db=database(Database, RANGE, [1969.12.01, 1969.12.05, 1969.12.11])\n" + + "t=table(1:0, `sym`tradeDate`tradePrice`vwap`volume`valueTrade, [SYMBOL, DATETIME, DOUBLE, DOUBLE, INT, DOUBLE])\n;share t as t1;" + + "\tcreatePartitionedTable(dbHandle=db, table=t, tableName=`pt, partitionColumns=[\"tradeDate\"])\n" + _, err = ddb.RunScript(script) + So(err, ShouldBeNil) + opt := &mtw.Option{ + GoroutineCount: 1, + BatchSize: 1000, + Throttle: 20, + PartitionCol: "tradeDate", + Database: "dfs://test_MultithreadedTableWriter", + TableName: "pt", + UserID: setup.UserName, + Password: setup.Password, + Address: setup.Address, + } + mtt, err := mtw.NewMultiGoroutineTable(opt) + So(err, ShouldBeNil) + threadTime := 10 + n := 1000 + waitGroup.Add(threadTime) + for i := 0; i < threadTime; i++ { + go threadinsertData(mtt, n) + insertDataTotable(n, "t1") + } + waitGroup.Wait() + mtt.WaitForGoroutineCompletion() + re1, err := ddb.RunScript("select * from loadTable('dfs://test_MultithreadedTableWriter',`pt) order by sym,tradeDate,tradePrice,vwap,volume,valueTrade") + So(err, ShouldBeNil) + re2, err := ddb.RunScript("select * from t1 order by sym,tradeDate,tradePrice,vwap,volume,valueTrade") + So(err, ShouldBeNil) + reTable1 := re1.(*model.Table) + reTable2 := re2.(*model.Table) + for i := 0; i < len(reTable1.GetColumnNames()); i++ { + So(reTable1.GetColumnByIndex(i).String(), ShouldEqual, reTable2.GetColumnByIndex(i).String()) + } + _, err = ddb.RunScript("undef(`t1, SHARED)") + So(err, ShouldBeNil) + _, err = ddb.RunScript("dropDatabase(\"dfs://test_MultithreadedTableWriter\")") + So(err, ShouldBeNil) + }) +} + +func TestMultiGoroutineTable_insert_dfs_PartitionType_partitiontype_date_partitioncoltimestamp_range(t *testing.T) { + Convey("func TestMultiGoroutineTable_insert_dfs_PartitionType_partitiontype_date_partitioncoltimestamp_range", t, func() { + ddb, err := api.NewSimpleDolphinDBClient(context.TODO(), setup.Address, setup.UserName, setup.Password) + So(err, ShouldBeNil) + defer ddb.Close() + script := "Database = \"dfs://test_MultithreadedTableWriter\"\n" + + "if(exists(Database)){\n" + + "\tdropDatabase(Database)\t\n" + + "}\n" + + "db=database(Database, RANGE, [1969.12.01, 1969.12.05, 1969.12.11])\n" + + "t=table(1:0, `sym`tradeDate`tradePrice`vwap`volume`valueTrade, [SYMBOL, TIMESTAMP, DOUBLE, DOUBLE, INT, DOUBLE])\n;share t as t1;" + + "\tcreatePartitionedTable(dbHandle=db, table=t, tableName=`pt, partitionColumns=[\"tradeDate\"])\n" + _, err = ddb.RunScript(script) + So(err, ShouldBeNil) + opt := &mtw.Option{ + GoroutineCount: 1, + BatchSize: 1000, + Throttle: 20, + PartitionCol: "tradeDate", + Database: "dfs://test_MultithreadedTableWriter", + TableName: "pt", + UserID: setup.UserName, + Password: setup.Password, + Address: setup.Address, + } + mtt, err := mtw.NewMultiGoroutineTable(opt) + So(err, ShouldBeNil) + threadTime := 10 + n := 1000 + waitGroup.Add(threadTime) + for i := 0; i < threadTime; i++ { + go threadinsertData(mtt, n) + insertDataTotable(n, "t1") + } + waitGroup.Wait() + mtt.WaitForGoroutineCompletion() + re1, err := ddb.RunScript("select * from loadTable('dfs://test_MultithreadedTableWriter',`pt) order by sym,tradeDate,tradePrice,vwap,volume,valueTrade") + So(err, ShouldBeNil) + re2, err := ddb.RunScript("select * from t1 order by sym,tradeDate,tradePrice,vwap,volume,valueTrade") + So(err, ShouldBeNil) + reTable1 := re1.(*model.Table) + reTable2 := 
re2.(*model.Table) + for i := 0; i < len(reTable1.GetColumnNames()); i++ { + So(reTable1.GetColumnByIndex(i).String(), ShouldEqual, reTable2.GetColumnByIndex(i).String()) + } + _, err = ddb.RunScript("undef(`t1, SHARED)") + So(err, ShouldBeNil) + _, err = ddb.RunScript("dropDatabase(\"dfs://test_MultithreadedTableWriter\")") + So(err, ShouldBeNil) + }) +} + +func TestMultiGoroutineTable_insert_dfs_PartitionType_partitiontype_date_partitioncolnanotimestamp_range(t *testing.T) { + Convey("func TestMultiGoroutineTable_insert_dfs_PartitionType_partitiontype_date_partitioncolnanotimestamp_range", t, func() { + ddb, err := api.NewSimpleDolphinDBClient(context.TODO(), setup.Address, setup.UserName, setup.Password) + So(err, ShouldBeNil) + defer ddb.Close() + script := "Database = \"dfs://test_MultithreadedTableWriter\"\n" + + "if(exists(Database)){\n" + + "\tdropDatabase(Database)\t\n" + + "}\n" + + "db=database(Database, RANGE, [1969.12.01, 1969.12.05, 1969.12.11])\n" + + "t=table(1:0, `sym`tradeDate`tradePrice`vwap`volume`valueTrade, [SYMBOL, NANOTIMESTAMP, DOUBLE, DOUBLE, INT, DOUBLE])\n;share t as t1;" + + "\tcreatePartitionedTable(dbHandle=db, table=t, tableName=`pt, partitionColumns=[\"tradeDate\"])\n" + _, err = ddb.RunScript(script) + So(err, ShouldBeNil) + opt := &mtw.Option{ + GoroutineCount: 1, + BatchSize: 1000, + Throttle: 20, + PartitionCol: "tradeDate", + Database: "dfs://test_MultithreadedTableWriter", + TableName: "pt", + UserID: setup.UserName, + Password: setup.Password, + Address: setup.Address, + } + mtt, err := mtw.NewMultiGoroutineTable(opt) + So(err, ShouldBeNil) + threadTime := 10 + n := 1000 + waitGroup.Add(threadTime) + for i := 0; i < threadTime; i++ { + go threadinsertData(mtt, n) + insertDataTotable(n, "t1") + } + waitGroup.Wait() + mtt.WaitForGoroutineCompletion() + re1, err := ddb.RunScript("select * from loadTable('dfs://test_MultithreadedTableWriter',`pt) order by sym,tradeDate,tradePrice,vwap,volume,valueTrade") + So(err, ShouldBeNil) + re2, err := ddb.RunScript("select * from t1 order by sym,tradeDate,tradePrice,vwap,volume,valueTrade") + So(err, ShouldBeNil) + reTable1 := re1.(*model.Table) + reTable2 := re2.(*model.Table) + for i := 0; i < len(reTable1.GetColumnNames()); i++ { + So(reTable1.GetColumnByIndex(i).String(), ShouldEqual, reTable2.GetColumnByIndex(i).String()) + } + _, err = ddb.RunScript("undef(`t1, SHARED)") + So(err, ShouldBeNil) + _, err = ddb.RunScript("dropDatabase(\"dfs://test_MultithreadedTableWriter\")") + So(err, ShouldBeNil) + }) +} + +func TestMultiGoroutineTable_insert_dfs_PartitionType_partitiontype_month_partitioncoldatetime_range(t *testing.T) { + Convey("func TestMultiGoroutineTable_insert_dfs_PartitionType_partitiontype_month_partitioncoldatetime_range", t, func() { + ddb, err := api.NewSimpleDolphinDBClient(context.TODO(), setup.Address, setup.UserName, setup.Password) + So(err, ShouldBeNil) + defer ddb.Close() + script := "Database = \"dfs://test_MultithreadedTableWriter\"\n" + + "if(exists(Database)){\n" + + "\tdropDatabase(Database)\t\n" + + "}\n" + + "db=database(Database, VALUE, month(1..10))\n" + + "t=table(1:0, `sym`tradeDate`tradePrice`vwap`volume`valueTrade, [SYMBOL, DATETIME, DOUBLE, DOUBLE, INT, DOUBLE])\n;share t as t1;" + + "\tcreatePartitionedTable(dbHandle=db, table=t, tableName=`pt, partitionColumns=[\"tradeDate\"])\n" + _, err = ddb.RunScript(script) + So(err, ShouldBeNil) + opt := &mtw.Option{ + GoroutineCount: 1, + BatchSize: 1000, + Throttle: 20, + PartitionCol: "tradeDate", + Database: 
"dfs://test_MultithreadedTableWriter", + TableName: "pt", + UserID: setup.UserName, + Password: setup.Password, + Address: setup.Address, + } + mtt, err := mtw.NewMultiGoroutineTable(opt) + So(err, ShouldBeNil) + threadTime := 10 + n := 1000 + waitGroup.Add(threadTime) + for i := 0; i < threadTime; i++ { + go threadinsertData(mtt, n) + insertDataTotable(n, "t1") + } + waitGroup.Wait() + mtt.WaitForGoroutineCompletion() + re1, err := ddb.RunScript("select * from loadTable('dfs://test_MultithreadedTableWriter',`pt) order by sym,tradeDate,tradePrice,vwap,volume,valueTrade") + So(err, ShouldBeNil) + re2, err := ddb.RunScript("select * from t1 order by sym,tradeDate,tradePrice,vwap,volume,valueTrade") + So(err, ShouldBeNil) + reTable1 := re1.(*model.Table) + reTable2 := re2.(*model.Table) + for i := 0; i < len(reTable1.GetColumnNames()); i++ { + So(reTable1.GetColumnByIndex(i).String(), ShouldEqual, reTable2.GetColumnByIndex(i).String()) + } + _, err = ddb.RunScript("undef(`t1, SHARED)") + So(err, ShouldBeNil) + _, err = ddb.RunScript("dropDatabase(\"dfs://test_MultithreadedTableWriter\")") + So(err, ShouldBeNil) + }) +} + +func TestMultiGoroutineTable_insert_dfs_PartitionType_partitiontype_month_partitioncoltimestamp_range(t *testing.T) { + Convey("func TestMultiGoroutineTable_insert_dfs_PartitionType_partitiontype_month_partitioncoltimestamp_range", t, func() { + ddb, err := api.NewSimpleDolphinDBClient(context.TODO(), setup.Address, setup.UserName, setup.Password) + So(err, ShouldBeNil) + defer ddb.Close() + script := "Database = \"dfs://test_MultithreadedTableWriter\"\n" + + "if(exists(Database)){\n" + + "\tdropDatabase(Database)\t\n" + + "}\n" + + "db=database(Database, VALUE, month(1..10))\n" + + "t=table(1:0, `sym`tradeDate`tradePrice`vwap`volume`valueTrade, [SYMBOL, TIMESTAMP, DOUBLE, DOUBLE, INT, DOUBLE])\n;share t as t1;" + + "\tcreatePartitionedTable(dbHandle=db, table=t, tableName=`pt, partitionColumns=[\"tradeDate\"])\n" + _, err = ddb.RunScript(script) + So(err, ShouldBeNil) + opt := &mtw.Option{ + GoroutineCount: 1, + BatchSize: 1000, + Throttle: 20, + PartitionCol: "tradeDate", + Database: "dfs://test_MultithreadedTableWriter", + TableName: "pt", + UserID: setup.UserName, + Password: setup.Password, + Address: setup.Address, + } + mtt, err := mtw.NewMultiGoroutineTable(opt) + So(err, ShouldBeNil) + threadTime := 10 + n := 1000 + waitGroup.Add(threadTime) + for i := 0; i < threadTime; i++ { + go threadinsertData(mtt, n) + insertDataTotable(n, "t1") + } + waitGroup.Wait() + mtt.WaitForGoroutineCompletion() + re1, err := ddb.RunScript("select * from loadTable('dfs://test_MultithreadedTableWriter',`pt) order by sym,tradeDate,tradePrice,vwap,volume,valueTrade") + So(err, ShouldBeNil) + re2, err := ddb.RunScript("select * from t1 order by sym,tradeDate,tradePrice,vwap,volume,valueTrade") + So(err, ShouldBeNil) + reTable1 := re1.(*model.Table) + reTable2 := re2.(*model.Table) + for i := 0; i < len(reTable1.GetColumnNames()); i++ { + So(reTable1.GetColumnByIndex(i).String(), ShouldEqual, reTable2.GetColumnByIndex(i).String()) + } + _, err = ddb.RunScript("undef(`t1, SHARED)") + So(err, ShouldBeNil) + _, err = ddb.RunScript("dropDatabase(\"dfs://test_MultithreadedTableWriter\")") + So(err, ShouldBeNil) + }) +} + +func TestMultiGoroutineTable_insert_dfs_multiple_mutithreadTableWriter_sameTable(t *testing.T) { + Convey("func TestMultiGoroutineTable_insert_dfs_multiple_mutithreadTableWriter_sameTable", t, func() { + ddb, err := api.NewSimpleDolphinDBClient(context.TODO(), setup.Address, 
setup.UserName, setup.Password) + So(err, ShouldBeNil) + defer ddb.Close() + script := "\n" + + "Database = \"dfs://test_MultithreadedTableWriter\"\n" + + "if(exists(Database)){\n" + + "\tdropDatabase(Database)\t\n" + + "}\n" + + "db=database(Database, VALUE, 1..5)\n" + + "t=table(1:0, `volume`valueTrade, [INT, DOUBLE])\n" + + " ;share t as t1;\tcreatePartitionedTable(dbHandle=db, table=t, tableName=`pt, partitionColumns=[\"volume\"])\n" + _, err = ddb.RunScript(script) + So(err, ShouldBeNil) + opt := &mtw.Option{ + GoroutineCount: 2, + BatchSize: 10, + Throttle: 1, + PartitionCol: "volume", + Database: "dfs://test_MultithreadedTableWriter", + TableName: "pt", + UserID: setup.UserName, + Password: setup.Password, + Address: setup.Address, + } + mtt1, err := mtw.NewMultiGoroutineTable(opt) + So(err, ShouldBeNil) + mtt2, err := mtw.NewMultiGoroutineTable(opt) + So(err, ShouldBeNil) + tb1 := make([][]model.DataType, 0) + tb2 := make([][]model.DataType, 0) + for i := 0; i < 1; i++ { + rowData1 := make([]model.DataType, 0) + rowData2 := make([]model.DataType, 0) + dt1, _ := model.NewDataType(model.DtInt, int32(1)) + rowData1 = append(rowData1, dt1) + dt2, _ := model.NewDataType(model.DtDouble, float64(12.9)) + rowData1 = append(rowData1, dt2) + dt3, _ := model.NewDataType(model.DtInt, int32(2)) + rowData2 = append(rowData2, dt3) + dt4, _ := model.NewDataType(model.DtDouble, float64(22.9)) + rowData2 = append(rowData2, dt4) + tb1 = append(tb1, rowData1) + tb2 = append(tb2, rowData2) + } + for i := 0; i < 10; i++ { + err = mtt1.InsertUnwrittenData(tb1) + AssertNil(err) + err = mtt2.InsertUnwrittenData(tb2) + AssertNil(err) + } + for j := 0; j < 10; j++ { + var intarr1 []int32 + var floatarr1 []float64 + for i := 0; i < 1; i++ { + floatarr1 = append(floatarr1, float64(12.9)) + intarr1 = append(intarr1, int32(1)) + } + valueTrade1, _ := model.NewDataTypeListWithRaw(model.DtDouble, floatarr1) + volume1, _ := model.NewDataTypeListWithRaw(model.DtInt, intarr1) + tmp1 := model.NewTable([]string{"volume", "valueTrade"}, + []*model.Vector{model.NewVector(volume1), model.NewVector(valueTrade1)}) + _, err = ddb.RunFunc("tableInsert{t1}", []model.DataForm{tmp1}) + AssertNil(err) + time.Sleep(3 * time.Second) + var intarr2 []int32 + var floatarr2 []float64 + for i := 0; i < 1; i++ { + floatarr2 = append(floatarr2, float64(22.9)) + intarr2 = append(intarr2, int32(2)) + } + valueTrade2, _ := model.NewDataTypeListWithRaw(model.DtDouble, floatarr2) + volume2, _ := model.NewDataTypeListWithRaw(model.DtInt, intarr2) + tmp2 := model.NewTable([]string{"volume", "valueTrade"}, + []*model.Vector{model.NewVector(volume2), model.NewVector(valueTrade2)}) + _, err = ddb.RunFunc("tableInsert{t1}", []model.DataForm{tmp2}) + AssertNil(err) + } + mtt1.WaitForGoroutineCompletion() + mtt2.WaitForGoroutineCompletion() + re1, err := ddb.RunScript("select * from loadTable('dfs://test_MultithreadedTableWriter',`pt) order by volume,valueTrade") + So(err, ShouldBeNil) + re2, err := ddb.RunScript("select * from t1 order by volume,valueTrade") + So(err, ShouldBeNil) + reTable1 := re1.(*model.Table) + reTable2 := re2.(*model.Table) + for i := 0; i < len(reTable1.GetColumnNames()); i++ { + So(reTable1.GetColumnByIndex(i).String(), ShouldEqual, reTable2.GetColumnByIndex(i).String()) + } + _, err = ddb.RunScript("undef(`t1, SHARED)") + So(err, ShouldBeNil) + _, err = ddb.RunScript("dropDatabase(\"dfs://test_MultithreadedTableWriter\")") + So(err, ShouldBeNil) + }) +} + +func 
TestMultiGoroutineTable_insert_dfs_multiple_mutithreadTableWriter_differentTable(t *testing.T) { + Convey("func TestMultiGoroutineTable_insert_dfs_multiple_mutithreadTableWriter_differentTable", t, func() { + ddb, err := api.NewSimpleDolphinDBClient(context.TODO(), setup.Address, setup.UserName, setup.Password) + So(err, ShouldBeNil) + defer ddb.Close() + script := "\n" + + "Database = \"dfs://test_MultithreadedTableWriter\"\n" + + "if(exists(Database)){\n" + + "\tdropDatabase(Database)\t\n" + + "}\n" + + "db=database(Database, VALUE, 1..5)\n" + + "t=table(1:0, `volume`valueTrade, [INT, DOUBLE])\n;share t as t1;" + + "\tcreatePartitionedTable(dbHandle=db, table=t, tableName=`pt1, partitionColumns=[\"volume\"]);\n" + + "\tcreatePartitionedTable(dbHandle=db, table=t, tableName=`pt2, partitionColumns=[\"volume\"]);\n" + + "\tcreatePartitionedTable(dbHandle=db, table=t, tableName=`pt3, partitionColumns=[\"volume\"]);\n" + + "\tcreatePartitionedTable(dbHandle=db, table=t, tableName=`pt4, partitionColumns=[\"volume\"]);\n" + _, err = ddb.RunScript(script) + So(err, ShouldBeNil) + opt1 := &mtw.Option{ + GoroutineCount: 20, + BatchSize: 10, + Throttle: 1, + PartitionCol: "volume", + Database: "dfs://test_MultithreadedTableWriter", + TableName: "pt1", + UserID: setup.UserName, + Password: setup.Password, + Address: setup.Address, + } + mtt1, err := mtw.NewMultiGoroutineTable(opt1) + So(err, ShouldBeNil) + opt2 := &mtw.Option{ + GoroutineCount: 10, + BatchSize: 30, + Throttle: 1, + PartitionCol: "volume", + Database: "dfs://test_MultithreadedTableWriter", + TableName: "pt2", + UserID: setup.UserName, + Password: setup.Password, + Address: setup.Address, + } + mtt2, err := mtw.NewMultiGoroutineTable(opt2) + So(err, ShouldBeNil) + opt3 := &mtw.Option{ + GoroutineCount: 10, + BatchSize: 100, + Throttle: 1, + PartitionCol: "volume", + Database: "dfs://test_MultithreadedTableWriter", + TableName: "pt3", + UserID: setup.UserName, + Password: setup.Password, + Address: setup.Address, + } + mtt3, err := mtw.NewMultiGoroutineTable(opt3) + So(err, ShouldBeNil) + opt4 := &mtw.Option{ + GoroutineCount: 2, + BatchSize: 10, + Throttle: 1, + PartitionCol: "volume", + Database: "dfs://test_MultithreadedTableWriter", + TableName: "pt4", + UserID: setup.UserName, + Password: setup.Password, + Address: setup.Address, + } + mtt4, err := mtw.NewMultiGoroutineTable(opt4) + So(err, ShouldBeNil) + tb := make([][]model.DataType, 0) + for i := 0; i < 1; i++ { + rowData := make([]model.DataType, 0) + dt1, _ := model.NewDataType(model.DtInt, int32(16+i)) + rowData = append(rowData, dt1) + dt2, _ := model.NewDataType(model.DtDouble, float64(22.9)) + rowData = append(rowData, dt2) + tb = append(tb, rowData) + } + for i := 0; i < 10; i++ { + err = mtt1.InsertUnwrittenData(tb) + AssertNil(err) + err = mtt2.InsertUnwrittenData(tb) + AssertNil(err) + err = mtt3.InsertUnwrittenData(tb) + AssertNil(err) + err = mtt4.InsertUnwrittenData(tb) + AssertNil(err) + } + for j := 0; j < 10; j++ { + var intarr []int32 + var floatarr1 []float64 + for i := 0; i < 1; i++ { + floatarr1 = append(floatarr1, float64(22.9)) + intarr = append(intarr, int32(16)) + } + valueTrade, _ := model.NewDataTypeListWithRaw(model.DtDouble, floatarr1) + volume, _ := model.NewDataTypeListWithRaw(model.DtInt, intarr) + tmp := model.NewTable([]string{"volume", "valueTrade"}, + []*model.Vector{model.NewVector(volume), model.NewVector(valueTrade)}) + _, err = ddb.RunFunc("tableInsert{t1}", []model.DataForm{tmp}) + AssertNil(err) + } + mtt1.WaitForGoroutineCompletion() + 
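+ // Drain the remaining writers too: every MultiGoroutineTable must finish
+ // flushing before its target table is queried below. A minimal sketch of the
+ // write pattern these tests exercise (same mtw API as above; values match the
+ // `volume`valueTrade schema used here):
+ //   w, err := mtw.NewMultiGoroutineTable(opt) // opt names DB, table, partition column
+ //   err = w.Insert(int32(1), 22.9)            // one value per column, in column order
+ //   w.WaitForGoroutineCompletion()            // block until queued rows are flushed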
mtt2.WaitForGoroutineCompletion() + mtt3.WaitForGoroutineCompletion() + mtt4.WaitForGoroutineCompletion() + re1, err := ddb.RunScript("select * from loadTable('dfs://test_MultithreadedTableWriter',`pt1) order by volume,valueTrade") + So(err, ShouldBeNil) + re2, err := ddb.RunScript("select * from loadTable('dfs://test_MultithreadedTableWriter',`pt2) order by volume,valueTrade") + So(err, ShouldBeNil) + re3, err := ddb.RunScript("select * from loadTable('dfs://test_MultithreadedTableWriter',`pt3) order by volume,valueTrade") + So(err, ShouldBeNil) + re4, err := ddb.RunScript("select * from loadTable('dfs://test_MultithreadedTableWriter',`pt4) order by volume,valueTrade") + So(err, ShouldBeNil) + ex, err := ddb.RunScript("select * from t1 order by volume,valueTrade") + So(err, ShouldBeNil) + reTable1 := re1.(*model.Table) + reTable2 := re2.(*model.Table) + reTable3 := re3.(*model.Table) + reTable4 := re4.(*model.Table) + exTable := ex.(*model.Table) + for i := 0; i < len(reTable1.GetColumnNames()); i++ { + So(reTable1.GetColumnByIndex(i).String(), ShouldEqual, exTable.GetColumnByIndex(i).String()) + So(reTable2.GetColumnByIndex(i).String(), ShouldEqual, exTable.GetColumnByIndex(i).String()) + So(reTable3.GetColumnByIndex(i).String(), ShouldEqual, exTable.GetColumnByIndex(i).String()) + So(reTable4.GetColumnByIndex(i).String(), ShouldEqual, exTable.GetColumnByIndex(i).String()) + } + _, err = ddb.RunScript("undef(`t1, SHARED)") + So(err, ShouldBeNil) + _, err = ddb.RunScript("dropDatabase(\"dfs://test_MultithreadedTableWriter\")") + So(err, ShouldBeNil) + }) +} + +func TestMultiGoroutineTable_insert_dfs_multiple_mutithreadTableWriter_differentDatabase(t *testing.T) { + Convey("func TestMultiGoroutineTable_insert_dfs_multiple_mutithreadTableWriter_differentDatabase", t, func() { + ddb, err := api.NewSimpleDolphinDBClient(context.TODO(), setup.Address, setup.UserName, setup.Password) + So(err, ShouldBeNil) + defer ddb.Close() + script1 := "\n" + + "Database = \"dfs://test_MultithreadedTableWriter1\"\n" + + "if(exists(Database)){\n" + + "\tdropDatabase(Database)\t\n" + + "}\n" + + "db=database(Database, VALUE, 1..5)\n" + + "t=table(1:0, `volume`valueTrade, [INT, DOUBLE])\n;share t as t1;" + + "\tcreatePartitionedTable(dbHandle=db, table=t, tableName=`pt1, partitionColumns=[\"volume\"]);\n" + _, err = ddb.RunScript(script1) + So(err, ShouldBeNil) + script2 := "\n" + + "Database = \"dfs://test_MultithreadedTableWriter2\"\n" + + "if(exists(Database)){\n" + + "\tdropDatabase(Database)\t\n" + + "}\n" + + "db=database(Database, VALUE, 1..5)\n" + + "t=table(1:0, `volume`valueTrade, [INT, DOUBLE])\n;share t as t1;" + + "\tcreatePartitionedTable(dbHandle=db, table=t, tableName=`pt1, partitionColumns=[\"volume\"]);\n" + _, err = ddb.RunScript(script2) + So(err, ShouldBeNil) + script3 := "\n" + + "Database = \"dfs://test_MultithreadedTableWriter3\"\n" + + "if(exists(Database)){\n" + + "\tdropDatabase(Database)\t\n" + + "}\n" + + "db=database(Database, VALUE, 1..5)\n" + + "t=table(1:0, `volume`valueTrade, [INT, DOUBLE])\n;share t as t1;" + + "\tcreatePartitionedTable(dbHandle=db, table=t, tableName=`pt1, partitionColumns=[\"volume\"]);\n" + _, err = ddb.RunScript(script3) + So(err, ShouldBeNil) + opt1 := &mtw.Option{ + GoroutineCount: 20, + BatchSize: 10, + Throttle: 1, + PartitionCol: "volume", + Database: "dfs://test_MultithreadedTableWriter1", + TableName: "pt1", + UserID: setup.UserName, + Password: setup.Password, + Address: setup.Address, + } + mtt1, err := mtw.NewMultiGoroutineTable(opt1) + So(err, 
ShouldBeNil) + opt2 := &mtw.Option{ + GoroutineCount: 10, + BatchSize: 30, + Throttle: 1, + PartitionCol: "volume", + Database: "dfs://test_MultithreadedTableWriter2", + TableName: "pt1", + UserID: setup.UserName, + Password: setup.Password, + Address: setup.Address, + } + mtt2, err := mtw.NewMultiGoroutineTable(opt2) + So(err, ShouldBeNil) + opt3 := &mtw.Option{ + GoroutineCount: 10, + BatchSize: 100, + Throttle: 1, + PartitionCol: "volume", + Database: "dfs://test_MultithreadedTableWriter3", + TableName: "pt1", + UserID: setup.UserName, + Password: setup.Password, + Address: setup.Address, + } + mtt3, err := mtw.NewMultiGoroutineTable(opt3) + So(err, ShouldBeNil) + tb := make([][]model.DataType, 0) + for i := 0; i < 1; i++ { + rowData := make([]model.DataType, 0) + dt1, _ := model.NewDataType(model.DtInt, int32(16+i)) + rowData = append(rowData, dt1) + dt2, _ := model.NewDataType(model.DtDouble, float64(22.9)) + rowData = append(rowData, dt2) + tb = append(tb, rowData) + } + for i := 0; i < 10; i++ { + err = mtt1.InsertUnwrittenData(tb) + AssertNil(err) + err = mtt2.InsertUnwrittenData(tb) + AssertNil(err) + err = mtt3.InsertUnwrittenData(tb) + AssertNil(err) + } + for j := 0; j < 10; j++ { + var intarr []int32 + var floatarr1 []float64 + for i := 0; i < 1; i++ { + floatarr1 = append(floatarr1, float64(22.9)) + intarr = append(intarr, int32(16)) + } + valueTrade, _ := model.NewDataTypeListWithRaw(model.DtDouble, floatarr1) + volume, _ := model.NewDataTypeListWithRaw(model.DtInt, intarr) + tmp := model.NewTable([]string{"volume", "valueTrade"}, + []*model.Vector{model.NewVector(volume), model.NewVector(valueTrade)}) + _, err = ddb.RunFunc("tableInsert{t1}", []model.DataForm{tmp}) + AssertNil(err) + } + mtt1.WaitForGoroutineCompletion() + mtt2.WaitForGoroutineCompletion() + mtt3.WaitForGoroutineCompletion() + re1, err := ddb.RunScript("select * from loadTable('dfs://test_MultithreadedTableWriter1',`pt1) order by volume,valueTrade") + So(err, ShouldBeNil) + re2, err := ddb.RunScript("select * from loadTable('dfs://test_MultithreadedTableWriter2',`pt1) order by volume,valueTrade") + So(err, ShouldBeNil) + re3, err := ddb.RunScript("select * from loadTable('dfs://test_MultithreadedTableWriter3',`pt1) order by volume,valueTrade") + So(err, ShouldBeNil) + ex, err := ddb.RunScript("select * from t1 order by volume,valueTrade") + So(err, ShouldBeNil) + reTable1 := re1.(*model.Table) + reTable2 := re2.(*model.Table) + reTable3 := re3.(*model.Table) + exTable := ex.(*model.Table) + for i := 0; i < len(reTable1.GetColumnNames()); i++ { + So(reTable1.GetColumnByIndex(i).String(), ShouldEqual, exTable.GetColumnByIndex(i).String()) + So(reTable2.GetColumnByIndex(i).String(), ShouldEqual, exTable.GetColumnByIndex(i).String()) + So(reTable3.GetColumnByIndex(i).String(), ShouldEqual, exTable.GetColumnByIndex(i).String()) + } + _, err = ddb.RunScript("undef(`t1, SHARED)") + So(err, ShouldBeNil) + _, err = ddb.RunScript("dropDatabase(\"dfs://test_MultithreadedTableWriter1\")") + So(err, ShouldBeNil) + _, err = ddb.RunScript("dropDatabase(\"dfs://test_MultithreadedTableWriter2\")") + So(err, ShouldBeNil) + _, err = ddb.RunScript("dropDatabase(\"dfs://test_MultithreadedTableWriter3\")") + So(err, ShouldBeNil) + }) +} + +func TestMultiGoroutineTable_insert_differentTable_status_isExiting(t *testing.T) { + Convey("func TestMultiGoroutineTable_insert_differentTable_status_isExiting", t, func() { + ddb, err := api.NewSimpleDolphinDBClient(context.TODO(), setup.Address, setup.UserName, setup.Password) + So(err, 
ShouldBeNil) + defer ddb.Close() + script := "tmp1=table(1:0, `sym`tradeDate`tradePrice`vwap`volume`valueTrade, [SYMBOL,TIMESTAMP, DOUBLE, DOUBLE, INT, DOUBLE])\n;share tmp1 as st1;" + + "tmp2=table(1:0, `sym`tradeDate`tradePrice`vwap`volume`valueTrade, [SYMBOL,TIMESTAMP, DOUBLE, DOUBLE, INT, DOUBLE])\n;share tmp2 as st2;" + + "tmp3=table(1:0, `sym`tradeDate`tradePrice`vwap`volume`valueTrade, [SYMBOL,TIMESTAMP, DOUBLE, DOUBLE, INT, DOUBLE])\n;share tmp3 as st3;" + _, err = ddb.RunScript(script) + So(err, ShouldBeNil) + opt1 := &mtw.Option{ + GoroutineCount: 20, + BatchSize: 10, + Throttle: 1, + PartitionCol: "volume", + Database: "", + TableName: "st1", + UserID: setup.UserName, + Password: setup.Password, + Address: setup.Address, + } + mtt1, err := mtw.NewMultiGoroutineTable(opt1) + So(err, ShouldBeNil) + opt2 := &mtw.Option{ + GoroutineCount: 10, + BatchSize: 30, + Throttle: 1, + PartitionCol: "volume", + Database: "", + TableName: "st2", + UserID: setup.UserName, + Password: setup.Password, + Address: setup.Address, + } + mtt2, err := mtw.NewMultiGoroutineTable(opt2) + So(err, ShouldBeNil) + opt3 := &mtw.Option{ + GoroutineCount: 10, + BatchSize: 100, + Throttle: 1, + PartitionCol: "volume", + Database: "", + TableName: "st3", + UserID: setup.UserName, + Password: setup.Password, + Address: setup.Address, + } + mtt3, err := mtw.NewMultiGoroutineTable(opt3) + So(err, ShouldBeNil) + n := 100 + for i := 0; i < 10; i++ { + waitGroup.Add(1) + go threadinsertData(mtt1, n) + waitGroup.Add(1) + go threadinsertData(mtt2, n) + waitGroup.Add(1) + go threadinsertData(mtt3, n) + insertDataTotable(n, "st1") + insertDataTotable(n, "st2") + insertDataTotable(n, "st3") + } + waitGroup.Wait() + mtt1.WaitForGoroutineCompletion() + mtt2.WaitForGoroutineCompletion() + mtt3.WaitForGoroutineCompletion() + re1, err := ddb.RunScript("select * from tmp1 order by volume,valueTrade;") + So(err, ShouldBeNil) + re2, err := ddb.RunScript("select * from tmp2 order by volume,valueTrade;") + So(err, ShouldBeNil) + re3, err := ddb.RunScript("select * from tmp3 order by volume,valueTrade;") + So(err, ShouldBeNil) + ex, err := ddb.RunScript("select * from tmp1 order by volume,valueTrade;") + So(err, ShouldBeNil) + reTable1 := re1.(*model.Table) + reTable2 := re2.(*model.Table) + reTable3 := re3.(*model.Table) + exTable := ex.(*model.Table) + for i := 0; i < len(reTable1.GetColumnNames()); i++ { + So(reTable1.GetColumnByIndex(i).String(), ShouldEqual, exTable.GetColumnByIndex(i).String()) + So(reTable2.GetColumnByIndex(i).String(), ShouldEqual, exTable.GetColumnByIndex(i).String()) + So(reTable3.GetColumnByIndex(i).String(), ShouldEqual, exTable.GetColumnByIndex(i).String()) + } + _, err = ddb.RunScript("undef(`st1, SHARED)") + So(err, ShouldBeNil) + _, err = ddb.RunScript("undef(`st2, SHARED)") + So(err, ShouldBeNil) + _, err = ddb.RunScript("undef(`st3, SHARED)") + So(err, ShouldBeNil) + }) +} + +func TestMultiGoroutineTable_insert_tsdb_keepDuplicates(t *testing.T) { + Convey("func TestMultiGoroutineTable_insert_tsdb_keepDuplicates", t, func() { + ddb, err := api.NewSimpleDolphinDBClient(context.TODO(), setup.Address, setup.UserName, setup.Password) + So(err, ShouldBeNil) + defer ddb.Close() + script := "\n" + + "Database = \"dfs://test_MultithreadedTableWriter\"\n" + + "if(exists(Database)){\n" + + "\tdropDatabase(Database)\t\n" + + "}\n" + + "db=database(Database, VALUE, 0..11,,'TSDB');\n" + + //"share keyedStreamTable(`volume`tradeDate,1:0, `sym`tradeDate`tradePrice`vwap`volume`valueTrade, [SYMBOL, DATE, DOUBLE, 
DOUBLE, INT, DOUBLE]) as t1\n" + + "t=table(1:0, `sym`tradeDate`tradePrice`vwap`volume`valueTrade, [SYMBOL, DATE, DOUBLE, DOUBLE, INT, DOUBLE]); share t as t1; " + + "createPartitionedTable(dbHandle=db, table=t, tableName=`pt1, partitionColumns=[\"volume\"],sortColumns=`volume`tradeDate,compressMethods={volume:\"delta\"},keepDuplicates=LAST);" + + "createPartitionedTable(dbHandle=db, table=t, tableName=`pt2, partitionColumns=[\"volume\"],sortColumns=`volume`tradeDate,keepDuplicates=FIRST);" + + "createPartitionedTable(dbHandle=db, table=t, tableName=`pt3, partitionColumns=[\"volume\"],sortColumns=`volume`tradeDate,keepDuplicates=LAST);" + + "createTable(dbHandle=db, table=t, tableName=`pt4, sortColumns=`volume`tradeDate,compressMethods={volume:\"delta\"},keepDuplicates=LAST);\n" + _, err = ddb.RunScript(script) + So(err, ShouldBeNil) + opt1 := &mtw.Option{ + GoroutineCount: 20, + BatchSize: 10, + Throttle: 1, + PartitionCol: "volume", + Database: "dfs://test_MultithreadedTableWriter", + TableName: "pt1", + UserID: setup.UserName, + Password: setup.Password, + Address: setup.Address, + } + mtt1, err := mtw.NewMultiGoroutineTable(opt1) + So(err, ShouldBeNil) + opt2 := &mtw.Option{ + GoroutineCount: 10, + BatchSize: 30, + Throttle: 1, + PartitionCol: "volume", + Database: "dfs://test_MultithreadedTableWriter", + TableName: "pt2", + UserID: setup.UserName, + Password: setup.Password, + Address: setup.Address, + } + mtt2, err := mtw.NewMultiGoroutineTable(opt2) + So(err, ShouldBeNil) + opt3 := &mtw.Option{ + GoroutineCount: 10, + BatchSize: 100, + Throttle: 1, + PartitionCol: "volume", + Database: "dfs://test_MultithreadedTableWriter", + TableName: "pt3", + UserID: setup.UserName, + Password: setup.Password, + Address: setup.Address, + } + mtt3, err := mtw.NewMultiGoroutineTable(opt3) + So(err, ShouldBeNil) + opt4 := &mtw.Option{ + GoroutineCount: 1, + BatchSize: 100, + Throttle: 1, + PartitionCol: "volume", + Database: "dfs://test_MultithreadedTableWriter", + TableName: "pt4", + UserID: setup.UserName, + Password: setup.Password, + Address: setup.Address, + } + mtt4, err := mtw.NewMultiGoroutineTable(opt4) + So(err, ShouldBeNil) + n := 100 + waitGroup.Add(40) + for i := 0; i < 10; i++ { + go threadinsertData(mtt1, n) + go threadinsertData(mtt2, n) + go threadinsertData(mtt3, n) + go threadinsertData(mtt4, n) + insertDataTotable(n, "t1") + } + waitGroup.Wait() + mtt1.WaitForGoroutineCompletion() + mtt2.WaitForGoroutineCompletion() + mtt3.WaitForGoroutineCompletion() + mtt4.WaitForGoroutineCompletion() + re1, err := ddb.RunScript("select * from loadTable('dfs://test_MultithreadedTableWriter',`pt1) order by sym,tradeDate,tradePrice,vwap,volume,valueTrade;") + So(err, ShouldBeNil) + re2, err := ddb.RunScript("select * from loadTable('dfs://test_MultithreadedTableWriter',`pt2) order by sym,tradeDate,tradePrice,vwap,volume,valueTrade;") + So(err, ShouldBeNil) + re3, err := ddb.RunScript("select * from loadTable('dfs://test_MultithreadedTableWriter',`pt3) order by sym,tradeDate,tradePrice,vwap,volume,valueTrade;") + So(err, ShouldBeNil) + re4, err := ddb.RunScript("select * from loadTable('dfs://test_MultithreadedTableWriter',`pt4) order by sym,tradeDate,tradePrice,vwap,volume,valueTrade;") + So(err, ShouldBeNil) + ex1, err := ddb.RunScript("select * from t1 where isDuplicated([volume, tradeDate], LAST)=false order by sym,tradeDate,tradePrice,vwap,volume,valueTrade;") + So(err, ShouldBeNil) + ex2, err := ddb.RunScript("select * from t1 where isDuplicated([volume, tradeDate], FIRST)=false order by 
sym,tradeDate,tradePrice,vwap,volume,valueTrade;") + So(err, ShouldBeNil) + reTable1 := re1.(*model.Table) + reTable2 := re2.(*model.Table) + reTable3 := re3.(*model.Table) + reTable4 := re4.(*model.Table) + exTable1 := ex1.(*model.Table) + exTable2 := ex2.(*model.Table) + for i := 0; i < len(reTable1.GetColumnNames()); i++ { + So(reTable1.GetColumnByIndex(i).String(), ShouldEqual, exTable1.GetColumnByIndex(i).String()) + So(reTable2.GetColumnByIndex(i).String(), ShouldEqual, exTable2.GetColumnByIndex(i).String()) + So(reTable3.GetColumnByIndex(i).String(), ShouldEqual, exTable1.GetColumnByIndex(i).String()) + So(reTable4.GetColumnByIndex(i).String(), ShouldEqual, exTable1.GetColumnByIndex(i).String()) + } + _, err = ddb.RunScript("undef(`t1, SHARED)") + So(err, ShouldBeNil) + _, err = ddb.RunScript("dropDatabase(\"dfs://test_MultithreadedTableWriter\")") + So(err, ShouldBeNil) + }) +} + +func TestMultiGoroutineTable_insert_dfs_length_eq_1024(t *testing.T) { + Convey("func TestMultiGoroutineTable_insert_dfs_length_eq_1024", t, func() { + ddb, err := api.NewSimpleDolphinDBClient(context.TODO(), setup.Address, setup.UserName, setup.Password) + So(err, ShouldBeNil) + defer ddb.Close() + script := "Database = \"dfs://test_MultithreadedTableWriter\"\n" + + "if(exists(Database)){\n" + + "\tdropDatabase(Database)\t\n" + + "}\n" + + "db1=database(\"\", RANGE, 1969.12.01+(0..11))\n" + + "\tdb2=database(\"\", HASH,[INT,3])\n" + + "\tdb=database(Database, COMPO, [db2, db1], , \"OLAP\", chunkGranularity=\"DATABASE\")\n" + + "t=table(1:0, `sym`tradeDate`tradePrice`vwap`volume`valueTrade, [SYMBOL,DATEHOUR, DOUBLE, DOUBLE, INT, DOUBLE])\nshare t as t1;" + + "\tcreatePartitionedTable(dbHandle=db, table=t, tableName=`pt, partitionColumns=[\"volume\",\"tradeDate\"])\n" + _, err = ddb.RunScript(script) + So(err, ShouldBeNil) + opt1 := &mtw.Option{ + GoroutineCount: 20, + BatchSize: 10, + Throttle: 1, + PartitionCol: "volume", + Database: "dfs://test_MultithreadedTableWriter", + TableName: "pt", + UserID: setup.UserName, + Password: setup.Password, + Address: setup.Address, + } + mtt1, err := mtw.NewMultiGoroutineTable(opt1) + So(err, ShouldBeNil) + n := 1024 + waitGroup.Add(1) + for i := 0; i < 1; i++ { + go threadinsertData(mtt1, n) + insertDataTotable(n, "t1") + } + waitGroup.Wait() + mtt1.WaitForGoroutineCompletion() + re1, err := ddb.RunScript("select * from loadTable('dfs://test_MultithreadedTableWriter',`pt) order by sym,tradeDate,tradePrice,vwap,volume,valueTrade;") + So(err, ShouldBeNil) + ex, err := ddb.RunScript("select * from t1 order by sym,tradeDate,tradePrice,vwap,volume,valueTrade;") + So(err, ShouldBeNil) + reTable1 := re1.(*model.Table) + exTable := ex.(*model.Table) + for i := 0; i < len(reTable1.GetColumnNames()); i++ { + So(reTable1.GetColumnByIndex(i).String(), ShouldEqual, exTable.GetColumnByIndex(i).String()) + } + _, err = ddb.RunScript("undef(`t1, SHARED)") + So(err, ShouldBeNil) + _, err = ddb.RunScript("dropDatabase(\"dfs://test_MultithreadedTableWriter\")") + So(err, ShouldBeNil) + }) +} + +func TestMultiGoroutineTable_insert_dfs_length_eq_1048576(t *testing.T) { + Convey("func TestMultiGoroutineTable_insert_dfs_length_eq_1048576", t, func() { + ddb, err := api.NewSimpleDolphinDBClient(context.TODO(), setup.Address, setup.UserName, setup.Password) + So(err, ShouldBeNil) + defer ddb.Close() + script := "Database = \"dfs://test_MultithreadedTableWriter\"\n" + + "if(exists(Database)){\n" + + "\tdropDatabase(Database)\t\n" + + "}\n" + + "db1=database(\"\", RANGE, 
1969.12.01+(0..11))\n" + + "\tdb2=database(\"\", HASH,[INT,3])\n" + + "\tdb=database(Database, COMPO, [db2, db1], , \"OLAP\", chunkGranularity=\"DATABASE\")\n" + + "t=table(1:0, `sym`tradeDate`tradePrice`vwap`volume`valueTrade, [SYMBOL,DATEHOUR, DOUBLE, DOUBLE, INT, DOUBLE])\nshare t as t1;" + + "\tcreatePartitionedTable(dbHandle=db, table=t, tableName=`pt, partitionColumns=[\"volume\",\"tradeDate\"])\n" + _, err = ddb.RunScript(script) + So(err, ShouldBeNil) + opt1 := &mtw.Option{ + GoroutineCount: 20, + BatchSize: 10, + Throttle: 1, + PartitionCol: "volume", + Database: "dfs://test_MultithreadedTableWriter", + TableName: "pt", + UserID: setup.UserName, + Password: setup.Password, + Address: setup.Address, + } + mtt1, err := mtw.NewMultiGoroutineTable(opt1) + So(err, ShouldBeNil) + n := 1048576 + waitGroup.Add(1) + for i := 0; i < 1; i++ { + go threadinsertData(mtt1, n) + insertDataTotable(n, "t1") + } + waitGroup.Wait() + mtt1.WaitForGoroutineCompletion() + re1, err := ddb.RunScript("select * from loadTable('dfs://test_MultithreadedTableWriter',`pt) order by sym,tradeDate,tradePrice,vwap,volume,valueTrade;") + So(err, ShouldBeNil) + ex, err := ddb.RunScript("select * from t1 order by sym,tradeDate,tradePrice,vwap,volume,valueTrade;") + So(err, ShouldBeNil) + reTable1 := re1.(*model.Table) + exTable := ex.(*model.Table) + for i := 0; i < len(reTable1.GetColumnNames()); i++ { + So(reTable1.GetColumnByIndex(i).String(), ShouldEqual, exTable.GetColumnByIndex(i).String()) + } + _, err = ddb.RunScript("undef(`t1, SHARED)") + So(err, ShouldBeNil) + _, err = ddb.RunScript("dropDatabase(\"dfs://test_MultithreadedTableWriter\")") + So(err, ShouldBeNil) + }) +} + +func TestMultiGoroutineTable_insert_dfs_length_eq_3000000(t *testing.T) { + Convey("func TestMultiGoroutineTable_insert_dfs_length_eq_3000000", t, func() { + ddb, err := api.NewSimpleDolphinDBClient(context.TODO(), setup.Address, setup.UserName, setup.Password) + So(err, ShouldBeNil) + defer ddb.Close() + script := "Database = \"dfs://test_MultithreadedTableWriter\"\n" + + "if(exists(Database)){\n" + + "\tdropDatabase(Database)\t\n" + + "}\n" + + "db1=database(\"\", RANGE, 1969.12.01+(0..11))\n" + + "\tdb2=database(\"\", HASH,[INT,3])\n" + + "\tdb=database(Database, COMPO, [db2, db1], , \"OLAP\", chunkGranularity=\"DATABASE\")\n" + + "t=table(1:0, `sym`tradeDate`tradePrice`vwap`volume`valueTrade, [SYMBOL,DATEHOUR, DOUBLE, DOUBLE, INT, DOUBLE])\nshare t as t1;" + + "\tcreatePartitionedTable(dbHandle=db, table=t, tableName=`pt, partitionColumns=[\"volume\",\"tradeDate\"])\n" + _, err = ddb.RunScript(script) + So(err, ShouldBeNil) + opt1 := &mtw.Option{ + GoroutineCount: 20, + BatchSize: 10, + Throttle: 1, + PartitionCol: "volume", + Database: "dfs://test_MultithreadedTableWriter", + TableName: "pt", + UserID: setup.UserName, + Password: setup.Password, + Address: setup.Address, + } + mtt1, err := mtw.NewMultiGoroutineTable(opt1) + So(err, ShouldBeNil) + n := 3000000 + waitGroup.Add(1) + for i := 0; i < 1; i++ { + threadinsertData(mtt1, n) + insertDataTotable(n, "t1") + } + waitGroup.Wait() + mtt1.WaitForGoroutineCompletion() + re1, err := ddb.RunScript("select * from loadTable('dfs://test_MultithreadedTableWriter',`pt) order by sym,tradeDate,tradePrice,vwap,volume,valueTrade;") + So(err, ShouldBeNil) + ex, err := ddb.RunScript("select * from t1 order by sym,tradeDate,tradePrice,vwap,volume,valueTrade;") + So(err, ShouldBeNil) + reTable1 := re1.(*model.Table) + exTable := ex.(*model.Table) + for i := 0; i < len(reTable1.GetColumnNames()); 
i++ { + So(reTable1.GetColumnByIndex(i).String(), ShouldEqual, exTable.GetColumnByIndex(i).String()) + } + _, err = ddb.RunScript("undef(`t1, SHARED)") + So(err, ShouldBeNil) + _, err = ddb.RunScript("dropDatabase(\"dfs://test_MultithreadedTableWriter\")") + So(err, ShouldBeNil) + }) +} + +func TestMultiGoroutineTable_insert_streamTable_multipleThread(t *testing.T) { + Convey("func TestMultiGoroutineTable_insert_streamTable_multipleThread", t, func() { + ddb, err := api.NewSimpleDolphinDBClient(context.TODO(), setup.Address, setup.UserName, setup.Password) + So(err, ShouldBeNil) + defer ddb.Close() + script := "t=table(1:0, `sym`tradeDate`tradePrice`vwap`volume`valueTrade, [SYMBOL, DATETIME, DOUBLE, DOUBLE, INT, DOUBLE])\n;share t as t1;" + + "tt=table(1:0, `sym`tradeDate`tradePrice`vwap`volume`valueTrade, [SYMBOL, DATETIME, DOUBLE, DOUBLE, INT, DOUBLE])\n;share tt as t2;" + _, err = ddb.RunScript(script) + So(err, ShouldBeNil) + opt1 := &mtw.Option{ + GoroutineCount: 20, + BatchSize: 10, + Throttle: 3, + PartitionCol: "volume", + Database: "", + TableName: "t2", + UserID: setup.UserName, + Password: setup.Password, + Address: setup.Address, + } + mtt1, err := mtw.NewMultiGoroutineTable(opt1) + So(err, ShouldBeNil) + n := 1000 + waitGroup.Add(10) + for i := 0; i < 10; i++ { + go threadinsertData(mtt1, n) + insertDataTotable(n, "t1") + } + waitGroup.Wait() + mtt1.WaitForGoroutineCompletion() + re1, err := ddb.RunScript("select * from t2 order by sym,tradeDate,tradePrice,vwap,volume,valueTrade;") + So(err, ShouldBeNil) + ex, err := ddb.RunScript("select * from t1 order by sym,tradeDate,tradePrice,vwap,volume,valueTrade;") + So(err, ShouldBeNil) + reTable1 := re1.(*model.Table) + exTable := ex.(*model.Table) + for i := 0; i < len(reTable1.GetColumnNames()); i++ { + So(reTable1.GetColumnByIndex(i).String(), ShouldEqual, exTable.GetColumnByIndex(i).String()) + } + _, err = ddb.RunScript("undef(`t1, SHARED)") + So(err, ShouldBeNil) + }) +} + +func TestMultiGoroutineTable_insert_streamtable_200cols(t *testing.T) { + Convey("func TestMultiGoroutineTable_insert_streamtable_200cols", t, func() { + ddb, err := api.NewSimpleDolphinDBClient(context.TODO(), setup.Address, setup.UserName, setup.Password) + So(err, ShouldBeNil) + defer ddb.Close() + script := "t=streamTable(1:0, `sym`tradeDate, [SYMBOL,DATEHOUR])\n;\n" + + "addColumn(t,\"col\"+string(1..200),take([DOUBLE],200));share t as t1;" + + "tt=streamTable(1:0, `sym`tradeDate, [SYMBOL,DATEHOUR])\n;" + + "addColumn(tt,\"col\"+string(1..200),take([DOUBLE],200));share tt as trades;" + _, err = ddb.RunScript(script) + So(err, ShouldBeNil) + opt1 := &mtw.Option{ + GoroutineCount: 20, + BatchSize: 10000, + Throttle: 1, + PartitionCol: "sym", + Database: "", + TableName: "trades", + UserID: setup.UserName, + Password: setup.Password, + Address: setup.Address, + } + mtt1, err := mtw.NewMultiGoroutineTable(opt1) + So(err, ShouldBeNil) + for ind := 0; ind < 10; ind++ { + row := make([]model.DataForm, 202) + dt, err := model.NewDataType(model.DtString, "AAPL") + AssertNil(err) + row[0] = model.NewScalar(dt) + dt, err = model.NewDataType(model.DtNanoTimestamp, time.Date(2022, time.Month(1), 1+ind%10, 1, 1, 0, 0, time.UTC)) + AssertNil(err) + row[1] = model.NewScalar(dt) + i := float64(ind) + for j := 0; j < 200; j++ { + dt, err = model.NewDataType(model.DtDouble, i+0.1) + AssertNil(err) + row[j+2] = model.NewScalar(dt) + } + _, err = ddb.RunFunc("tableInsert{t1}", row) + So(err, ShouldBeNil) + err = mtt1.Insert("AAPL", time.Date(2022, time.Month(1), 1+ind%10, 
1, 1, 0, 0, time.UTC), i+0.1, i+0.1, i+0.1, i+0.1, i+0.1, i+0.1, i+0.1, i+0.1, i+0.1, i+0.1, + i+0.1, i+0.1, i+0.1, i+0.1, i+0.1, i+0.1, i+0.1, i+0.1, i+0.1, i+0.1, i+0.1, i+0.1, i+0.1, i+0.1, i+0.1, i+0.1, i+0.1, i+0.1, i+0.1, i+0.1, i+0.1, i+0.1, i+0.1, i+0.1, i+0.1, i+0.1, i+0.1, i+0.1, i+0.1, i+0.1, i+0.1, i+0.1, i+0.1, i+0.1, i+0.1, i+0.1, i+0.1, i+0.1, i+0.1, i+0.1, + i+0.1, i+0.1, i+0.1, i+0.1, i+0.1, i+0.1, i+0.1, i+0.1, i+0.1, i+0.1, i+0.1, i+0.1, i+0.1, i+0.1, i+0.1, i+0.1, i+0.1, i+0.1, i+0.1, i+0.1, i+0.1, i+0.1, i+0.1, i+0.1, i+0.1, i+0.1, i+0.1, i+0.1, i+0.1, i+0.1, + i+0.1, i+0.1, i+0.1, i+0.1, i+0.1, i+0.1, i+0.1, i+0.1, i+0.1, i+0.1, i+0.1, i+0.1, i+0.1, i+0.1, i+0.1, i+0.1, i+0.1, i+0.1, i+0.1, i+0.1, i+0.1, i+0.1, i+0.1, i+0.1, i+0.1, i+0.1, i+0.1, i+0.1, i+0.1, i+0.1, + i+0.1, i+0.1, i+0.1, i+0.1, i+0.1, i+0.1, i+0.1, i+0.1, i+0.1, i+0.1, i+0.1, i+0.1, i+0.1, i+0.1, i+0.1, i+0.1, i+0.1, i+0.1, i+0.1, i+0.1, i+0.1, i+0.1, i+0.1, i+0.1, i+0.1, i+0.1, i+0.1, i+0.1, i+0.1, i+0.1, i+0.1, i+0.1, i+0.1, i+0.1, i+0.1, i+0.1, i+0.1, i+0.1, i+0.1, i+0.1, + i+0.1, i+0.1, i+0.1, i+0.1, i+0.1, i+0.1, i+0.1, i+0.1, i+0.1, i+0.1, i+0.1, i+0.1, i+0.1, i+0.1, i+0.1, i+0.1, i+0.1, i+0.1, i+0.1, i+0.1, i+0.1, i+0.1, i+0.1, i+0.1, i+0.1, i+0.1, i+0.1, i+0.1, i+0.1, i+0.1, + i+0.1, i+0.1, i+0.1, i+0.1, i+0.1, i+0.1, i+0.1, i+0.1, i+0.1, i+0.1, i+0.1, i+0.1, i+0.1, i+0.1, i+0.1, i+0.1, i+0.1, i+0.1, i+0.1, i+0.1) + AssertNil(err) + } + mtt1.WaitForGoroutineCompletion() + re1, err := ddb.RunScript("select * from trades order by sym,tradeDate;") + So(err, ShouldBeNil) + ex, err := ddb.RunScript("select * from t1 order by sym,tradeDate;") + So(err, ShouldBeNil) + reTable1 := re1.(*model.Table) + exTable := ex.(*model.Table) + for i := 0; i < len(reTable1.GetColumnNames()); i++ { + So(reTable1.GetColumnByIndex(i).String(), ShouldEqual, exTable.GetColumnByIndex(i).String()) + } + _, err = ddb.RunScript("undef(`t1, SHARED)") + So(err, ShouldBeNil) + _, err = ddb.RunScript("undef(`trades, SHARED)") + So(err, ShouldBeNil) + }) +} + +func TestMultiGoroutineTable_insert_dfstable_200cols(t *testing.T) { + Convey("func TestMultiGoroutineTable_insert_dfstable_200cols", t, func() { + ddb, err := api.NewSimpleDolphinDBClient(context.TODO(), setup.Address, setup.UserName, setup.Password) + So(err, ShouldBeNil) + defer ddb.Close() + script := "t=table(1:0, `sym`tradeDate, [SYMBOL,TIMESTAMP]);\n" + + "addColumn(t,\"col\"+string(1..200),take([DOUBLE],200));share t as t1;" + + "Database = \"dfs://test_MultithreadedTableWriter\"\n" + + "if(exists(Database)){\n" + + "\tdropDatabase(Database)\t\n" + + "}\n" + + "db=database(Database, VALUE, date(1..2),,'TSDB');\n" + + "createPartitionedTable(dbHandle=db, table=t, tableName=`pt1, partitionColumns=[\"tradeDate\"],sortColumns=`tradeDate,compressMethods={tradeDate:\"delta\"});" + _, err = ddb.RunScript(script) + So(err, ShouldBeNil) + opt1 := &mtw.Option{ + GoroutineCount: 20, + BatchSize: 1000, + Throttle: 1, + PartitionCol: "tradeDate", + Database: "dfs://test_MultithreadedTableWriter", + TableName: "pt1", + UserID: setup.UserName, + Password: setup.Password, + Address: setup.Address, + } + mtt1, err := mtw.NewMultiGoroutineTable(opt1) + So(err, ShouldBeNil) + for ind := 0; ind < 10; ind++ { + row := make([]model.DataForm, 202) + dt, err := model.NewDataType(model.DtString, "AAPL") + AssertNil(err) + row[0] = model.NewScalar(dt) + dt, err = model.NewDataType(model.DtNanoTimestamp, time.Date(2022, time.Month(1), 1+ind%10, 1, 1, 0, 0, time.UTC)) + AssertNil(err) + 
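+ // row holds one record as model.DataForm scalars: row[0] is the SYMBOL,
+ // row[1] the timestamp, and row[2..201] the 200 DOUBLE columns. The same
+ // values are written through mtt1.Insert below, so the writer path and the
+ // tableInsert{t1} path should produce identical tables.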
row[1] = model.NewScalar(dt) + i := float64(ind) + for j := 0; j < 200; j++ { + dt, err = model.NewDataType(model.DtDouble, i+0.1) + AssertNil(err) + row[j+2] = model.NewScalar(dt) + } + _, err = ddb.RunFunc("tableInsert{t1}", row) + So(err, ShouldBeNil) + err = mtt1.Insert("AAPL", time.Date(2022, time.Month(1), 1+ind%10, 1, 1, 0, 0, time.UTC), i+0.1, i+0.1, i+0.1, i+0.1, i+0.1, i+0.1, i+0.1, i+0.1, i+0.1, i+0.1, + i+0.1, i+0.1, i+0.1, i+0.1, i+0.1, i+0.1, i+0.1, i+0.1, i+0.1, i+0.1, i+0.1, i+0.1, i+0.1, i+0.1, i+0.1, i+0.1, i+0.1, i+0.1, i+0.1, i+0.1, i+0.1, i+0.1, i+0.1, i+0.1, i+0.1, i+0.1, i+0.1, i+0.1, i+0.1, i+0.1, i+0.1, i+0.1, i+0.1, i+0.1, i+0.1, i+0.1, i+0.1, i+0.1, i+0.1, i+0.1, + i+0.1, i+0.1, i+0.1, i+0.1, i+0.1, i+0.1, i+0.1, i+0.1, i+0.1, i+0.1, i+0.1, i+0.1, i+0.1, i+0.1, i+0.1, i+0.1, i+0.1, i+0.1, i+0.1, i+0.1, i+0.1, i+0.1, i+0.1, i+0.1, i+0.1, i+0.1, i+0.1, i+0.1, i+0.1, i+0.1, + i+0.1, i+0.1, i+0.1, i+0.1, i+0.1, i+0.1, i+0.1, i+0.1, i+0.1, i+0.1, i+0.1, i+0.1, i+0.1, i+0.1, i+0.1, i+0.1, i+0.1, i+0.1, i+0.1, i+0.1, i+0.1, i+0.1, i+0.1, i+0.1, i+0.1, i+0.1, i+0.1, i+0.1, i+0.1, i+0.1, + i+0.1, i+0.1, i+0.1, i+0.1, i+0.1, i+0.1, i+0.1, i+0.1, i+0.1, i+0.1, i+0.1, i+0.1, i+0.1, i+0.1, i+0.1, i+0.1, i+0.1, i+0.1, i+0.1, i+0.1, i+0.1, i+0.1, i+0.1, i+0.1, i+0.1, i+0.1, i+0.1, i+0.1, i+0.1, i+0.1, i+0.1, i+0.1, i+0.1, i+0.1, i+0.1, i+0.1, i+0.1, i+0.1, i+0.1, i+0.1, + i+0.1, i+0.1, i+0.1, i+0.1, i+0.1, i+0.1, i+0.1, i+0.1, i+0.1, i+0.1, i+0.1, i+0.1, i+0.1, i+0.1, i+0.1, i+0.1, i+0.1, i+0.1, i+0.1, i+0.1, i+0.1, i+0.1, i+0.1, i+0.1, i+0.1, i+0.1, i+0.1, i+0.1, i+0.1, i+0.1, + i+0.1, i+0.1, i+0.1, i+0.1, i+0.1, i+0.1, i+0.1, i+0.1, i+0.1, i+0.1, i+0.1, i+0.1, i+0.1, i+0.1, i+0.1, i+0.1, i+0.1, i+0.1, i+0.1, i+0.1) + AssertNil(err) + } + mtt1.WaitForGoroutineCompletion() + re1, err := ddb.RunScript("select * from loadTable('dfs://test_MultithreadedTableWriter',`pt1) order by sym,tradeDate;") + So(err, ShouldBeNil) + ex, err := ddb.RunScript("select * from t1 order by sym,tradeDate;") + So(err, ShouldBeNil) + reTable1 := re1.(*model.Table) + exTable := ex.(*model.Table) + for i := 0; i < len(reTable1.GetColumnNames()); i++ { + So(reTable1.GetColumnByIndex(i).String(), ShouldEqual, exTable.GetColumnByIndex(i).String()) + } + _, err = ddb.RunScript("undef(`t1, SHARED)") + So(err, ShouldBeNil) + _, err = ddb.RunScript("dropDatabase(\"dfs://test_MultithreadedTableWriter\")") + So(err, ShouldBeNil) + }) +} + +func TestMultiGoroutineTable_concurrentWrite_getFailedData_when_unfinished_write(t *testing.T) { + Convey("func TestMultiGoroutineTable_concurrentWrite_getFailedData_when_unfinished_write", t, func() { + ddb, err := api.NewSimpleDolphinDBClient(context.TODO(), setup.Address, setup.UserName, setup.Password) + So(err, ShouldBeNil) + defer ddb.Close() + script := "login(`admin,`123456)\n" + + "Database = \"dfs://test_mtw_concurrentWrite_FailedData\"\n" + + "if(existsDatabase(Database)){\n" + + "\tdropDB(Database)\n" + + "}\n" + + "db = database(Database,RANGE,0 10 20 30)\n" + + "t = table(10:0,`id`price`val,[INT,DOUBLE,INT])\n" + + "pt = db.createPartitionedTable(t,`pt,`id)" + _, err = ddb.RunScript(script) + So(err, ShouldBeNil) + opt1 := &mtw.Option{ + GoroutineCount: 10, + BatchSize: 1000, + Throttle: 1, + PartitionCol: "id", + Database: "dfs://test_mtw_concurrentWrite_FailedData", + TableName: "pt", + UserID: setup.UserName, + Password: setup.Password, + Address: setup.Address, + } + mtt, err := mtw.NewMultiGoroutineTable(opt1) + So(err, ShouldBeNil) + for i := 0; i < 10000; 
i++ { + err = mtt.Insert(int32(5), float64(14.6), int32(1)) + AssertNil(err) + } + failedData := mtt.GetStatus().FailedRows + UnwrittenData := mtt.GetUnwrittenData() + mtt.WaitForGoroutineCompletion() + re, err := ddb.RunScript("(exec count(*) from loadTable(Database, `pt) where val = 1)[0]") + So(err, ShouldBeNil) + reTable := re.(*model.Scalar) + So(failedData+len(UnwrittenData)+int(reTable.Value().(int32)), ShouldEqual, 10000) + _, err = ddb.RunScript("dropDatabase(\"dfs://test_mtw_concurrentWrite_FailedData\")") + So(err, ShouldBeNil) + }) +} + +func TestMultiGoroutineTable_insert_streamTable_eq_1024(t *testing.T) { + Convey("func TestMultiGoroutineTable_insert_streamTable_eq_1024", t, func() { + ddb, err := api.NewSimpleDolphinDBClient(context.TODO(), setup.Address, setup.UserName, setup.Password) + So(err, ShouldBeNil) + defer ddb.Close() + script := "t=streamTable(1:0, `sym`tradeDate`tradePrice`vwap`volume`valueTrade, [SYMBOL, DATETIME, DOUBLE, DOUBLE, INT, DOUBLE])\n;share t as t1;" + + "tt=streamTable(1:0, `sym`tradeDate`tradePrice`vwap`volume`valueTrade, [SYMBOL, DATETIME, DOUBLE, DOUBLE, INT, DOUBLE])\n;share tt as t2;" + _, err = ddb.RunScript(script) + So(err, ShouldBeNil) + opt1 := &mtw.Option{ + GoroutineCount: 20, + BatchSize: 10, + Throttle: 1, + PartitionCol: "volume", + Database: "", + TableName: "t2", + UserID: setup.UserName, + Password: setup.Password, + Address: setup.Address, + } + mtt1, err := mtw.NewMultiGoroutineTable(opt1) + So(err, ShouldBeNil) + n := 1024 + waitGroup.Add(1) + for i := 0; i < 1; i++ { + go threadinsertData(mtt1, n) + insertDataTotable(n, "t1") + } + waitGroup.Wait() + mtt1.WaitForGoroutineCompletion() + re1, err := ddb.RunScript("select * from t2 order by sym,tradeDate,tradePrice,vwap,volume,valueTrade;") + So(err, ShouldBeNil) + ex, err := ddb.RunScript("select * from t1 order by sym,tradeDate,tradePrice,vwap,volume,valueTrade;") + So(err, ShouldBeNil) + reTable1 := re1.(*model.Table) + exTable := ex.(*model.Table) + for i := 0; i < len(reTable1.GetColumnNames()); i++ { + So(reTable1.GetColumnByIndex(i).String(), ShouldEqual, exTable.GetColumnByIndex(i).String()) + } + _, err = ddb.RunScript("undef(`t1, SHARED)") + So(err, ShouldBeNil) + }) +} + +func TestMultiGoroutineTable_insert_streamTable_eq_1048576(t *testing.T) { + Convey("func TestMultiGoroutineTable_insert_streamTable_eq_1048576", t, func() { + ddb, err := api.NewSimpleDolphinDBClient(context.TODO(), setup.Address, setup.UserName, setup.Password) + So(err, ShouldBeNil) + defer ddb.Close() + script := "t=streamTable(1:0, `sym`tradeDate`tradePrice`vwap`volume`valueTrade, [SYMBOL, DATETIME, DOUBLE, DOUBLE, INT, DOUBLE])\n;share t as t1;" + + "tt=streamTable(1:0, `sym`tradeDate`tradePrice`vwap`volume`valueTrade, [SYMBOL, DATETIME, DOUBLE, DOUBLE, INT, DOUBLE])\n;share tt as t2;" + _, err = ddb.RunScript(script) + So(err, ShouldBeNil) + opt1 := &mtw.Option{ + GoroutineCount: 20, + BatchSize: 10, + Throttle: 1, + PartitionCol: "volume", + Database: "", + TableName: "t2", + UserID: setup.UserName, + Password: setup.Password, + Address: setup.Address, + } + mtt1, err := mtw.NewMultiGoroutineTable(opt1) + So(err, ShouldBeNil) + n := 1048576 + waitGroup.Add(1) + for i := 0; i < 1; i++ { + go threadinsertData(mtt1, n) + insertDataTotable(n, "t1") + } + waitGroup.Wait() + mtt1.WaitForGoroutineCompletion() + re1, err := ddb.RunScript("select * from t2 order by sym,tradeDate,tradePrice,vwap,volume,valueTrade;") + So(err, ShouldBeNil) + ex, err := ddb.RunScript("select * from t1 order by 
sym,tradeDate,tradePrice,vwap,volume,valueTrade;") + So(err, ShouldBeNil) + reTable1 := re1.(*model.Table) + exTable := ex.(*model.Table) + for i := 0; i < len(reTable1.GetColumnNames()); i++ { + So(reTable1.GetColumnByIndex(i).String(), ShouldEqual, exTable.GetColumnByIndex(i).String()) + } + _, err = ddb.RunScript("undef(`t1, SHARED)") + So(err, ShouldBeNil) + _, err = ddb.RunScript("undef(`t2, SHARED)") + So(err, ShouldBeNil) + }) +} + +func TestMultiGoroutineTable_insert_streamTable_eq_3000000(t *testing.T) { + Convey("func TestMultiGoroutineTable_insert_streamTable_eq_3000000", t, func() { + ddb, err := api.NewSimpleDolphinDBClient(context.TODO(), setup.Address, setup.UserName, setup.Password) + So(err, ShouldBeNil) + defer ddb.Close() + script := "t=streamTable(1:0, `sym`tradeDate`tradePrice`vwap`volume`valueTrade, [SYMBOL, DATETIME, DOUBLE, DOUBLE, INT, DOUBLE])\n;share t as t1;" + + "tt=streamTable(1:0, `sym`tradeDate`tradePrice`vwap`volume`valueTrade, [SYMBOL, DATETIME, DOUBLE, DOUBLE, INT, DOUBLE])\n;share tt as t2;" + _, err = ddb.RunScript(script) + So(err, ShouldBeNil) + opt1 := &mtw.Option{ + GoroutineCount: 20, + BatchSize: 10, + Throttle: 1, + PartitionCol: "volume", + Database: "", + TableName: "t2", + UserID: setup.UserName, + Password: setup.Password, + Address: setup.Address, + } + mtt1, err := mtw.NewMultiGoroutineTable(opt1) + So(err, ShouldBeNil) + n := 3000000 + waitGroup.Add(1) + for i := 0; i < 1; i++ { + go threadinsertData(mtt1, n) + insertDataTotable(n, "t1") + } + waitGroup.Wait() + mtt1.WaitForGoroutineCompletion() + re1, err := ddb.RunScript("select * from t2 order by sym,tradeDate,tradePrice,vwap,volume,valueTrade;") + So(err, ShouldBeNil) + ex, err := ddb.RunScript("select * from t1 order by sym,tradeDate,tradePrice,vwap,volume,valueTrade;") + So(err, ShouldBeNil) + reTable1 := re1.(*model.Table) + exTable := ex.(*model.Table) + for i := 0; i < len(reTable1.GetColumnNames()); i++ { + So(reTable1.GetColumnByIndex(i).String(), ShouldEqual, exTable.GetColumnByIndex(i).String()) + } + _, err = ddb.RunScript("undef(`t1, SHARED)") + So(err, ShouldBeNil) + _, err = ddb.RunScript("undef(`t2, SHARED)") + So(err, ShouldBeNil) + }) +} diff --git a/test/ploadText_test.go b/test/ploadText_test.go new file mode 100644 index 0000000..1581e72 --- /dev/null +++ b/test/ploadText_test.go @@ -0,0 +1,39 @@ +package test + +import ( + "context" + "fmt" + "testing" + + "github.com/dolphindb/api-go/api" + "github.com/dolphindb/api-go/model" + "github.com/dolphindb/api-go/test/setup" + . 
"github.com/smartystreets/goconvey/convey" +) + +func TestPloadTest(t *testing.T) { + Convey("test_PloadTest_prepare", t, func() { + ddb, err := api.NewSimpleDolphinDBClient(context.TODO(), setup.Address, setup.UserName, setup.Password) + So(err, ShouldBeNil) + data := setup.DATADIR + "/TradesSmall.csv" + fmt.Println(data) + Convey("test_PloadTest_para_filename", func() { + tmp, err := ddb.RunScript("select * from loadText(\"" + data + "\")") + ex := tmp.(*model.Table) + So(err, ShouldBeNil) + re, err := PloadTextFileName(ddb, data) + So(err, ShouldBeNil) + result := CompareTablesDataformTable(ex, re) + So(result, ShouldBeTrue) + }) + Convey("test_PloadTest_para_delimiter", func() { + tmp, err := ddb.RunScript("select * from loadText(\"" + data + "\", ';')") + ex := tmp.(*model.Table) + So(err, ShouldBeNil) + re, err := PloadTextDelimiter(ddb, data, ";") + So(err, ShouldBeNil) + result := CompareTablesDataformTable(ex, re) + So(result, ShouldBeTrue) + }) + }) +} diff --git a/test/run_function_test.go b/test/run_function_test.go new file mode 100644 index 0000000..4b57614 --- /dev/null +++ b/test/run_function_test.go @@ -0,0 +1,106 @@ +package test + +import ( + "context" + "testing" + + "github.com/dolphindb/api-go/api" + "github.com/dolphindb/api-go/model" + "github.com/dolphindb/api-go/test/setup" + . "github.com/smartystreets/goconvey/convey" +) + +func checkVectorisNull(arr *model.Vector) bool { + for i := 0; i < arr.Rows(); i++ { + re := arr.Data.IsNull(i) + if re != true { + return false + } + } + return true +} + +func TestRunScript(t *testing.T) { + Convey("test_RunScript_prepare", t, func() { + ddb, err := api.NewSimpleDolphinDBClient(context.TODO(), setup.Address, setup.UserName, setup.Password) + So(err, ShouldBeNil) + Convey("test_RunScript_func", func() { + Convey("test_RunScript_bool_scalar", func() { + tmp, err := ddb.RunScript("true") + So(err, ShouldBeNil) + reType := tmp.GetDataTypeString() + So(reType, ShouldEqual, "bool") + result := tmp.(*model.Scalar) + re := result.DataType.Value() + So(re, ShouldEqual, true) + tmp, err = ddb.RunScript("bool()") + So(err, ShouldBeNil) + result = tmp.(*model.Scalar) + re = result.IsNull() + So(re, ShouldEqual, true) + }) + Convey("test_RunScript_char_scalar", func() { + tmp, err := ddb.RunScript("'a'") + So(err, ShouldBeNil) + reType := tmp.GetDataTypeString() + So(reType, ShouldEqual, "char") + result := tmp.(*model.Scalar) + re := result.DataType.Value() + var ex byte = 97 + So(re, ShouldEqual, ex) + tmp, err = ddb.RunScript("char()") + So(err, ShouldBeNil) + result = tmp.(*model.Scalar) + re = result.IsNull() + So(re, ShouldEqual, true) + }) + Convey("test_RunScript_short_scalar", func() { + tmp, err := ddb.RunScript("22h") + So(err, ShouldBeNil) + reType := tmp.GetDataTypeString() + So(reType, ShouldEqual, "short") + result := tmp.(*model.Scalar) + re := result.DataType.Value() + var ex int16 = 22 + So(re, ShouldEqual, ex) + tmp, err = ddb.RunScript("short()") + So(err, ShouldBeNil) + result = tmp.(*model.Scalar) + re = result.IsNull() + So(re, ShouldEqual, true) + }) + Convey("test_RunScript_int_scalar", func() { + tmp, err := ddb.RunScript("22") + So(err, ShouldBeNil) + reType := tmp.GetDataTypeString() + So(reType, ShouldEqual, "int") + result := tmp.(*model.Scalar) + re := result.DataType.Value() + var ex int32 = 22 + So(re, ShouldEqual, ex) + tmp, err = ddb.RunScript("int()") + So(err, ShouldBeNil) + result = tmp.(*model.Scalar) + re = result.IsNull() + So(re, ShouldEqual, true) + }) + Convey("test_RunScript_long_vector", 
func() { + tmp, err := ddb.RunScript("22l 200l") + So(err, ShouldBeNil) + reType := tmp.GetDataTypeString() + So(reType, ShouldEqual, "long") + result := tmp.(*model.Vector) + re := result.Data.Value() + var ex1 int64 = 22 + var ex2 int64 = 200 + So(re[0], ShouldEqual, ex1) + So(re[1], ShouldEqual, ex2) + tmp, err = ddb.RunScript("take(00i, 10)") + So(err, ShouldBeNil) + result = tmp.(*model.Vector) + rs := checkVectorisNull(result) + So(rs, ShouldEqual, true) + }) + }) + }) +} diff --git a/test/saveTable_test.go b/test/saveTable_test.go new file mode 100644 index 0000000..98b87c3 --- /dev/null +++ b/test/saveTable_test.go @@ -0,0 +1,217 @@ +package test + +import ( + "context" + "testing" + + "github.com/dolphindb/api-go/api" + "github.com/dolphindb/api-go/model" + "github.com/dolphindb/api-go/test/setup" + . "github.com/smartystreets/goconvey/convey" +) + +func TestSaveTable(t *testing.T) { + Convey("Test_function_SaveTable_prepare", t, func() { + ddb, err := api.NewSimpleDolphinDBClient(context.TODO(), setup.Address, setup.UserName, setup.Password) + So(err, ShouldBeNil) + Convey("Drop all Databases", func() { + dbPaths := []string{DfsDBPath, DiskDBPath} + for _, dbPath := range dbPaths { + script := ` + if(existsDatabase("` + dbPath + `")){ + dropDatabase("` + dbPath + `") + } + if(exists("` + dbPath + `")){ + rmdir("` + dbPath + `", true) + } + ` + _, err = ddb.RunScript(script) + So(err, ShouldBeNil) + re, err := ddb.RunScript(`existsDatabase("` + dbPath + `")`) + So(err, ShouldBeNil) + isExitsDatabase := re.(*model.Scalar).DataType.Value() + So(isExitsDatabase, ShouldBeFalse) + } + }) + _, err = ddb.RunScript(`t=table(1..10 as id, 1969.12.26+ 1..10 as datev, "A"+string(1..10) as str)`) + So(err, ShouldBeNil) + Convey("Test_function_SaveTable_DBHandle_exception", func() { + _, err = ddb.RunScript(`if(exists("` + DiskDBPath + `")){ + rmdir("` + DiskDBPath + `", true)}`) + So(err, ShouldBeNil) + l := new(api.SaveTableRequest). + SetDBPath("dsa") + err = ddb.SaveTable(l) + So(err, ShouldNotBeNil) + }) + Convey("Test_function_SaveTable_disk_unpartitioned", func() { + _, err = ddb.RunScript(`if(exists("` + DiskDBPath + `")){ + rmdir("` + DiskDBPath + `", true)}`) + So(err, ShouldBeNil) + l := new(api.SaveTableRequest). + SetDBPath(DiskDBPath).SetTable("t") + err = ddb.SaveTable(l) + So(err, ShouldBeNil) + reTmp, err := LoadTable(ddb, "t", DiskDBPath) + So(err, ShouldBeNil) + reID := reTmp.Data.GetColumnByName("id") + redatev := reTmp.Data.GetColumnByName("datev") + restr := reTmp.Data.GetColumnByName("str") + So(reID.String(), ShouldEqual, "vector([1, 2, 3, 4, 5, 6, 7, 8, 9, 10])") + So(redatev.String(), ShouldEqual, "vector([1969.12.27, 1969.12.28, 1969.12.29, 1969.12.30, 1969.12.31, 1970.01.01, 1970.01.02, 1970.01.03, 1970.01.04, 1970.01.05])") + So(restr.String(), ShouldEqual, "vector([A1, A2, A3, A4, A5, A6, A7, A8, A9, A10])") + }) + Convey("Test_function_SaveTable_SetTableName", func() { + _, err = ddb.RunScript(`if(exists("` + DiskDBPath + `")){ + rmdir("` + DiskDBPath + `", true)}`) + So(err, ShouldBeNil) + l := new(api.SaveTableRequest). 
+ SetDBPath(DiskDBPath).SetTable("t").SetTableName(MemTableName) + err = ddb.SaveTable(l) + So(err, ShouldBeNil) + reTmp, err := LoadTable(ddb, MemTableName, DiskDBPath) + So(err, ShouldBeNil) + reID := reTmp.Data.GetColumnByName("id") + redatev := reTmp.Data.GetColumnByName("datev") + restr := reTmp.Data.GetColumnByName("str") + So(reID.String(), ShouldEqual, "vector([1, 2, 3, 4, 5, 6, 7, 8, 9, 10])") + So(redatev.String(), ShouldEqual, "vector([1969.12.27, 1969.12.28, 1969.12.29, 1969.12.30, 1969.12.31, 1970.01.01, 1970.01.02, 1970.01.03, 1970.01.04, 1970.01.05])") + So(restr.String(), ShouldEqual, "vector([A1, A2, A3, A4, A5, A6, A7, A8, A9, A10])") + }) + Convey("Test_function_SaveTable_SetAppending_true", func() { + _, err = ddb.RunScript(`if(exists("` + DiskDBPath + `")){ + rmdir("` + DiskDBPath + `", true)}`) + So(err, ShouldBeNil) + l := new(api.SaveTableRequest). + SetDBPath(DiskDBPath).SetTable("t").SetTableName(MemTableName).SetAppending(true) + err = ddb.SaveTable(l) + So(err, ShouldBeNil) + reTmp, err := LoadTable(ddb, MemTableName, DiskDBPath) + So(err, ShouldBeNil) + reID := reTmp.Data.GetColumnByName("id") + redatev := reTmp.Data.GetColumnByName("datev") + restr := reTmp.Data.GetColumnByName("str") + So(reID.String(), ShouldEqual, "vector([1, 2, 3, 4, 5, 6, 7, 8, 9, 10])") + So(redatev.String(), ShouldEqual, "vector([1969.12.27, 1969.12.28, 1969.12.29, 1969.12.30, 1969.12.31, 1970.01.01, 1970.01.02, 1970.01.03, 1970.01.04, 1970.01.05])") + So(restr.String(), ShouldEqual, "vector([A1, A2, A3, A4, A5, A6, A7, A8, A9, A10])") + l1 := new(api.SaveTableRequest). + SetDBPath(DiskDBPath).SetTable("t").SetTableName(MemTableName).SetAppending(true) + err = ddb.SaveTable(l1) + So(err, ShouldBeNil) + reTmp1, err := LoadTable(ddb, MemTableName, DiskDBPath) + So(err, ShouldBeNil) + reID1 := reTmp1.Data.GetColumnByName("id") + redatev1 := reTmp1.Data.GetColumnByName("datev") + restr1 := reTmp1.Data.GetColumnByName("str") + So(reID1.String(), ShouldEqual, "vector([1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10])") + So(redatev1.String(), ShouldEqual, "vector([1969.12.27, 1969.12.28, 1969.12.29, 1969.12.30, 1969.12.31, 1970.01.01, 1970.01.02, 1970.01.03, 1970.01.04, 1970.01.05, 1969.12.27, 1969.12.28, 1969.12.29, 1969.12.30, 1969.12.31, 1970.01.01, 1970.01.02, 1970.01.03, 1970.01.04, 1970.01.05])") + So(restr1.String(), ShouldEqual, "vector([A1, A2, A3, A4, A5, A6, A7, A8, A9, A10, A1, A2, A3, A4, A5, A6, A7, A8, A9, A10])") + }) + Convey("Test_function_SaveTable_SetAppending_false", func() { + _, err = ddb.RunScript(`if(exists("` + DiskDBPath + `")){ + rmdir("` + DiskDBPath + `", true)}`) + So(err, ShouldBeNil) + l := new(api.SaveTableRequest). 
+ SetDBPath(DiskDBPath).SetTable("t").SetTableName(MemTableName).SetAppending(false) + err = ddb.SaveTable(l) + So(err, ShouldBeNil) + reTmp, err := LoadTable(ddb, MemTableName, DiskDBPath) + So(err, ShouldBeNil) + reID := reTmp.Data.GetColumnByName("id") + redatev := reTmp.Data.GetColumnByName("datev") + restr := reTmp.Data.GetColumnByName("str") + So(reID.String(), ShouldEqual, "vector([1, 2, 3, 4, 5, 6, 7, 8, 9, 10])") + So(redatev.String(), ShouldEqual, "vector([1969.12.27, 1969.12.28, 1969.12.29, 1969.12.30, 1969.12.31, 1970.01.01, 1970.01.02, 1970.01.03, 1970.01.04, 1970.01.05])") + So(restr.String(), ShouldEqual, "vector([A1, A2, A3, A4, A5, A6, A7, A8, A9, A10])") + }) + Convey("Test_function_SaveTable_SetAppending_false_SetCompression_true", func() { + _, err = ddb.RunScript(`if(exists("` + DiskDBPath + `")){ + rmdir("` + DiskDBPath + `", true)}`) + So(err, ShouldBeNil) + l := new(api.SaveTableRequest). + SetDBPath(DiskDBPath).SetTable("t").SetTableName(MemTableName).SetAppending(false).SetCompression(true) + err = ddb.SaveTable(l) + So(err, ShouldBeNil) + reTmp, err := LoadTable(ddb, MemTableName, DiskDBPath) + So(err, ShouldBeNil) + reID := reTmp.Data.GetColumnByName("id") + redatev := reTmp.Data.GetColumnByName("datev") + restr := reTmp.Data.GetColumnByName("str") + So(reID.String(), ShouldEqual, "vector([1, 2, 3, 4, 5, 6, 7, 8, 9, 10])") + So(redatev.String(), ShouldEqual, "vector([1969.12.27, 1969.12.28, 1969.12.29, 1969.12.30, 1969.12.31, 1970.01.01, 1970.01.02, 1970.01.03, 1970.01.04, 1970.01.05])") + So(restr.String(), ShouldEqual, "vector([A1, A2, A3, A4, A5, A6, A7, A8, A9, A10])") + }) + Convey("Test_function_SaveTable_SetAppending_false_SetCompression_false", func() { + _, err = ddb.RunScript(`if(exists("` + DiskDBPath + `")){ + rmdir("` + DiskDBPath + `", true)}`) + So(err, ShouldBeNil) + l := new(api.SaveTableRequest). + SetDBPath(DiskDBPath).SetTable("t").SetTableName(MemTableName).SetAppending(false).SetCompression(false) + err = ddb.SaveTable(l) + So(err, ShouldBeNil) + reTmp, err := LoadTable(ddb, MemTableName, DiskDBPath) + So(err, ShouldBeNil) + reID := reTmp.Data.GetColumnByName("id") + redatev := reTmp.Data.GetColumnByName("datev") + restr := reTmp.Data.GetColumnByName("str") + So(reID.String(), ShouldEqual, "vector([1, 2, 3, 4, 5, 6, 7, 8, 9, 10])") + So(redatev.String(), ShouldEqual, "vector([1969.12.27, 1969.12.28, 1969.12.29, 1969.12.30, 1969.12.31, 1970.01.01, 1970.01.02, 1970.01.03, 1970.01.04, 1970.01.05])") + So(restr.String(), ShouldEqual, "vector([A1, A2, A3, A4, A5, A6, A7, A8, A9, A10])") + }) + Convey("Test_function_SaveTable_SetAppending_true_SetCompression_false", func() { + _, err = ddb.RunScript(`if(exists("` + DiskDBPath + `")){ + rmdir("` + DiskDBPath + `", true)}`) + So(err, ShouldBeNil) + _, err = ddb.RunScript(`t=table(1..10 as id, 1969.12.26+ 1..10 as datev, "A"+string(1..10) as str)`) + So(err, ShouldBeNil) + l := new(api.SaveTableRequest). 
+ SetDBPath(DiskDBPath).SetTable("t").SetTableName(MemTableName).SetAppending(true).SetCompression(false) + err = ddb.SaveTable(l) + So(err, ShouldBeNil) + reTmp, err := LoadTable(ddb, MemTableName, DiskDBPath) + So(err, ShouldBeNil) + reID := reTmp.Data.GetColumnByName("id") + redatev := reTmp.Data.GetColumnByName("datev") + restr := reTmp.Data.GetColumnByName("str") + So(reID.String(), ShouldEqual, "vector([1, 2, 3, 4, 5, 6, 7, 8, 9, 10])") + So(redatev.String(), ShouldEqual, "vector([1969.12.27, 1969.12.28, 1969.12.29, 1969.12.30, 1969.12.31, 1970.01.01, 1970.01.02, 1970.01.03, 1970.01.04, 1970.01.05])") + So(restr.String(), ShouldEqual, "vector([A1, A2, A3, A4, A5, A6, A7, A8, A9, A10])") + }) + Convey("Test_function_SaveTable_SetAppending_true_SetCompression_true", func() { + _, err = ddb.RunScript(`if(exists("` + DiskDBPath + `")){ + rmdir("` + DiskDBPath + `", true)}`) + So(err, ShouldBeNil) + l := new(api.SaveTableRequest). + SetDBPath(DiskDBPath).SetTable("t").SetTableName(MemTableName).SetAppending(true).SetCompression(true) + err = ddb.SaveTable(l) + So(err, ShouldBeNil) + reTmp, err := LoadTable(ddb, MemTableName, DiskDBPath) + So(err, ShouldBeNil) + reID := reTmp.Data.GetColumnByName("id") + redatev := reTmp.Data.GetColumnByName("datev") + restr := reTmp.Data.GetColumnByName("str") + So(reID.String(), ShouldEqual, "vector([1, 2, 3, 4, 5, 6, 7, 8, 9, 10])") + So(redatev.String(), ShouldEqual, "vector([1969.12.27, 1969.12.28, 1969.12.29, 1969.12.30, 1969.12.31, 1970.01.01, 1970.01.02, 1970.01.03, 1970.01.04, 1970.01.05])") + So(restr.String(), ShouldEqual, "vector([A1, A2, A3, A4, A5, A6, A7, A8, A9, A10])") + }) + Convey("Test_function_SaveTable_dbhandler", func() { + _, err = ddb.RunScript(`if(exists("` + DiskDBPath + `")){ + rmdir("` + DiskDBPath + `", true)}`) + So(err, ShouldBeNil) + _, err := ddb.RunScript(`db=database("` + DiskDBPath + `")`) + So(err, ShouldBeNil) + l := new(api.SaveTableRequest). + SetDBPath(DiskDBPath).SetTable("t").SetTableName(MemTableName).SetDBHandle("db") + err = ddb.SaveTable(l) + So(err, ShouldBeNil) + reTmp, err := LoadTable(ddb, MemTableName, DiskDBPath) + So(err, ShouldBeNil) + reID := reTmp.Data.GetColumnByName("id") + redatev := reTmp.Data.GetColumnByName("datev") + restr := reTmp.Data.GetColumnByName("str") + So(reID.String(), ShouldEqual, "vector([1, 2, 3, 4, 5, 6, 7, 8, 9, 10])") + So(redatev.String(), ShouldEqual, "vector([1969.12.27, 1969.12.28, 1969.12.29, 1969.12.30, 1969.12.31, 1970.01.01, 1970.01.02, 1970.01.03, 1970.01.04, 1970.01.05])") + So(restr.String(), ShouldEqual, "vector([A1, A2, A3, A4, A5, A6, A7, A8, A9, A10])") + }) + }) +} diff --git a/test/saveText_test.go b/test/saveText_test.go new file mode 100644 index 0000000..1a9496f --- /dev/null +++ b/test/saveText_test.go @@ -0,0 +1,97 @@ +package test + +import ( + "context" + "testing" + + "github.com/dolphindb/api-go/api" + "github.com/dolphindb/api-go/model" + "github.com/dolphindb/api-go/test/setup" + . 
"github.com/smartystreets/goconvey/convey" +) + +func CheckVectorEqual(vec *model.Vector) bool { + data := vec.Data.Value() + var j int32 = 1 + for i := 0; i < vec.Data.Len(); i++ { + if j < 100 { + if data[i] != j { + return false + } + } else if j == 100 { + j = 0 + } + j++ + } + return true +} +func TestSaveText(t *testing.T) { + Convey("Test_saveText_prepare", t, func() { + ddb, err := api.NewSimpleDolphinDBClient(context.TODO(), setup.Address, setup.UserName, setup.Password) + So(err, ShouldBeNil) + workdir := setup.WORKDIR + "/testSaveText.txt" + Convey("Test_saveText_exception", func() { + Convey("Test_saveText_scalar_exception", func() { + var a string = "1" + err = SaveText(ddb, a, workdir) + So(err, ShouldNotBeNil) + }) + Convey("Test_saveText_pair_exception", func() { + var a string = "1:3" + err = SaveText(ddb, a, workdir) + So(err, ShouldNotBeNil) + }) + Convey("Test_saveText_set_exception", func() { + var a string = "set(1 2 3)" + err = SaveText(ddb, a, workdir) + So(err, ShouldNotBeNil) + }) + Convey("Test_saveText_fileName_null_exception", func() { + var a string = "table(1 2 3 as id, 4 5 6 as val)" + err = SaveText(ddb, a, "NULL") + So(err, ShouldBeNil) + }) + }) + Convey("Test_saveText_obj_vector", func() { + var a string = "1 2 3" + err = SaveText(ddb, a, workdir) + So(err, ShouldBeNil) + temp, err := LoadTextFileName(ddb, workdir) + So(err, ShouldBeNil) + re := temp.Data.GetColumnByName(temp.Data.GetColumnNames()[0]).String() + So(re, ShouldEqual, "vector([1, 2, 3])") + }) + Convey("Test_saveText_obj_bigarray", func() { + var a string = "take(1..100, 5000000)" + err = SaveText(ddb, a, workdir) + So(err, ShouldBeNil) + temp, err := LoadTextFileName(ddb, workdir) + So(err, ShouldBeNil) + col := temp.Data.GetColumnByName(temp.Data.GetColumnNames()[0]) + re := CheckVectorEqual(col) + So(re, ShouldBeTrue) + }) + Convey("Test_saveText_obj_matrix", func() { + var a string = "matrix(1 2 3, 4 5 6)" + err = SaveText(ddb, a, workdir) + So(err, ShouldBeNil) + temp, err := LoadTextFileName(ddb, workdir) + So(err, ShouldBeNil) + re1 := temp.Data.GetColumnByName(temp.Data.GetColumnNames()[0]).String() + So(re1, ShouldEqual, "vector([1, 2, 3])") + re2 := temp.Data.GetColumnByName(temp.Data.GetColumnNames()[1]).String() + So(re2, ShouldEqual, "vector([4, 5, 6])") + }) + Convey("Test_saveText_obj_table", func() { + var a string = "table(1 2 3 as id , 4 5 6 as data)" + err = SaveText(ddb, a, workdir) + So(err, ShouldBeNil) + temp, err := LoadTextFileName(ddb, workdir) + So(err, ShouldBeNil) + re1 := temp.Data.GetColumnByName(temp.Data.GetColumnNames()[0]).String() + So(re1, ShouldEqual, "vector([1, 2, 3])") + re2 := temp.Data.GetColumnByName(temp.Data.GetColumnNames()[1]).String() + So(re2, ShouldEqual, "vector([4, 5, 6])") + }) + }) +} diff --git a/test/setup/settings.go b/test/setup/settings.go new file mode 100644 index 0000000..67c5924 --- /dev/null +++ b/test/setup/settings.go @@ -0,0 +1,25 @@ +package setup + +const ( + UserName = "admin" + Password = "123456" + Address = "192.168.0.75:8921" + IP = "192.168.0.75" + Port = 8921 + SubPort = 8009 + WORKDIR = "/home/zcwen/workdir" + DATADIR = "/home/zcwen/data" + Address2 = "192.168.0.75:8922" + Address3 = "192.168.0.75:8923" + Address4 = "192.168.0.75:8924" +) + +// const ( +// UserName = "admin" +// Password = "123456" +// Address = "127.0.0.1:8848" +// IP = "127.0.0.1" +// Port = "8848" +// WORK_DIR = "/home/sjw/jwshu/workdir" +// DATA_DIR = "/hdd/dolphindb/server/setup/data" +// ) diff --git a/test/streaming/goroutineClient_test.go 
b/test/streaming/goroutineClient_test.go new file mode 100644 index 0000000..a2cd0b6 --- /dev/null +++ b/test/streaming/goroutineClient_test.go @@ -0,0 +1,701 @@ +package test + +import ( + "context" + "fmt" + "strconv" + "sync" + "testing" + "time" + + "github.com/dolphindb/api-go/api" + "github.com/dolphindb/api-go/example/util" + "github.com/dolphindb/api-go/model" + "github.com/dolphindb/api-go/streaming" + "github.com/dolphindb/api-go/test/setup" + . "github.com/smartystreets/goconvey/convey" +) + +var tpc = streaming.NewGoroutineClient(setup.IP, setup.SubPort) +var gcConn, _ = api.NewSimpleDolphinDBClient(context.TODO(), setup.Address, setup.UserName, setup.Password) +var stopLabel bool +var wg sync.WaitGroup + +func CreateStreamingTableforGcTest() { + _, err := gcConn.RunScript("login(`admin,`123456);" + + "try{dropStreamTable('st1')}catch(ex){};" + + "try{dropStreamTable('st2')}catch(ex){};" + + "try{dropStreamTable('Trades')}catch(ex){};" + + "try{dropStreamTable('Receive')}catch(ex){};") + AssertNil(err) + _, err = gcConn.RunScript("st1 = streamTable(1000000:0,`tag`ts`data,[INT,TIMESTAMP,DOUBLE])\n" + + "share(st1,`Trades)\t\n" + "setStreamTableFilterColumn(objByName(`Trades),`tag)") + AssertNil(err) + _, err = gcConn.RunScript("st2 = streamTable(1000000:0,`tag`ts`data,[INT,TIMESTAMP,DOUBLE])\n" + + "share(st2, `Receive)\t\n") + AssertNil(err) +} + +func threadWriteData() { + defer wg.Done() + for { + _, err := gcConn.RunScript("n=1000;t=table(1..n as tag,now()+1..n as ts,rand(100.0,n) as data);" + "Trades.append!(t)") + AssertNil(err) + if stopLabel { + break + } + } +} + +func waitData(tableName string, dataRow int) { + for { + tmp, err := gcConn.RunScript("(exec count(*) from " + tableName + ")[0]") + AssertNil(err) + rowNum := tmp.(*model.Scalar) + fmt.Printf("\nexpectedData is: %v", dataRow) + fmt.Printf("\nactualData is: %v", rowNum) + if dataRow == int(rowNum.Value().(int32)) { + break + } + time.Sleep(1 * time.Second) + } +} + +type MessageHandler struct{} + +func (s *MessageHandler) DoEvent(msg streaming.IMessage) { + val0 := msg.GetValue(0).(*model.Scalar).DataType.String() + val1 := msg.GetValue(1).(*model.Scalar).DataType.String() + val2 := msg.GetValue(2).(*model.Scalar).DataType.String() + script := fmt.Sprintf("insert into Receive values(%s,%s,%s)", + val0, val1, val2) + _, err := gcConn.RunScript(script) + util.AssertNil(err) +} + +func TestGoroutineClient_batchSize_throttle(t *testing.T) { + Convey("test_NewGoroutineClient_batchSize_lt0", t, func() { + CreateStreamingTableforGcTest() + filter1, err := gcConn.RunScript("1..1000") + So(err, ShouldBeNil) + req := &streaming.SubscribeRequest{ + Address: setup.Address, + TableName: "Trades", + ActionName: "action1", + Offset: 0, + Reconnect: true, + Filter: filter1.(*model.Vector), + } + req.SetBatchSize(-10000).SetThrottle(1) + err = tpc.Subscribe(req) + So(err, ShouldBeNil) + err = tpc.UnSubscribe(req) + So(err, ShouldBeNil) + }) + Convey("test_NewGoroutineClient_throttle_less_than_0", t, func() { + CreateStreamingTableforGcTest() + filter1, err := gcConn.RunScript("1..1000") + So(err, ShouldBeNil) + req := &streaming.SubscribeRequest{ + Address: setup.Address, + TableName: "Trades", + ActionName: "action1", + Offset: 0, + Reconnect: true, + Filter: filter1.(*model.Vector), + } + req.SetBatchSize(10000).SetThrottle(-10) + err = tpc.Subscribe(req) + So(err, ShouldBeNil) + err = tpc.UnSubscribe(req) + So(err, ShouldBeNil) + })
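+ // The cases above pass non-positive batchSize/throttle values on purpose: Subscribe is still expected to accept the request, so the asserts only check that subscribing and unsubscribing return no error.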
+ Convey("test_NewGoroutineClient_MessageHandler_throttle_gt0", t, func() { + gcConn, err := api.NewSimpleDolphinDBClient(context.TODO(), setup.Address, setup.UserName, setup.Password) + So(err, ShouldBeNil) + CreateStreamingTableforGcTest() + filter1, err := gcConn.RunScript("1..1000") + So(err, ShouldBeNil) + req := &streaming.SubscribeRequest{ + Address: setup.Address, + TableName: "Trades", + ActionName: "action1", + Offset: 0, + Reconnect: true, + Filter: filter1.(*model.Vector), + Handler: new(MessageHandler), + } + req.SetBatchSize(10000).SetThrottle(10) + err = tpc.Subscribe(req) + So(err, ShouldBeNil) + err = tpc.UnSubscribe(req) + So(err, ShouldBeNil) + }) + Convey("test_NewGoroutineClient_MessageHandler_batchSize_lt0", t, func() { + CreateStreamingTableforGcTest() + filter1, err := gcConn.RunScript("1..1000") + So(err, ShouldBeNil) + req := &streaming.SubscribeRequest{ + Address: setup.Address, + TableName: "Trades", + ActionName: "action1", + Offset: 0, + Reconnect: true, + Filter: filter1.(*model.Vector), + Handler: new(MessageHandler), + } + req.SetBatchSize(-10000).SetThrottle(-10) + err = tpc.Subscribe(req) + So(tpc.IsClosed(), ShouldBeFalse) + So(err, ShouldBeNil) + err = tpc.UnSubscribe(req) + So(err, ShouldBeNil) + }) + Convey("test_NewGoroutineClient_MessageHandler_batchSize_Throttle_lt0", t, func() { + CreateStreamingTableforGcTest() + filter1, err := gcConn.RunScript("1..1000") + So(err, ShouldBeNil) + req := &streaming.SubscribeRequest{ + Address: setup.Address, + TableName: "Trades", + ActionName: "action1", + Offset: 0, + Reconnect: true, + Filter: filter1.(*model.Vector), + Handler: new(MessageHandler), + } + req.SetBatchSize(-10000).SetThrottle(-5) + err = tpc.Subscribe(req) + So(err, ShouldBeNil) + err = tpc.UnSubscribe(req) + So(err, ShouldBeNil) + }) + Convey("test_NewGoroutineClient_batchSize_Throttle_lt0", t, func() { + CreateStreamingTableforGcTest() + filter1, err := gcConn.RunScript("1..1000") + So(err, ShouldBeNil) + req := &streaming.SubscribeRequest{ + Address: setup.Address, + TableName: "Trades", + ActionName: "action1", + Offset: -1, + Reconnect: true, + Filter: filter1.(*model.Vector), + Handler: new(MessageHandler), + } + req.SetBatchSize(-10000).SetThrottle(-5) + err = tpc.Subscribe(req) + So(err, ShouldBeNil) + err = tpc.UnSubscribe(req) + So(err, ShouldBeNil) + }) +} + +func TestGoroutineClient_tableName_offset(t *testing.T) { + Convey("TestGoroutineClient_tableName_offset", t, func() { + CreateStreamingTableforGcTest() + _, err := gcConn.RunScript("n=1000;t=table(1..n as tag,now()+1..n as ts,rand(100.0,n) as data);" + "Trades.append!(t)") + So(err, ShouldBeNil) + req := &streaming.SubscribeRequest{ + Address: setup.Address, + TableName: "Trades", + ActionName: "action1", + Offset: 0, + Reconnect: false, + Handler: new(MessageHandler), + } + err = tpc.Subscribe(req) + So(err, ShouldBeNil) + _, err = gcConn.RunScript("n=1000;t=table(1..n as tag,now()+1..n as ts,rand(100.0,n) as data);" + "Trades.append!(t)") + So(err, ShouldBeNil) + waitData("Receive", 2000) + reTmp, err := gcConn.RunScript("Receive") + So(err, ShouldBeNil) + exTmp, err := gcConn.RunScript("Trades") + So(err, ShouldBeNil) + re := reTmp.(*model.Table) + ex := exTmp.(*model.Table) + CheckmodelTableEqual(re, ex, 0) + err = tpc.UnSubscribe(req) + So(err, ShouldBeNil) + }) +} + +func TestGoroutineClient_tableName_actionName(t *testing.T) { + Convey("TestGoroutineClient_tableName_actionName", t, func() { + CreateStreamingTableforGcTest() + _, err := gcConn.RunScript("n=1000;t=table(1..n as tag,now()+1..n as ts,rand(100.0,n) as
data);" + "Trades.append!(t)") + So(err, ShouldBeNil) + req := &streaming.SubscribeRequest{ + Address: setup.Address, + TableName: "Trades", + ActionName: "subTrades1", + Offset: 0, + Reconnect: false, + Handler: new(MessageHandler), + } + err = tpc.Subscribe(req) + So(err, ShouldBeNil) + _, err = gcConn.RunScript("n=1000;t=table(1..n as tag,now()+1..n as ts,rand(100.0,n) as data);" + "Trades.append!(t)") + So(err, ShouldBeNil) + _, err = gcConn.RunScript("n=1000;t=table(1..n as tag,now()+1..n as ts,rand(100.0,n) as data);" + "Trades.append!(t)") + So(err, ShouldBeNil) + _, err = gcConn.RunScript("n=1000;t=table(1..n as tag,now()+1..n as ts,rand(100.0,n) as data);" + "Trades.append!(t)") + So(err, ShouldBeNil) + waitData("Receive", 4000) + reTmp, err := gcConn.RunScript("Receive") + So(err, ShouldBeNil) + exTmp, err := gcConn.RunScript("Trades") + So(err, ShouldBeNil) + re := reTmp.(*model.Table) + ex := exTmp.(*model.Table) + So(re.Rows(), ShouldEqual, 4000) + CheckmodelTableEqual(re, ex, 0) + err = tpc.UnSubscribe(req) + So(err, ShouldBeNil) + }) +} + +func TestGoroutineClient_tableName_handler_offset_reconnect_success(t *testing.T) { + Convey("TestGoroutineClient_tableName_handler_offset_reconnect_success", t, func() { + CreateStreamingTableforGcTest() + _, err := gcConn.RunScript("n=1000;t=table(1..n as tag,now()+1..n as ts,rand(100.0,n) as data);" + "Trades.append!(t)") + So(err, ShouldBeNil) + req := &streaming.SubscribeRequest{ + Address: setup.Address, + TableName: "Trades", + Offset: -1, + Reconnect: true, + Handler: new(MessageHandler), + } + err = tpc.Subscribe(req) + So(err, ShouldBeNil) + wg.Add(1) + go threadWriteData() + time.Sleep(2 * time.Second) + _, err = gcConn.RunScript("stopPublishTable('" + setup.IP + "'," + strconv.Itoa(setup.Port) + ",'Trades')") + So(err, ShouldBeNil) + time.Sleep(2 * time.Second) + _, err = gcConn.RunScript("stopPublishTable('" + setup.IP + "'," + strconv.Itoa(setup.Port) + ",'Trades')") + So(err, ShouldBeNil) + rowNum1, err := gcConn.RunScript("(exec count(*) from Receive)[0]") + So(err, ShouldBeNil) + reRowNum1 := rowNum1.(*model.Scalar) + time.Sleep(3 * time.Second) + rowNum2, err := gcConn.RunScript("(exec count(*) from Receive)[0]") + So(err, ShouldBeNil) + reRowNum2 := rowNum2.(*model.Scalar) + stopLabel = true + wg.Wait() + So(reRowNum2.Value(), ShouldBeGreaterThanOrEqualTo, reRowNum1.Value()) + err = tpc.UnSubscribe(req) + So(err, ShouldBeNil) + }) +} + +func TestGoroutineClient_subscribe_TableName_ActionName_Handler_reconnect(t *testing.T) { + Convey("TestGoroutineClient_subscribe_TableName_ActionName_Handler_reconnect", t, func() { + CreateStreamingTableforGcTest() + _, err := gcConn.RunScript("n=1000;t=table(1..n as tag,now()+1..n as ts,rand(100.0,n) as data);" + "Trades.append!(t)") + So(err, ShouldBeNil) + req := &streaming.SubscribeRequest{ + Address: setup.Address, + TableName: "Trades", + ActionName: "subTrades1", + Reconnect: true, + Handler: new(MessageHandler), + } + err = tpc.Subscribe(req) + So(err, ShouldBeNil) + wg.Add(1) + go threadWriteData() + time.Sleep(2 * time.Second) + _, err = gcConn.RunScript("stopPublishTable('" + setup.IP + "'," + strconv.Itoa(setup.Port) + ",'Trades')") + So(err, ShouldBeNil) + time.Sleep(2 * time.Second) + _, err = gcConn.RunScript("stopPublishTable('" + setup.IP + "'," + strconv.Itoa(setup.Port) + ",'Trades')") + So(err, ShouldBeNil) + rowNum1, err := gcConn.RunScript("(exec count(*) from Receive)[0]") + So(err, ShouldBeNil) + reRowNum1 := rowNum1.(*model.Scalar) + time.Sleep(3 * time.Second) 
+ rowNum2, err := gcConn.RunScript("(exec count(*) from Receive)[0]") + So(err, ShouldBeNil) + reRowNum2 := rowNum2.(*model.Scalar) + stopLabel = true + wg.Wait() + So(reRowNum2.Value(), ShouldBeGreaterThanOrEqualTo, reRowNum1.Value()) + err = tpc.UnSubscribe(req) + So(err, ShouldBeNil) + }) +} + +func TestGoroutineClient_subscribe_TableName_ActionName_Handler_offset_0(t *testing.T) { + Convey("TestGoroutineClient_subscribe_TableName_ActionName_Handler_offset_0", t, func() { + CreateStreamingTableforGcTest() + _, err := gcConn.RunScript("n=1000;t=table(1..n as tag,now()+1..n as ts,rand(100.0,n) as data);" + "Trades.append!(t)") + So(err, ShouldBeNil) + req := &streaming.SubscribeRequest{ + Address: setup.Address, + TableName: "Trades", + ActionName: "subTrades1", + Offset: 0, + Handler: new(MessageHandler), + } + err = tpc.Subscribe(req) + So(err, ShouldBeNil) + _, err = gcConn.RunScript("n=1000;t=table(1..n as tag,now()+1..n as ts,rand(100.0,n) as data);" + "Trades.append!(t)") + So(err, ShouldBeNil) + waitData("Receive", 2000) + tmp1, err := gcConn.RunScript("Receive") + So(err, ShouldBeNil) + re := tmp1.(*model.Table) + tmp2, err := gcConn.RunScript("Trades") + So(err, ShouldBeNil) + ex := tmp2.(*model.Table) + CheckmodelTableEqual(re, ex, 0) + err = tpc.UnSubscribe(req) + So(err, ShouldBeNil) + }) +} + +func TestGoroutineClient_subscribe_TableName_ActionName_Handler_offset_negative(t *testing.T) { + Convey("TestGoroutineClient_subscribe_TableName_ActionName_Handler_offset_negative", t, func() { + CreateStreamingTableforGcTest() + _, err := gcConn.RunScript("n=1000;t=table(1..n as tag,2020.01.04T12:23:45+1..n as ts,rand(100.0,n) as data);" + "Trades.append!(t)") + So(err, ShouldBeNil) + req := &streaming.SubscribeRequest{ + Address: setup.Address, + TableName: "Trades", + ActionName: "subTrades1", + Offset: -3, + Handler: new(MessageHandler), + } + err = tpc.Subscribe(req) + So(err, ShouldBeNil) + _, err = gcConn.RunScript("n=1000;t=table(1..n as tag,2020.01.01T12:23:45+1..n as ts,rand(100.0,n) as data);" + "Trades.append!(t)") + So(err, ShouldBeNil) + _, err = gcConn.RunScript("n=1000;t=table(1..n as tag,2020.01.02T12:23:45+1..n as ts,rand(100.0,n) as data);" + "Trades.append!(t)") + So(err, ShouldBeNil) + _, err = gcConn.RunScript("n=1000;t=table(1..n as tag,2020.01.03T12:23:45+1..n as ts,rand(100.0,n) as data);" + "Trades.append!(t)") + So(err, ShouldBeNil) + waitData("Receive", 3000) + tmp1, err := gcConn.RunScript("Receive") + So(err, ShouldBeNil) + re := tmp1.(*model.Table) + tmp2, err := gcConn.RunScript("select * from Trades where rowNo(tag)>=1000") + So(err, ShouldBeNil) + ex := tmp2.(*model.Table) + So(re.Rows(), ShouldEqual, 3000) + CheckmodelTableEqual(re, ex, 0) + err = tpc.UnSubscribe(req) + So(err, ShouldBeNil) + }) +} + +func TestGoroutineClient_subscribe_TableName_ActionName_Handler_offset_10(t *testing.T) { + Convey("TestGoroutineClient_subscribe_TableName_ActionName_Handler_offset_10", t, func() { + CreateStreamingTableforGcTest() + _, err := gcConn.RunScript("n=1000;t=table(1..n as tag,2020.01.04T12:23:45+1..n as ts,rand(100.0,n) as data);" + "Trades.append!(t)") + So(err, ShouldBeNil) + req := &streaming.SubscribeRequest{ + Address: setup.Address, + TableName: "Trades", + ActionName: "subTradesOffset", + Offset: 10, + Handler: new(MessageHandler), + } + err = tpc.Subscribe(req) + So(err, ShouldBeNil) + _, err = gcConn.RunScript("n=1000;t=table(1..n as tag,2020.01.01T12:23:45+1..n as ts,rand(100.0,n) as data);" + "Trades.append!(t)") + So(err, ShouldBeNil) + _, err = 
gcConn.RunScript("n=1000;t=table(1..n as tag,2020.01.02T12:23:45+1..n as ts,rand(100.0,n) as data);" + "Trades.append!(t)") + So(err, ShouldBeNil) + _, err = gcConn.RunScript("n=1000;t=table(1..n as tag,2020.01.03T12:23:45+1..n as ts,rand(100.0,n) as data);" + "Trades.append!(t)") + So(err, ShouldBeNil) + waitData("Receive", 3990) + tmp1, err := gcConn.RunScript("Receive") + So(err, ShouldBeNil) + re := tmp1.(*model.Table) + tmp2, err := gcConn.RunScript("select * from Trades where rowNo(tag)>=10") + So(err, ShouldBeNil) + ex := tmp2.(*model.Table) + So(re.Rows(), ShouldEqual, 3990) + CheckmodelTableEqual(re, ex, 0) + err = tpc.UnSubscribe(req) + So(err, ShouldBeNil) + }) +} + +func TestGoroutineClient_subscribe_offset_morethan_tableCount(t *testing.T) { + Convey("TestGoroutineClient_subscribe_offset_morethan_tableCount", t, func() { + CreateStreamingTableforGcTest() + _, err := gcConn.RunScript("n=1000;t=table(1..n as tag,2020.01.04T12:23:45+1..n as ts,rand(100.0,n) as data);" + "Trades.append!(t)") + So(err, ShouldBeNil) + req := &streaming.SubscribeRequest{ + Address: setup.Address, + TableName: "Trades", + ActionName: "subTrades1", + Offset: 1000, + Handler: new(MessageHandler), + } + err = tpc.Subscribe(req) + So(err, ShouldBeNil) + time.Sleep(3 * time.Second) + tmp1, err := gcConn.RunScript("Receive") + So(err, ShouldBeNil) + re := tmp1.(*model.Table) + So(re.Rows(), ShouldEqual, 0) + err = tpc.UnSubscribe(req) + So(err, ShouldBeNil) + }) +} + +type Handlerx struct{} + +func (s *Handlerx) DoEvent(msg streaming.IMessage) { + val0 := msg.GetValue(0).(*model.Scalar).DataType.String() + val1 := msg.GetValue(1).(*model.Scalar).DataType.String() + val2 := msg.GetValue(2).(*model.Scalar).DataType.String() + script := fmt.Sprintf("insert into filter values(%s,%s,%s)", + val0, val1, val2) + _, err := gcConn.RunScript(script) + util.AssertNil(err) +} + +func TestGoroutineClient_subscribe_filter(t *testing.T) { + Convey("TestGoroutineClient_subscribe_filter", t, func() { + CreateStreamingTableforGcTest() + script3 := "try{dropStreamTable('st3')}catch(ex){};\n" + "st3 = streamTable(1000000:0,`tag`ts`data,[INT,TIMESTAMP,DOUBLE])\n" + + "enableTableShareAndPersistence(table=st3, tableName=`filter, asynWrite=true, compress=true, cacheSize=200000, retentionMinutes=180)\t\n" + _, err := gcConn.RunScript(script3) + AssertNil(err) + filter1, err := gcConn.RunScript("1..1000") + So(err, ShouldBeNil) + filter2, err := gcConn.RunScript("2001..3000") + So(err, ShouldBeNil) + req1 := &streaming.SubscribeRequest{ + Address: setup.Address, + TableName: "Trades", + ActionName: "subTrades1", + Offset: -1, + Filter: filter1.(*model.Vector), + Handler: new(MessageHandler), + } + req2 := &streaming.SubscribeRequest{ + Address: setup.Address, + TableName: "Trades", + ActionName: "subTrades2", + Offset: -1, + Filter: filter2.(*model.Vector), + Handler: new(Handlerx), + } + err = tpc.Subscribe(req1) + So(err, ShouldBeNil) + err = tpc.Subscribe(req2) + So(err, ShouldBeNil) + _, err = gcConn.RunScript("n=4000;t=table(1..n as tag,now()+1..n as ts,rand(100.0,n) as data);" + "Trades.append!(t)") + So(err, ShouldBeNil) + waitData("Receive", 1000) + waitData("filter", 1000) + tmp1, err := gcConn.RunScript("Receive") + So(err, ShouldBeNil) + tmp2, err := gcConn.RunScript("Trades") + So(err, ShouldBeNil) + tmp3, err := gcConn.RunScript("filter") + So(err, ShouldBeNil) + _, err = gcConn.RunScript("dropStreamTable(`filter)") + AssertNil(err) + err = tpc.UnSubscribe(req1) + So(err, ShouldBeNil) + err = tpc.UnSubscribe(req2) + 
So(err, ShouldBeNil) + re1 := tmp1.(*model.Table) + ex := tmp2.(*model.Table) + re2 := tmp3.(*model.Table) + So(re1.Rows(), ShouldEqual, 1000) + So(re2.Rows(), ShouldEqual, 1000) + CheckmodelTableEqual(re1, ex, 0) + CheckmodelTableEqual(re2, ex, 2000) + }) +} + +func CheckmodelTableEqual_throttle(t1 *model.Table, t2 *model.Table, m int, n int) bool { + for i := 0; i < 1000; i++ { + for j := 0; j < len(t1.GetColumnNames()); j++ { + if t1.GetColumnByIndex(j).Get(i+m).Value() != t2.GetColumnByIndex(j).Get(n+i).Value() { + return false + } + } + } + return true +} +func TestGoroutineClient_batchSize_throttle(t *testing.T) { + Convey("TestGoroutineClient_batchSize_throttle", t, func() { + CreateStreamingTableforGcTest() + filter1, err := gcConn.RunScript("1..1000") + So(err, ShouldBeNil) + req1 := &streaming.SubscribeRequest{ + Address: setup.Address, + TableName: "Trades", + ActionName: "subTrades1", + Offset: -1, + Filter: filter1.(*model.Vector), + Handler: new(MessageHandler), + Reconnect: true, + } + req1.SetBatchSize(10000).SetThrottle(5) + err = tpc.Subscribe(req1) + So(err, ShouldBeNil) + _, err = gcConn.RunScript("n=10000;t=table(1..n as tag,now()+1..n as ts,rand(100.0,n) as data);" + "Trades.append!(t)") + So(err, ShouldBeNil) + _, err = gcConn.RunScript("n=10000;t=table(1..n as tag,now()+1..n as ts,rand(100.0,n) as data);" + "Trades.append!(t)") + So(err, ShouldBeNil) + waitData("Receive", 2000) + tmp1, err := gcConn.RunScript("Receive") + So(err, ShouldBeNil) + tmp2, err := gcConn.RunScript("Trades") + So(err, ShouldBeNil) + err = tpc.UnSubscribe(req1) + So(err, ShouldBeNil) + re1 := tmp1.(*model.Table) + ex := tmp2.(*model.Table) + So(re1.Rows(), ShouldEqual, 2000) + fmt.Println(ex.Rows()) + CheckmodelTableEqual_throttle(re1, ex, 0, 0) + CheckmodelTableEqual_throttle(re1, ex, 1000, 10000) + }) +} + +func TestGoroutineClient_batchSize_throttle2(t *testing.T) { + Convey("TestGoroutineClient_batchSize_throttle2", t, func() { + CreateStreamingTableforGcTest() + filter1, err := gcConn.RunScript("1..1000") + So(err, ShouldBeNil) + req1 := &streaming.SubscribeRequest{ + Address: setup.Address, + TableName: "Trades", + ActionName: "subTrades1", + Offset: -1, + Filter: filter1.(*model.Vector), + Handler: new(MessageHandler), + Reconnect: true, + } + req1.SetBatchSize(10000).SetThrottle(5) + err = tpc.Subscribe(req1) + So(err, ShouldBeNil) + _, err = gcConn.RunScript("n=100;t=table(1..n as tag,now()+1..n as ts,rand(100.0,n) as data);" + "Trades.append!(t)") + So(err, ShouldBeNil) + _, err = gcConn.RunScript("n=100;t=table(1..n as tag,now()+1..n as ts,rand(100.0,n) as data);" + "Trades.append!(t)") + So(err, ShouldBeNil) + waitData("Receive", 200) + tmp1, err := gcConn.RunScript("Receive") + So(err, ShouldBeNil) + tmp2, err := gcConn.RunScript("Trades") + So(err, ShouldBeNil) + err = tpc.UnSubscribe(req1) + So(err, ShouldBeNil) + re1 := tmp1.(*model.Table) + ex := tmp2.(*model.Table) + So(re1.Rows(), ShouldEqual, 200) + CheckmodelTableEqual(re1, ex, 0) + }) +} + +func TestGoroutineClient_subscribe_unsubscribe_resubscribe(t *testing.T) { + Convey("TestGoroutineClient_subscribe_unsubscribe_resubscribe", t, func() { + CreateStreamingTableforGcTest() + filter1, err := gcConn.RunScript("1..1000") + So(err, ShouldBeNil) + req1 := &streaming.SubscribeRequest{ + Address: setup.Address, + TableName: "Trades", + ActionName: "subTrades1", + Offset: -1, + Filter: filter1.(*model.Vector), + Handler: new(MessageHandler), + Reconnect: true, + } + req1.SetBatchSize(10000).SetThrottle(5) + err = 
tpc.Subscribe(req1) + So(err, ShouldBeNil) + err = tpc.UnSubscribe(req1) + So(err, ShouldBeNil) + err = tpc.Subscribe(req1) + So(err, ShouldBeNil) + err = tpc.UnSubscribe(req1) + So(err, ShouldBeNil) + }) +} + +func TestGoroutineClient_subscribe_TableName_ActionName_Handler_offset_reconnect_filter_AllowExistTopic(t *testing.T) { + Convey("TestGoroutineClient_subscribe_TableName_ActionName_Handler_offset_reconnect_filter_AllowExistTopic", t, func() { + CreateStreamingTableforGcTest() + _, err := gcConn.RunScript("n=1000;t=table(1..n as tag,now()+1..n as ts,rand(100.0,n) as data);" + "Trades.append!(t)") + So(err, ShouldBeNil) + filter1, err := gcConn.RunScript("1..100000") + So(err, ShouldBeNil) + req := &streaming.SubscribeRequest{ + Address: setup.Address, + TableName: "Trades", + ActionName: "subTrades1", + Offset: 0, + Reconnect: true, + Filter: filter1.(*model.Vector), + Handler: new(MessageHandler), + AllowExists: true, + } + req.SetBatchSize(100).SetThrottle(5) + err = tpc.Subscribe(req) + So(err, ShouldBeNil) + wg.Add(1) + go threadWriteData() + time.Sleep(2 * time.Second) + _, err = gcConn.RunScript("stopPublishTable('" + setup.IP + "'," + strconv.Itoa(setup.SubPort) + ",'Trades', 'subTrades1')") + So(err, ShouldBeNil) + rowNum1, err := gcConn.RunScript("(exec count(*) from Receive)[0]") + So(err, ShouldBeNil) + reRowNum1 := rowNum1.(*model.Scalar) + time.Sleep(3 * time.Second) + rowNum2, err := gcConn.RunScript("(exec count(*) from Receive)[0]") + So(err, ShouldBeNil) + reRowNum2 := rowNum2.(*model.Scalar) + stopLabel = true + wg.Wait() + So(reRowNum2.Value(), ShouldBeGreaterThanOrEqualTo, reRowNum1.Value()) + err = tpc.UnSubscribe(req) + So(err, ShouldBeNil) + }) +} + +func TestGoroutineClient_subscribe_not_contain_handler(t *testing.T) { + Convey("TestGoroutineClient_subscribe_not_contain_handler_1000", t, func() { + CreateStreamingTableforGcTest() + req1 := &streaming.SubscribeRequest{ + Address: setup.Address, + TableName: "Trades", + ActionName: "subTrades1", + Offset: -1, + Reconnect: true, + } + err := tpc.Subscribe(req1) + So(err, ShouldBeNil) + _, err = gcConn.RunScript("n=1000;t=table(1..n as tag,now()+1..n as ts,rand(100.0,n) as data);" + "Trades.append!(t)") + So(err, ShouldBeNil) + tmp2, err := gcConn.RunScript("Trades") + So(err, ShouldBeNil) + err = tpc.UnSubscribe(req1) + So(err, ShouldBeNil) + ex := tmp2.(*model.Table) + So(1000, ShouldEqual, ex.Rows()) + }) +} + +func TestClear(t *testing.T) { + Convey("test_clear_gc", t, func() { + So(tpc.IsClosed(), ShouldBeFalse) + tpc.Close() + So(tpc.IsClosed(), ShouldBeTrue) + So(gcConn.Close(), ShouldBeNil) + }) +} diff --git a/test/streaming/goroutinePooledClient_test.go b/test/streaming/goroutinePooledClient_test.go new file mode 100644 index 0000000..4fa4f75 --- /dev/null +++ b/test/streaming/goroutinePooledClient_test.go @@ -0,0 +1,439 @@ +package test + +import ( + "context" + "fmt" + "testing" + "time" + + "github.com/dolphindb/api-go/api" + "github.com/dolphindb/api-go/example/util" + "github.com/dolphindb/api-go/model" + "github.com/dolphindb/api-go/streaming" + "github.com/dolphindb/api-go/test/setup" + . 
"github.com/smartystreets/goconvey/convey" +) + +var gpc = streaming.NewGoroutinePooledClient(setup.IP, 8696) +var gpcConn, _ = api.NewSimpleDolphinDBClient(context.TODO(), setup.Address, setup.UserName, setup.Password) + +func CreateStreamingTableforGpcTest() { + _, err := gpcConn.RunScript("login(`admin,`123456);" + + "try{dropStreamTable('TradesTable')}catch(ex){};" + + "try{dropStreamTable('ReceiveTable')}catch(ex){};") + AssertNil(err) + _, err = gpcConn.RunScript("st1 = streamTable(1000000:0,`tag`ts`data,[INT,TIMESTAMP,DOUBLE])\n" + + "enableTableShareAndPersistence(table=st1, tableName=`TradesTable, asynWrite=true, compress=true, cacheSize=200000, retentionMinutes=180)\t\n" + "setStreamTableFilterColumn(objByName(`TradesTable),`tag)") + AssertNil(err) + _, err = gpcConn.RunScript("st2 = streamTable(1000000:0,`tag`ts`data,[INT,TIMESTAMP,DOUBLE])\n" + + "enableTableShareAndPersistence(table=st2, tableName=`ReceiveTable, asynWrite=true, compress=true, cacheSize=200000, retentionMinutes=180)\t\n") + AssertNil(err) +} + +func waitDataGpc(tableName string, dataRow int) { + for { + tmp, err := gpcConn.RunScript("(exec count(*) from " + tableName + ")[0]") + AssertNil(err) + rowNum := tmp.(*model.Scalar) + fmt.Printf("\nexpectedData is: %v", dataRow) + fmt.Printf("\nactualData is: %v", rowNum) + if dataRow == int(rowNum.Value().(int32)) { + break + } + time.Sleep(2 * time.Second) + } +} + +type gpcMessageHandler struct{} + +func (s *gpcMessageHandler) DoEvent(msg streaming.IMessage) { + val0 := msg.GetValue(0).(*model.Scalar).DataType.String() + val1 := msg.GetValue(1).(*model.Scalar).DataType.String() + val2 := msg.GetValue(2).(*model.Scalar).DataType.String() + script := fmt.Sprintf("insert into ReceiveTable values(%s,%s,%s)", + val0, val1, val2) + _, err := gpcConn.RunScript(script) + util.AssertNil(err) +} + +func TestNewGoroutinePooledClient_subscribe_ex_ubsubscribe(t *testing.T) { + Convey("TestNewGoroutinePooledClient_subscribe_ex_ubsubscribe", t, func() { + CreateStreamingTableforGpcTest() + req := &streaming.SubscribeRequest{ + Address: setup.Address, + TableName: "TradesTable", + ActionName: "subTradesTable", + Handler: new(gpcMessageHandler), + } + req.SetBatchSize(-10000).SetThrottle(1) + err := gpc.Subscribe(req) + So(err, ShouldBeNil) + _, err = gpcConn.RunScript("n=1000;t=table(1..n as tag,now()+1..n as ts,rand(100.0,n) as data);" + "TradesTable.append!(t)") + So(err, ShouldBeNil) + waitDataGpc("ReceiveTable", 1000) + tmp1, err := gpcConn.RunScript("select * from ReceiveTable order by tag") + So(err, ShouldBeNil) + tmp2, err := gpcConn.RunScript("select * from TradesTable order by tag") + So(err, ShouldBeNil) + _, err = gpcConn.RunScript("dropStreamTable('TradesTable');dropStreamTable('ReceiveTable')") + So(err, ShouldNotBeNil) + re := tmp1.(*model.Table) + ex := tmp2.(*model.Table) + CheckmodelTableEqual(re, ex, 0) + err = gpc.UnSubscribe(req) + So(err, ShouldBeNil) + _, err = gpcConn.RunScript("dropStreamTable('TradesTable');dropStreamTable('ReceiveTable')") + So(err, ShouldBeNil) + }) +} + +func TestNewGoroutinePooledClient_subscribe_ex_ActionName(t *testing.T) { + Convey("TestNewGoroutinePooledClient_subscribe_ex_ActionName", t, func() { + CreateStreamingTableforGpcTest() + req := &streaming.SubscribeRequest{ + Address: setup.Address, + TableName: "TradesTable", + Offset: 0, + Reconnect: true, + Handler: new(gpcMessageHandler), + } + err := gpc.Subscribe(req) + So(err, ShouldBeNil) + _, err = gpcConn.RunScript("n=1000;t=table(1..n as tag,now()+1..n as ts,rand(100.0,n) as 
data);" + "TradesTable.append!(t)") + So(err, ShouldBeNil) + waitDataGpc("ReceiveTable", 1000) + err = gpc.UnSubscribe(req) + So(err, ShouldBeNil) + }) +} + +func TestNewGoroutinePooledClient_subscribe_exTableName(t *testing.T) { + Convey("TestNewGoroutinePooledClient_subscribe_exTableName", t, func() { + CreateStreamingTableforGpcTest() + req := &streaming.SubscribeRequest{ + Address: setup.Address, + Offset: 0, + Reconnect: true, + Handler: new(gpcMessageHandler), + } + err := gpc.Subscribe(req) + So(err, ShouldNotBeNil) + }) +} +func TestNewGoroutinePooledClient_subscribe_ex_offset(t *testing.T) { + Convey("TestNewGoroutinePooledClient_subscribe_ex_offset", t, func() { + CreateStreamingTableforGpcTest() + req := &streaming.SubscribeRequest{ + Address: setup.Address, + TableName: "TradesTable", + ActionName: "subTradesTable", + Offset: -2, + Reconnect: true, + Handler: new(gpcMessageHandler), + } + err := gpc.Subscribe(req) + So(err, ShouldNotBeNil) + }) +} + +func TestNewGoroutinePooledClient_subscribe_offset_0(t *testing.T) { + Convey("TestNewGoroutinePooledClient_subscribe_offset_0", t, func() { + CreateStreamingTableforGpcTest() + req := &streaming.SubscribeRequest{ + Address: setup.Address, + TableName: "TradesTable", + ActionName: "subTradesTable", + Offset: 0, + Reconnect: true, + Handler: new(gpcMessageHandler), + } + req.SetBatchSize(-10000).SetThrottle(1) + err := gpc.Subscribe(req) + So(err, ShouldBeNil) + _, err = gpcConn.RunScript("n=1000;t=table(1..n as tag,now()+1..n as ts,rand(100.0,n) as data);" + "TradesTable.append!(t)") + So(err, ShouldBeNil) + waitDataGpc("ReceiveTable", 1000) + tmp1, err := gpcConn.RunScript("select * from ReceiveTable order by tag") + So(err, ShouldBeNil) + tmp2, err := gpcConn.RunScript("select * from TradesTable order by tag") + So(err, ShouldBeNil) + re := tmp1.(*model.Table) + ex := tmp2.(*model.Table) + So(re.Rows(), ShouldEqual, 1000) + So(ex.Rows(), ShouldEqual, 1000) + CheckmodelTableEqual(re, ex, 0) + err = gpc.UnSubscribe(req) + So(err, ShouldBeNil) + _, err = gpcConn.RunScript("dropStreamTable('TradesTable');dropStreamTable('ReceiveTable')") + So(err, ShouldBeNil) + }) +} + +func TestNewGoroutinePooledClient_subscribe_offset_negative(t *testing.T) { + Convey("TestNewGoroutinePooledClient_subscribe_offset_negative", t, func() { + CreateStreamingTableforGpcTest() + _, err := gpcConn.RunScript("n=100;t=table(1..n as tag,now()+1..n as ts,rand(100.0,n) as data);" + "TradesTable.append!(t)") + So(err, ShouldBeNil) + req := &streaming.SubscribeRequest{ + Address: setup.Address, + TableName: "TradesTable", + ActionName: "subTradesTable", + Offset: -1, + Reconnect: true, + Handler: new(gpcMessageHandler), + } + req.SetBatchSize(-10000).SetThrottle(1) + err = gpc.Subscribe(req) + So(err, ShouldBeNil) + _, err = gpcConn.RunScript("n=1000;t=table(1..n as tag,now()+1..n as ts,rand(100.0,n) as data);" + "TradesTable.append!(t)") + So(err, ShouldBeNil) + _, err = gpcConn.RunScript("n=1000;t=table(1..n as tag,now()+1..n as ts,rand(100.0,n) as data);" + "TradesTable.append!(t)") + So(err, ShouldBeNil) + waitDataGpc("ReceiveTable", 2000) + tmp1, err := gpcConn.RunScript("select * from ReceiveTable order by tag, ts, data") + So(err, ShouldBeNil) + tmp2, err := gpcConn.RunScript("select * from TradesTable where rowNo(tag)>=100 order by tag, ts, data") + So(err, ShouldBeNil) + re := tmp1.(*model.Table) + ex := tmp2.(*model.Table) + So(re.Rows(), ShouldEqual, 2000) + So(ex.Rows(), ShouldEqual, 2000) + CheckmodelTableEqual(re, ex, 0) + err = 
gpc.UnSubscribe(req) + So(err, ShouldBeNil) + _, err = gpcConn.RunScript("dropStreamTable('TradesTable');dropStreamTable('ReceiveTable')") + So(err, ShouldBeNil) + }) +} + +func TestNewGoroutinePooledClient_subscribe_offset_10(t *testing.T) { + Convey("TestNewGoroutinePooledClient_subscribe_offset_10", t, func() { + CreateStreamingTableforGpcTest() + _, err := gpcConn.RunScript("n=100;t=table(1..n as tag,now()+1..n as ts,rand(100.0,n) as data);" + "TradesTable.append!(t)") + So(err, ShouldBeNil) + req := &streaming.SubscribeRequest{ + Address: setup.Address, + TableName: "TradesTable", + ActionName: "subTradesTable", + Offset: 10, + Reconnect: true, + Handler: new(gpcMessageHandler), + } + req.SetBatchSize(-10000).SetThrottle(1) + err = gpc.Subscribe(req) + So(err, ShouldBeNil) + _, err = gpcConn.RunScript("n=1000;t=table(1..n as tag,now()+1..n as ts,rand(100.0,n) as data);" + "TradesTable.append!(t)") + So(err, ShouldBeNil) + waitDataGpc("ReceiveTable", 1090) + tmp1, err := gpcConn.RunScript("select * from ReceiveTable order by tag") + So(err, ShouldBeNil) + tmp2, err := gpcConn.RunScript("select * from TradesTable where rowNo(tag)>=10 order by tag") + So(err, ShouldBeNil) + re := tmp1.(*model.Table) + ex := tmp2.(*model.Table) + So(re.Rows(), ShouldEqual, 1090) + So(ex.Rows(), ShouldEqual, 1090) + err = gpc.UnSubscribe(req) + So(err, ShouldBeNil) + _, err = gpcConn.RunScript("dropStreamTable('TradesTable');dropStreamTable('ReceiveTable')") + So(err, ShouldBeNil) + }) +} + +func TestNewGoroutinePooledClient_subscribe_offset_morethan_rowCount(t *testing.T) { + Convey("TestNewGoroutinePooledClient_subscribe_offset_morethan_rowCount", t, func() { + CreateStreamingTableforGpcTest() + _, err := gpcConn.RunScript("n=100;t=table(1..n as tag,now()+1..n as ts,rand(100.0,n) as data);" + "TradesTable.append!(t)") + So(err, ShouldBeNil) + req := &streaming.SubscribeRequest{ + Address: setup.Address, + TableName: "TradesTable", + ActionName: "subTradesTable", + Offset: 1000, + Reconnect: true, + Handler: new(gpcMessageHandler), + } + err = gpc.Subscribe(req) + So(err, ShouldNotBeNil) + }) +} + +type Handlegpc struct{} + +func (s *Handlegpc) DoEvent(msg streaming.IMessage) { + val0 := msg.GetValue(0).(*model.Scalar).DataType.String() + val1 := msg.GetValue(1).(*model.Scalar).DataType.String() + val2 := msg.GetValue(2).(*model.Scalar).DataType.String() + script := fmt.Sprintf("insert into filter values(%s,%s,%s)", + val0, val1, val2) + _, err := gpcConn.RunScript(script) + util.AssertNil(err) +} + +func TestNewGoroutinePooledClient_subscribe_filter(t *testing.T) { + Convey("TestNewGoroutinePooledClient_subscribe_filter", t, func() { + CreateStreamingTableforGpcTest() + script2 := "tmp3 = streamTable(1000000:0,`tag`ts`data,[INT,TIMESTAMP,DOUBLE])\n" + + "enableTableShareAndPersistence(table=tmp3, tableName=`filter, asynWrite=true, compress=true, cacheSize=200000, retentionMinutes=180)\t\n" + _, err := gpcConn.RunScript(script2) + So(err, ShouldBeNil) + filter1, err := gpcConn.RunScript("1..1000") + So(err, ShouldBeNil) + filter2, err := gpcConn.RunScript("2001..3000") + So(err, ShouldBeNil) + req1 := &streaming.SubscribeRequest{ + Address: setup.Address, + TableName: "TradesTable", + ActionName: "subTradesTable1", + Offset: -1, + Reconnect: true, + Filter: filter1.(*model.Vector), + Handler: new(gpcMessageHandler), + } + req2 := &streaming.SubscribeRequest{ + Address: setup.Address, + TableName: "TradesTable", + ActionName: "subTradesTable2", + Offset: -1, + Reconnect: true, + Filter: 
filter2.(*model.Vector), + Handler: new(Handlegpc), + } + err = gpc.Subscribe(req1) + So(err, ShouldBeNil) + err = gpc.Subscribe(req2) + So(err, ShouldBeNil) + _, err = gpcConn.RunScript("n=4000;t=table(1..n as tag,now()+1..n as ts,rand(100.0,n) as data);" + "TradesTable.append!(t)") + So(err, ShouldBeNil) + waitDataGpc("ReceiveTable", 1000) + waitDataGpc("filter", 1000) + tmp1, err := gpcConn.RunScript("select * from ReceiveTable order by tag, ts, data") + So(err, ShouldBeNil) + tmp2, err := gpcConn.RunScript("select * from TradesTable order by tag, ts, data") + So(err, ShouldBeNil) + tmp3, err := gpcConn.RunScript("select * from filter order by tag, ts, data") + So(err, ShouldBeNil) + re1 := tmp1.(*model.Table) + ex := tmp2.(*model.Table) + re2 := tmp3.(*model.Table) + So(re1.Rows(), ShouldEqual, 1000) + So(re2.Rows(), ShouldEqual, 1000) + CheckmodelTableEqual(re1, ex, 0) + CheckmodelTableEqual(re2, ex, 2000) + err = gpc.UnSubscribe(req1) + So(err, ShouldBeNil) + err = gpc.UnSubscribe(req2) + So(err, ShouldBeNil) + _, err = gpcConn.RunScript("dropStreamTable('TradesTable');dropStreamTable('ReceiveTable');dropStreamTable('filter')") + So(err, ShouldBeNil) + }) +} + +func TestNewGoroutinePooledClient_batchSize_throttle(t *testing.T) { + Convey("TestNewGoroutinePooledClient_batchSize_throttle", t, func() { + CreateStreamingTableforGpcTest() + filter1, err := gpcConn.RunScript("1..1000") + So(err, ShouldBeNil) + req1 := &streaming.SubscribeRequest{ + Address: setup.Address, + TableName: "TradesTable", + ActionName: "subTradesTable1", + Offset: -1, + Filter: filter1.(*model.Vector), + Handler: new(gpcMessageHandler), + Reconnect: true, + } + req1.SetBatchSize(10000).SetThrottle(5) + err = gpc.Subscribe(req1) + So(err, ShouldBeNil) + _, err = gpcConn.RunScript("n=10000;t=table(1..n as tag,now()+1..n as ts,rand(100.0,n) as data);" + "TradesTable.append!(t)") + So(err, ShouldBeNil) + _, err = gpcConn.RunScript("n=10000;t=table(1..n as tag,now()+1..n as ts,rand(100.0,n) as data);" + "TradesTable.append!(t)") + So(err, ShouldBeNil) + waitDataGpc("ReceiveTable", 2000) + tmp1, err := gpcConn.RunScript("select * from ReceiveTable order by tag") + So(err, ShouldBeNil) + tmp2, err := gpcConn.RunScript("select * from TradesTable order by tag") + So(err, ShouldBeNil) + err = gpc.UnSubscribe(req1) + So(err, ShouldBeNil) + re1 := tmp1.(*model.Table) + ex := tmp2.(*model.Table) + So(re1.Rows(), ShouldEqual, 2000) + So(ex.Rows(), ShouldEqual, 20000) + _, err = gpcConn.RunScript("dropStreamTable('TradesTable');dropStreamTable('ReceiveTable')") + So(err, ShouldBeNil) + }) +} + +func TestNewGoroutinePooledClient_batchSize_throttle2(t *testing.T) { + Convey("TestNewGoroutinePooledClient_batchSize_throttle2", t, func() { + CreateStreamingTableforGpcTest() + filter1, err := gpcConn.RunScript("1..1000") + So(err, ShouldBeNil) + req1 := &streaming.SubscribeRequest{ + Address: setup.Address, + TableName: "TradesTable", + ActionName: "subTradesTable1", + Offset: -1, + Filter: filter1.(*model.Vector), + Handler: new(gpcMessageHandler), + Reconnect: true, + } + req1.SetBatchSize(10000).SetThrottle(5) + err = gpc.Subscribe(req1) + So(err, ShouldBeNil) + _, err = gpcConn.RunScript("n=100;t=table(1..n as tag,now()+1..n as ts,rand(100.0,n) as data);" + "TradesTable.append!(t)") + So(err, ShouldBeNil) + _, err = gpcConn.RunScript("n=100;t=table(1..n as tag,now()+1..n as ts,rand(100.0,n) as data);" + "TradesTable.append!(t)") + So(err, ShouldBeNil) + waitDataGpc("ReceiveTable", 200) + tmp1, err := gpcConn.RunScript("select * 
+		tmp1, err := gpcConn.RunScript("select * from ReceiveTable order by tag,ts,data")
+		So(err, ShouldBeNil)
+		tmp2, err := gpcConn.RunScript("select * from TradesTable order by tag,ts,data")
+		So(err, ShouldBeNil)
+		err = gpc.UnSubscribe(req1)
+		So(err, ShouldBeNil)
+		re1 := tmp1.(*model.Table)
+		ex := tmp2.(*model.Table)
+		So(re1.Rows(), ShouldEqual, 200)
+		So(CheckmodelTableEqual(re1, ex, 0), ShouldBeTrue)
+		_, err = gpcConn.RunScript("dropStreamTable('TradesTable');dropStreamTable('ReceiveTable')")
+		So(err, ShouldBeNil)
+	})
+}
+
+func TestNewGoroutinePooledClient_subscribe_unsubscribe_resubscribe(t *testing.T) {
+	Convey("TestNewGoroutinePooledClient_subscribe_unsubscribe_resubscribe", t, func() {
+		CreateStreamingTableforGpcTest()
+		filter1, err := gpcConn.RunScript("1..1000")
+		So(err, ShouldBeNil)
+		req1 := &streaming.SubscribeRequest{
+			Address:    setup.Address,
+			TableName:  "TradesTable",
+			ActionName: "subTradesTable1",
+			Offset:     -1,
+			Filter:     filter1.(*model.Vector),
+			Handler:    new(gpcMessageHandler),
+			Reconnect:  true,
+		}
+		req1.SetBatchSize(10000).SetThrottle(5)
+		err = gpc.Subscribe(req1)
+		So(err, ShouldBeNil)
+		err = gpc.UnSubscribe(req1)
+		So(err, ShouldBeNil)
+		err = gpc.Subscribe(req1)
+		So(err, ShouldBeNil)
+		err = gpc.UnSubscribe(req1)
+		So(err, ShouldBeNil)
+	})
+}
+
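+// TestClearGpc closes the shared pooled client gpc and the shared connection
+// gpcConn, so it has to remain the last test in this file (Go runs tests in
+// source order within a file).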
"github.com/smartystreets/goconvey/convey" +) + +var pc = streaming.NewPollingClient(setup.IP, 9999) + +func CreateStreamingTable() { + ddb, err := api.NewSimpleDolphinDBClient(context.TODO(), setup.Address, setup.UserName, setup.Password) + AssertNil(err) + script := "try{ dropStreamTable(`tradesTable) }catch(ex){};" + + "share streamTable(10000:0,`timev`sym`pricev, [TIMESTAMP,SYMBOL,DOUBLE]) as tradesTable;" + _, err = ddb.RunScript(script) + AssertNil(err) + err = ddb.Close() + AssertNil(err) +} + +func TestSubscribe_exception(t *testing.T) { + Convey("Test_subscribe_exception", t, func() { + Convey("Test_AbstractClient_shared_table_polling_doesnot_exist_exception", func() { + req := &streaming.SubscribeRequest{ + Address: setup.Address, + TableName: "polling", + ActionName: "action1", + Offset: 0, + Reconnect: true, + } + _, err := pc.Subscribe(req) + So(err, ShouldNotBeNil) + }) + Convey("Test_subscribe_err_host", func() { + req := &streaming.SubscribeRequest{ + Address: "200.48.100.451:8876", + TableName: "polling", + ActionName: "action1", + Offset: 0, + Reconnect: true, + } + _, err := pc.Subscribe(req) + So(err, ShouldNotBeNil) + }) + Convey("Test_subscribe_err_port", func() { + req := &streaming.SubscribeRequest{ + Address: setup.IP + ":8876", + TableName: "polling", + ActionName: "action1", + Offset: 0, + Reconnect: true, + } + _, err := pc.Subscribe(req) + So(err, ShouldNotBeNil) + }) + Convey("Test_subscribe_err_TableName", func() { + req := &streaming.SubscribeRequest{ + Address: setup.Address, + TableName: "", + ActionName: "action1", + Offset: 0, + Reconnect: true, + } + _, err := pc.Subscribe(req) + So(err, ShouldNotBeNil) + }) + Convey("Test_subscribe_ActionName_null", func() { + req := &streaming.SubscribeRequest{ + Address: setup.Address, + ActionName: "", + TableName: "polling", + Offset: 0, + Reconnect: true, + } + _, err := pc.Subscribe(req) + So(err, ShouldNotBeNil) + }) + }) +} + +func TestPollingClient(t *testing.T) { + Convey("Test_PollingClient_test_size", t, func() { + ddb, err := api.NewSimpleDolphinDBClient(context.TODO(), setup.Address, setup.UserName, setup.Password) + So(err, ShouldBeNil) + CreateStreamingTable() + req := &streaming.SubscribeRequest{ + Address: setup.Address, + TableName: "tradesTable", + ActionName: "action1", + Offset: 0, + Reconnect: true, + } + poller, err := pc.Subscribe(req) + So(err, ShouldBeNil) + Convey("Test_GetTopicPoller_exitsing_data", func() { + msg := poller.Poll(1000, 10) + So(len(msg), ShouldEqual, 0) + }) + Convey("Test_poll_size_sub_data", func() { + for i := 0; i < 10; i++ { //data 0 { + So(len(msg), ShouldEqual, 50) + } + } + for i := 0; i < 10; i++ { //data>size + _, err = ddb.RunScript("dataNum=5000;insert into tradesTable values(take(now(), dataNum), take(`000905`600001`300201`000908`600002, dataNum), rand(1000,dataNum)/10.0);") + So(err, ShouldBeNil) + msg := poller.Poll(100000, 1000) + if msg == nil { + continue + } else if len(msg) > 0 { + So(len(msg), ShouldBeGreaterThanOrEqualTo, 1000) + } + } + for i := 0; i < 10; i++ { //data=size + _, err = ddb.RunScript("dataNum=5000;insert into tradesTable values(take(now(), dataNum), take(`000905`600001`300201`000908`600002, dataNum), rand(1000,dataNum)/10.0);") + So(err, ShouldBeNil) + msg := poller.Poll(100000, 5000) + if msg == nil { + continue + } else if len(msg) > 0 { + So(len(msg), ShouldEqual, 5000) + } + } + for i := 0; i < 10; i++ { //bigsize + _, err = ddb.RunScript("dataNum=5000;insert into tradesTable values(take(now(), dataNum), 
+			for i := 0; i < 10; i++ { //bigsize
+				_, err = ddb.RunScript("dataNum=5000;insert into tradesTable values(take(now(), dataNum), take(`000905`600001`300201`000908`600002, dataNum), rand(1000,dataNum)/10.0);")
+				So(err, ShouldBeNil)
+				msg := poller.Poll(1000, 1000000000)
+				if msg == nil {
+					continue
+				} else if len(msg) > 0 {
+					So(len(msg), ShouldBeGreaterThanOrEqualTo, 5000)
+				}
+			}
+			for i := 0; i < 10; i++ { //bigData
+				_, err = ddb.RunScript("dataNum=10000000;insert into tradesTable values(take(now(), dataNum), take(`000905`600001`300201`000908`600002, dataNum), rand(1000,dataNum)/10.0);")
+				So(err, ShouldBeNil)
+				msg := poller.Poll(1000, 10000)
+				if msg == nil {
+					continue
+				} else if len(msg) > 0 {
+					So(len(msg), ShouldBeGreaterThanOrEqualTo, 10000)
+				}
+			}
+			for i := 0; i < 10; i++ { //smallData
+				_, err = ddb.RunScript("dataNum=1;insert into tradesTable values(take(now(), dataNum), take(`000905`600001`300201`000908`600002, dataNum), rand(1000,dataNum)/10.0);")
+				So(err, ShouldBeNil)
+				msg := poller.Poll(1000, 10000)
+				if msg == nil {
+					continue
+				} else if len(msg) > 0 {
+					So(len(msg), ShouldBeGreaterThanOrEqualTo, 1)
+				}
+			}
+			for i := 0; i < 10; i++ { //append Many times
+				_, err = ddb.RunScript("dataNum=10;insert into tradesTable values(take(now(), dataNum), take(`000905`600001`300201`000908`600002, dataNum), rand(1000,dataNum)/10.0);")
+				So(err, ShouldBeNil)
+				_, err = ddb.RunScript("dataNum=20;insert into tradesTable values(take(now(), dataNum), take(`000905`600001`300201`000908`600002, dataNum), rand(1000,dataNum)/10.0);")
+				So(err, ShouldBeNil)
+				_, err = ddb.RunScript("dataNum=30;insert into tradesTable values(take(now(), dataNum), take(`000905`600001`300201`000908`600002, dataNum), rand(1000,dataNum)/10.0);")
+				So(err, ShouldBeNil)
+				_, err = ddb.RunScript("dataNum=40;insert into tradesTable values(take(now(), dataNum), take(`000905`600001`300201`000908`600002, dataNum), rand(1000,dataNum)/10.0);")
+				So(err, ShouldBeNil)
+				_, err = ddb.RunScript("dataNum=50;insert into tradesTable values(take(now(), dataNum), take(`000905`600001`300201`000908`600002, dataNum), rand(1000,dataNum)/10.0);")
+				So(err, ShouldBeNil)
+				msg := poller.Poll(1000, 10000)
+				if msg == nil {
+					continue
+				} else if len(msg) > 0 {
+					So(len(msg), ShouldBeGreaterThanOrEqualTo, 100)
+				}
+			}
+			for i := 0; i < 10; i++ { //size=0
+				_, err = ddb.RunScript("dataNum=100;insert into tradesTable values(take(now(), dataNum), take(`000905`600001`300201`000908`600002, dataNum), rand(1000,dataNum)/10.0);")
+				So(err, ShouldBeNil)
+				msg := poller.Poll(1000, 0)
+				if msg == nil {
+					continue
+				} else if len(msg) > 0 {
+					So(len(msg), ShouldEqual, 100)
+				}
+			}
+			for i := 0; i < 10; i++ { //size<0
+				_, err = ddb.RunScript("dataNum=100;insert into tradesTable values(take(now(), dataNum), take(`000905`600001`300201`000908`600002, dataNum), rand(1000,dataNum)/10.0);")
+				So(err, ShouldBeNil)
+				msg := poller.Poll(1000, -10)
+				if msg == nil {
+					continue
+				} else if len(msg) > 0 {
+					So(len(msg), ShouldEqual, 100)
+				}
+			}
+		})
+		err = pc.UnSubscribe(req)
+		So(err, ShouldBeNil)
+		So(ddb.Close(), ShouldBeNil)
+	})
+}
+
+func TestSubscribe_size(t *testing.T) {
+	Convey("TestSubscribe_size", t, func() {
+		ddb, err := api.NewSimpleDolphinDBClient(context.TODO(), setup.Address, setup.UserName, setup.Password)
+		So(err, ShouldBeNil)
+		CreateStreamingTable()
+		req1 := &streaming.SubscribeRequest{
+			Address:    setup.Address,
+			TableName:  "tradesTable",
+			ActionName: "subtrades1",
+			Offset:     0,
+			Reconnect:  true,
+		}
+		poller1, err := pc.Subscribe(req1)
+		So(err, ShouldBeNil)
+		req2 := &streaming.SubscribeRequest{
+			Address:    setup.Address,
+			TableName:  "tradesTable",
+			ActionName: "subtrades2",
+			Offset:     0,
+			Reconnect:  true,
+		}
+		poller2, err := pc.Subscribe(req2)
+		So(err, ShouldBeNil)
+		for i := 0; i < 10; i++ {
+			_, err = ddb.RunScript("dataNum=5000;insert into tradesTable values(take(now(), dataNum), take(`000905`600001`300201`000908`600002, dataNum), rand(1000,dataNum)/10.0);")
+			So(err, ShouldBeNil)
+			msg1 := poller1.Poll(10000, 10000)
+			msg2 := poller2.Poll(10000, 1000)
+			if msg1 == nil {
+				continue
+			} else if len(msg1) > 0 {
+				So(len(msg1), ShouldBeGreaterThanOrEqualTo, 1000)
+			}
+
+			if msg2 == nil {
+				continue
+			} else if len(msg2) > 0 {
+				So(len(msg2), ShouldBeGreaterThanOrEqualTo, 1000)
+			}
+		}
+		err = pc.UnSubscribe(req1)
+		So(err, ShouldBeNil)
+		err = pc.UnSubscribe(req2)
+		So(err, ShouldBeNil)
+		So(ddb.Close(), ShouldBeNil)
+	})
+}
+
+func TestSubscribe_take(t *testing.T) {
+	Convey("TestSubscribe_take", t, func() {
+		ddb, err := api.NewSimpleDolphinDBClient(context.TODO(), setup.Address, setup.UserName, setup.Password)
+		So(err, ShouldBeNil)
+		script := "try{ dropStreamTable(`tradesTable1)}catch(ex){};" +
+			"share streamTable(10000:0,`timev`sym`pricev, [TIMESTAMP,SYMBOL,DOUBLE]) as tradesTable1;"
+		_, err = ddb.RunScript(script)
+		So(err, ShouldBeNil)
+		req := &streaming.SubscribeRequest{
+			Address:    setup.Address,
+			TableName:  "tradesTable1",
+			ActionName: "subtrades1",
+			Offset:     0,
+			Reconnect:  false,
+		}
+		poller3, err := pc.Subscribe(req)
+		So(err, ShouldBeNil)
+		_, err = ddb.RunScript("dataNum=1; insert into tradesTable1 values(take(now(), dataNum), take(`000905`600001`300201`000908`600002, dataNum), rand(1000,dataNum)/10.0);")
+		So(err, ShouldBeNil)
+		IMessage := poller3.Take()
+		Topicmsg := IMessage.GetTopic()
+		fmt.Println(Topicmsg)
+		SubscriptionTopic, err := ddb.RunScript("getSubscriptionTopic(tableName=\"tradesTable1\", actionName=\"subtrades1\")")
+		So(err, ShouldBeNil)
+		exTopic := SubscriptionTopic.(*model.Vector).Get(0).String()
+		So(exTopic, ShouldEqual, "string("+Topicmsg+")")
+		Offset := IMessage.GetOffset()
+		So(Offset, ShouldEqual, 0)
+		tmp, err := ddb.RunScript("select * from tradesTable1")
+		So(err, ShouldBeNil)
+		exTable := tmp.(*model.Table)
+		retimev := IMessage.GetValue(0).(*model.Scalar).String()
+		resymbol := IMessage.GetValue(1).(*model.Scalar).String()
+		repricev := IMessage.GetValue(2).(*model.Scalar).String()
+		extimev := exTable.GetColumnByIndex(0).Get(0).String()
+		exsymbol := exTable.GetColumnByIndex(1).Get(0).String()
+		expricev := exTable.GetColumnByIndex(2).Get(0).String()
+		retimev1 := IMessage.GetValueByName("timev").String()
+		resymbol1 := IMessage.GetValueByName("sym").String()
+		repricev1 := IMessage.GetValueByName("pricev").String()
+		So(retimev, ShouldEqual, "timestamp("+extimev+")")
+		So(resymbol, ShouldEqual, "string("+exsymbol+")")
+		So(repricev, ShouldEqual, "double("+expricev+")")
+		So(retimev1, ShouldEqual, "timestamp("+extimev+")")
+		So(resymbol1, ShouldEqual, "string("+exsymbol+")")
+		So(repricev1, ShouldEqual, "double("+expricev+")")
+		err = pc.UnSubscribe(req)
+		So(err, ShouldBeNil)
+		So(ddb.Close(), ShouldBeNil)
+	})
+}
+
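+// The polling client pc is shared by every test in this file, so
+// TestPollingClientClose, which tears it down, is likewise expected to run
+// last.
+func TestPollingClientClose(t *testing.T) {
+	Convey("TestPollingClientClose", t, func() {
+		IsClosed := pc.IsClosed()
+		So(IsClosed, ShouldBeFalse)
+		pc.Close()
+		IsClosed = pc.IsClosed()
+		So(IsClosed, ShouldBeTrue)
+	})
+}
diff --git a/test/streaming/util.go b/test/streaming/util.go
new file mode 100644
index 0000000..b471b0d
--- /dev/null
+++ b/test/streaming/util.go
@@ -0,0 +1,703 @@
+package test
+
+import (
+	"context"
+	"fmt"
+	"reflect"
+
+	"github.com/dolphindb/api-go/api"
+	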
"github.com/dolphindb/api-go/model" + "github.com/dolphindb/api-go/test/setup" +) + +const ( + DfsDBPath = "dfs://test_dfsTable" + TbName1 = "tb1" + TbName2 = "tb2" + DfsTBName1 = "pt1" + DfsTBName2 = "pt2" + DiskDBPath = setup.WORKDIR + `/testTable` + DBhandler = "db" + MemTableName = "memTable" +) + +func AssertNil(err error) { + if err != nil { + panic(fmt.Sprintf("err is not nil: %s", err.Error())) + } +} + +func AssertEqual(s, d interface{}) { + if !reflect.DeepEqual(s, d) { + panic(fmt.Sprintf("%v != %v", s, d)) + } +} + +func LoadTextFileName(ddb api.DolphinDB, remoteFilePath string) (*api.Table, error) { + t := new(api.LoadTextRequest). + SetFileName(remoteFilePath) + di, err := ddb.LoadText(t) + return di, err +} + +func LoadTextDelimiter(ddb api.DolphinDB, remoteFilePath string, delimiter string) (*api.Table, error) { + t := new(api.LoadTextRequest). + SetFileName(remoteFilePath).SetDelimiter(delimiter) + di, err := ddb.LoadText(t) + return di, err +} + +func PloadTextFileName(ddb api.DolphinDB, remoteFilePath string) (*api.Table, error) { + t := new(api.PloadTextRequest). + SetFileName(remoteFilePath) + di, err := ddb.PloadText(t) + return di, err +} + +func PloadTextDelimiter(ddb api.DolphinDB, remoteFilePath string, delimiter string) (*api.Table, error) { + t := new(api.PloadTextRequest). + SetFileName(remoteFilePath).SetDelimiter(delimiter) + di, err := ddb.PloadText(t) + return di, err +} + +func CompareTablesDataformTable(tableName1 *model.Table, tableName2 *api.Table) bool { + re2 := tableName2.Data + if tableName1.Columns() == re2.Columns() && tableName1.GetDataTypeString() == re2.GetDataTypeString() && tableName1.GetDataForm() == re2.GetDataForm() { + for i := 0; i < tableName1.Columns(); i++ { + reTable1 := tableName1.GetColumnByName(tableName1.GetColumnNames()[i]).Data.Value() + reTable2 := tableName2.Data.GetColumnByName(tableName2.Data.GetColumnNames()[i]).Data.Value() + for i := 0; i < tableName1.Rows(); i++ { + if reTable1[i] == reTable2[i] { + continue + } else { + return false + } + } + } + return true + } + return false +} + +func CompareTables(tableName1 *api.Table, tableName2 *api.Table) bool { + re1 := tableName1.Data + re2 := tableName2.Data + if re1.Columns() == re2.Columns() && re1.GetDataTypeString() == re2.GetDataTypeString() && re1.GetDataForm() == re2.GetDataForm() { + for i := 0; i < tableName1.Data.Columns(); i++ { + reTable1 := tableName1.Data.GetColumnByName(tableName1.Data.GetColumnNames()[i]).Data.Value() + reTable2 := tableName2.Data.GetColumnByName(tableName2.Data.GetColumnNames()[i]).Data.Value() + for i := 0; i < tableName1.Data.Rows(); i++ { + if reTable1[i] == reTable2[i] { + continue + } else { + return false + } + } + } + return true + } + return false +} + +func DropDatabase(ddb api.DolphinDB, dbPath string) error { + t := new(api.DropDatabaseRequest). + SetDirectory(dbPath) + err := ddb.DropDatabase(t) + return err +} + +func ExistsDatabase(ddb api.DolphinDB, dbPath string) (bool, error) { + d := new(api.ExistsDatabaseRequest). + SetPath(dbPath) + b, err := ddb.ExistsDatabase(d) + return b, err +} + +func CreateMemTable(ddb api.DolphinDB, tableName string, colName1 string, colName2 string, colName3 string, colName4 string, dataList1 string, dataList2 string, dataList3 string, dataList4 string) (*api.Table, error) { + l := new(api.TableRequest). + SetTableName(tableName). + AddTableParam(colName1, dataList1). + AddTableParam(colName2, dataList2). + AddTableParam(colName3, dataList3). 
+		AddTableParam(colName4, dataList4)
+	t, err := ddb.Table(l)
+	return t, err
+}
+
+func CreateTableWithCapacity(ddb api.DolphinDB, tableName string, capacity int32, size int32, colName []string, typeName []string) (*api.Table, error) {
+	l := new(api.TableWithCapacityRequest).
+		SetTableName(tableName).SetCapacity(capacity).SetSize(size).
+		SetColNames(colName).
+		SetColTypes(typeName)
+	t, err := ddb.TableWithCapacity(l)
+	return t, err
+}
+
+func ExistsTable(ddb api.DolphinDB, dbPath string, tableName string) (bool, error) {
+	l := new(api.ExistsTableRequest).
+		SetDBPath(dbPath).
+		SetTableName(tableName)
+	b, err := ddb.ExistsTable(l)
+	return b, err
+}
+
+func SaveTable(ddb api.DolphinDB, dbPath string, tableName string, dbhandler string) error {
+	l := new(api.SaveTableRequest).
+		SetTableName(tableName).
+		SetDBPath(dbPath).
+		SetDBHandle(dbhandler)
+	err := ddb.SaveTable(l)
+	return err
+}
+
+func DropTable(ddb api.DolphinDB, tableName string, dfsDBPath string) error {
+	t := new(api.DropTableRequest).
+		SetTableName(tableName).
+		SetDBPath(dfsDBPath)
+	err := ddb.DropTable(t)
+	return err
+}
+
+func LoadTable(ddb api.DolphinDB, tableName string, dbPath string) (*api.Table, error) {
+	t := new(api.LoadTableRequest).
+		SetTableName(tableName).
+		SetDatabase(dbPath)
+	df, err := ddb.LoadTable(t)
+	return df, err
+}
+
+func LoadTablePartitions(ddb api.DolphinDB, tableName string, dbPath string, partitions string) (*api.Table, error) {
+	t := new(api.LoadTableRequest).
+		SetTableName(tableName).
+		SetDatabase(dbPath).
+		SetPartitions(partitions)
+	df, err := ddb.LoadTable(t)
+	return df, err
+}
+
+func LoadTableMemoryMode(ddb api.DolphinDB, tableName string, dbPath string, memoryMode bool) (*api.Table, error) {
+	t := new(api.LoadTableRequest).
+		SetTableName(tableName).
+		SetDatabase(dbPath).
+		SetMemoryMode(memoryMode)
+	df, err := ddb.LoadTable(t)
+	return df, err
+}
+
+func LoadTableBySQL(ddb api.DolphinDB, na string, loadSQL string, dbPath string, partitionedTableName string) (*api.Table, error) {
+	t := new(api.LoadTableBySQLRequest).
+		SetSQL(fmt.Sprintf(loadSQL, na)).
+		SetDBPath(dbPath).
+		SetTableName(partitionedTableName)
+	df, err := ddb.LoadTableBySQL(t)
+	return df, err
+}
+
+func Database(ddb api.DolphinDB, dbPath string, dbhandler string) (*api.Database, error) {
+	d := new(api.DatabaseRequest).
+		SetDirectory(dbPath).
+		SetDBHandle(dbhandler)
+	dt, err := ddb.Database(d)
+	return dt, err
+}
+
+func CreateDatabase(ddb api.DolphinDB, dbPath string, dbhandler string, partitionType string, partitionScheme string, location string, engineType string, atomic string) (*api.Database, error) {
+	d := new(api.DatabaseRequest).
+		SetDBHandle(dbhandler).
+		SetDirectory(dbPath).
+		SetPartitionType(partitionType).
+		SetPartitionScheme(partitionScheme).
+		SetEngine(engineType).
+		SetLocations(location).
+		SetAtomic(atomic)
+	dt, err := ddb.Database(d)
+	return dt, err
+}
+
+func CreateTable(db *api.Database, tableName string, dimensionTableName string) (*api.Table, error) {
+	c := new(api.CreateTableRequest).
+		SetSrcTable(tableName).
+		SetDimensionTableName(dimensionTableName)
+	t, err := db.CreateTable(c)
+	return t, err
+}
+
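+// A usage sketch for the builder helpers above (the database path, handle,
+// scheme and table names here are illustrative values, not ones used by the
+// test suite; "tdata" is assumed to be an existing table object on the server):
+//
+//	db, err := CreateDatabase(ddb, "dfs://demoDB", "db", "VALUE",
+//		"2010.01.01..2010.01.30", "", "OLAP", "TRANS")
+//	AssertNil(err)
+//	_, err = CreateDefPartitionedTable(db, "tdata", "pt", []string{"date"})
+//	AssertNil(err)
+func CreateDefPartitionedTable(ddb *api.Database, tableName string, partitionedTableName string, partitioncolumns []string) (*api.Table, error) {
+	c := new(api.CreatePartitionedTableRequest).
+		SetSrcTable(tableName).
+		SetPartitionedTableName(partitionedTableName).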
+ SetPartitionColumns(partitioncolumns) + t, err := ddb.CreatePartitionedTable(c) + return t, err +} + +func DropPartition(db api.DolphinDB, partitionedTableName string, dbPath string, partitionPaths string) error { + t := new(api.DropPartitionRequest). + SetPartitionPaths(partitionPaths). + SetDBPath(dbPath). + SetTableName(partitionedTableName) + err := db.DropPartition(t) + return err +} + +func LoadPartitionedTable(db api.DolphinDB, partitionedTableName string, dbPath string) (*api.Table, error) { + t := new(api.LoadTableRequest). + SetTableName(partitionedTableName). + SetDatabase(dbPath) + df, err := db.LoadTable(t) + return df, err +} + +func CreateDfsDimensiondb(dbPath string, tableName1 string, tableName2 string) { + ddb, err := api.NewSimpleDolphinDBClient(context.TODO(), setup.Address, setup.UserName, setup.Password) + AssertNil(err) + ddbScript := ` + dbPath="` + dbPath + `" + if(existsDatabase(dbPath)) + dropDatabase(dbPath) + db=database(dbPath, RANGE, 1..10) + n=100000 + tdata=table(sort(take(2010.01.01..2010.12.31, n)) as date, 1..n as id, take(["AMD", "QWE", "CES", "DOP", "ASZ", "FSD", "BBVC", "AWQ", "DS"], n) as sym, rand(100, n) as val) + db.createTable(tdata, "` + tableName1 + `").append!(tdata) + db.createTable(tdata, "` + tableName2 + `").append!(tdata) + ` + _, err = ddb.RunScript(ddbScript) + AssertNil(err) + errClose := ddb.Close() + AssertNil(errClose) +} + +func CreateDfsRangedb(dbPath string, tableName1 string, tableName2 string) { + ddb, err := api.NewSimpleDolphinDBClient(context.TODO(), setup.Address, setup.UserName, setup.Password) + AssertNil(err) + ddbScript := ` + dbPath="` + dbPath + `" + if(existsDatabase(dbPath)) + dropDatabase(dbPath) + db=database(dbPath, RANGE, 0..10*10000+1) + n=100000 + tdata=table(sort(take(2010.01.01..2010.12.31, n)) as date, 1..n as id, take(["AMD", "QWE", "CES", "DOP", "ASZ", "FSD", "BBVC", "AWQ", "DS"], n) as sym, rand(100, n) as val) + db.createPartitionedTable(tdata,"` + tableName1 + `","id").append!(tdata) + db.createPartitionedTable(tdata,"` + tableName2 + `","id").append!(tdata) + ` + _, err = ddb.RunScript(ddbScript) + AssertNil(err) + errClose := ddb.Close() + AssertNil(errClose) +} + +func CreateDfsRangedbChunkGranularity(dbPath string, tableName1 string, tableName2 string) { + ddb, err := api.NewSimpleDolphinDBClient(context.TODO(), setup.Address, setup.UserName, setup.Password) + AssertNil(err) + ddbScript := ` + dbPath="` + dbPath + `" + if(existsDatabase(dbPath)) + dropDatabase(dbPath) + db=database(dbPath, RANGE, 0..10*10000+1, chunkGranularity="DATABASE") + n=100000 + tdata=table(sort(take(2010.01.01..2010.12.31, n)) as date, 1..n as id, take(["AMD", "QWE", "CES", "DOP", "ASZ", "FSD", "BBVC", "AWQ", "DS"], n) as sym, rand(100, n) as val) + db.createPartitionedTable(tdata,"` + tableName1 + `","id").append!(tdata) + db.createPartitionedTable(tdata,"` + tableName2 + `","id").append!(tdata) + ` + _, err = ddb.RunScript(ddbScript) + AssertNil(err) + errClose := ddb.Close() + AssertNil(errClose) +} + +func CreateDfsHashdb(dbPath string, tableName1 string, tableName2 string) { + ddb, err := api.NewSimpleDolphinDBClient(context.TODO(), setup.Address, setup.UserName, setup.Password) + AssertNil(err) + ddbScript := ` + dbPath="` + dbPath + `" + if(existsDatabase(dbPath)) + dropDatabase(dbPath) + db=database(dbPath, HASH, [INT,10]) + n=100000 + tdata=table(sort(take(2010.01.01..2010.12.31, n)) as date, take(1..10, n) as id, take(["AMD", "QWE", "CES", "DOP", "ASZ", "FSD", "BBVC", "AWQ", "DS"], n) as sym, rand(100, n) as 
val) + db.createPartitionedTable(tdata,"` + tableName1 + `","id").append!(tdata) + db.createPartitionedTable(tdata,"` + tableName2 + `","id").append!(tdata) + ` + _, err = ddb.RunScript(ddbScript) + AssertNil(err) + errClose := ddb.Close() + AssertNil(errClose) +} + +func CreateDfsHashdbChunkGranularity(dbPath string, tableName1 string, tableName2 string) { + ddb, err := api.NewSimpleDolphinDBClient(context.TODO(), setup.Address, setup.UserName, setup.Password) + AssertNil(err) + ddbScript := ` + dbPath="` + dbPath + `" + if(existsDatabase(dbPath)) + dropDatabase(dbPath) + db=database(dbPath, HASH, [INT,10], chunkGranularity="DATABASE") + n=100000 + tdata=table(sort(take(2010.01.01..2010.12.31, n)) as date, take(1..10, n) as id, take(["AMD", "QWE", "CES", "DOP", "ASZ", "FSD", "BBVC", "AWQ", "DS"], n) as sym, rand(100, n) as val) + db.createPartitionedTable(tdata,"` + tableName1 + `","id").append!(tdata) + db.createPartitionedTable(tdata,"` + tableName2 + `","id").append!(tdata) + ` + _, err = ddb.RunScript(ddbScript) + AssertNil(err) + errClose := ddb.Close() + AssertNil(errClose) +} + +func CreateDfsValuedb(dbPath string, tableName1 string, tableName2 string) { + ddb, err := api.NewSimpleDolphinDBClient(context.TODO(), setup.Address, setup.UserName, setup.Password) + AssertNil(err) + ddbScript := ` + dbPath="` + dbPath + `" + if(existsDatabase(dbPath)){dropDatabase(dbPath)} + db=database(dbPath, VALUE, 2010.01.01..2010.01.30) + n=100000 + tdata=table(sort(take(2010.01.01..2010.12.31, n)) as date, 1..n as id, take(["AMD", "QWE", "CES", "DOP", "ASZ", "FSD", "BBVC", "AWQ", "DS"], n) as sym, rand(100, n) as val) + db.createPartitionedTable(tdata,"` + tableName1 + `","date").append!(tdata) + db.createPartitionedTable(tdata,"` + tableName2 + `","date").append!(tdata) + ` + _, err = ddb.RunScript(ddbScript) + AssertNil(err) + errClose := ddb.Close() + AssertNil(errClose) +} + +func CreateDfsValuedbChunkGranularity(dbPath string, tableName1 string, tableName2 string) { + ddb, err := api.NewSimpleDolphinDBClient(context.TODO(), setup.Address, setup.UserName, setup.Password) + AssertNil(err) + ddbScript := ` + dbPath="` + dbPath + `" + if(existsDatabase(dbPath)){dropDatabase(dbPath)} + db=database(dbPath, VALUE, 2010.01.01..2010.01.30, chunkGranularity="DATABASE") + n=100000 + tdata=table(sort(take(2010.01.01..2010.12.31, n)) as date, 1..n as id, take(["AMD", "QWE", "CES", "DOP", "ASZ", "FSD", "BBVC", "AWQ", "DS"], n) as sym, rand(100, n) as val) + db.createPartitionedTable(tdata,"` + tableName1 + `","date").append!(tdata) + db.createPartitionedTable(tdata,"` + tableName2 + `","date").append!(tdata) + ` + _, err = ddb.RunScript(ddbScript) + AssertNil(err) + errClose := ddb.Close() + AssertNil(errClose) +} + +func CreateDfsListdb(dbPath string, tableName1 string, tableName2 string) { + ddb, err := api.NewSimpleDolphinDBClient(context.TODO(), setup.Address, setup.UserName, setup.Password) + AssertNil(err) + ddbScript := ` + dbPath="` + dbPath + `" + if(existsDatabase(dbPath)) + dropDatabase(dbPath) + db=database(dbPath, LIST, [["AMD", "QWE", "CES"],["DOP", "ASZ"],["FSD", "BBVC"],["AWQ", "DS"]]) + n=100000 + tdata=table(sort(take(2010.01.01..2010.12.31, n)) as date, 1..n as id, take(["AMD", "QWE", "CES", "DOP", "ASZ", "FSD", "BBVC", "AWQ", "DS"], n) as sym, rand(100, n) as val) + db.createPartitionedTable(tdata,"` + tableName1 + `","sym").append!(tdata) + db.createPartitionedTable(tdata,"` + tableName2 + `","sym").append!(tdata) + ` + _, err = ddb.RunScript(ddbScript) + AssertNil(err) + errClose := 
ddb.Close() + AssertNil(errClose) +} + +func CreateDfsListdbChunkGranularity(dbPath string, tableName1 string, tableName2 string) { + ddb, err := api.NewSimpleDolphinDBClient(context.TODO(), setup.Address, setup.UserName, setup.Password) + AssertNil(err) + ddbScript := ` + dbPath="` + dbPath + `" + if(existsDatabase(dbPath)) + dropDatabase(dbPath) + db=database(dbPath, LIST, [["AMD", "QWE", "CES"],["DOP", "ASZ"],["FSD", "BBVC"],["AWQ", "DS"]], chunkGranularity="DATABASE") + n=100000 + tdata=table(sort(take(2010.01.01..2010.12.31, n)) as date, 1..n as id, take(["AMD", "QWE", "CES", "DOP", "ASZ", "FSD", "BBVC", "AWQ", "DS"], n) as sym, rand(100, n) as val) + db.createPartitionedTable(tdata,"` + tableName1 + `","sym").append!(tdata) + db.createPartitionedTable(tdata,"` + tableName2 + `","sym").append!(tdata) + ` + _, err = ddb.RunScript(ddbScript) + AssertNil(err) + errClose := ddb.Close() + AssertNil(errClose) +} + +func CreateDfsCompoRangeRangedb(dbPath string, tableName1 string, tableName2 string) { + ddb, err := api.NewSimpleDolphinDBClient(context.TODO(), setup.Address, setup.UserName, setup.Password) + AssertNil(err) + ddbScript := ` + dbPath="` + dbPath + `" + if(existsDatabase(dbPath)){dropDatabase(dbPath)} + db1=database('', RANGE, 2010.01M+0..12) + db2=database('', RANGE, 1 3 5 7 9 11) + db=database(dbPath, COMPO, [db1,db2]) + n=100000 + tdata=table(sort(take(2010.01.01..2010.12.31, n)) as date, take(1..10, n) as id, take(["AMD", "QWE", "CES", "DOP", "ASZ", "FSD", "BBVC", "AWQ", "DS"], n) as sym, rand(100, n) as val) + db.createPartitionedTable(tdata,"` + tableName1 + `",["date", "id"]).append!(tdata) + db.createPartitionedTable(tdata,"` + tableName2 + `",["date", "id"]).append!(tdata) + ` + _, err = ddb.RunScript(ddbScript) + AssertNil(err) + errClose := ddb.Close() + AssertNil(errClose) +} + +func CreateDfsCompoRangeRangedbChunkGranularity(dbPath string, tableName1 string, tableName2 string) { + ddb, err := api.NewSimpleDolphinDBClient(context.TODO(), setup.Address, setup.UserName, setup.Password) + AssertNil(err) + ddbScript := ` + dbPath="` + dbPath + `" + if(existsDatabase(dbPath)){dropDatabase(dbPath)} + db1=database('', RANGE, 2010.01M+0..12) + db2=database('', RANGE, 1 3 5 7 9 11) + db=database(dbPath, COMPO, [db1,db2], chunkGranularity="DATABASE") + n=100000 + tdata=table(sort(take(2010.01.01..2010.12.31, n)) as date, take(1..10, n) as id, take(["AMD", "QWE", "CES", "DOP", "ASZ", "FSD", "BBVC", "AWQ", "DS"], n) as sym, rand(100, n) as val) + db.createPartitionedTable(tdata,"` + tableName1 + `",["date", "id"]).append!(tdata) + db.createPartitionedTable(tdata,"` + tableName2 + `",["date", "id"]).append!(tdata) + ` + _, err = ddb.RunScript(ddbScript) + AssertNil(err) + errClose := ddb.Close() + AssertNil(errClose) +} + +func CreateDfsCompoRangeValuedb(dbPath string, tableName1 string, tableName2 string) { + ddb, err := api.NewSimpleDolphinDBClient(context.TODO(), setup.Address, setup.UserName, setup.Password) + AssertNil(err) + ddbScript := ` + dbPath="` + dbPath + `" + if(existsDatabase(dbPath)){dropDatabase(dbPath)} + db1=database('', RANGE, 0..10*10000+1) + db2=database('', VALUE, 2010.01.01..2010.01.30) + db=database(dbPath, COMPO, [db1, db2]) + n=100000 + tdata=table(sort(take(2010.01.01..2010.12.31, n)) as date, 1..n as id, take(["AMD", "QWE", "CES", "DOP", "ASZ", "FSD", "BBVC", "AWQ", "DS"], n) as sym, rand(100, n) as val) + db.createPartitionedTable(tdata,"` + tableName1 + `",["id", "date"]).append!(tdata) + db.createPartitionedTable(tdata,"` + tableName2 + `",["id", 
"date"]).append!(tdata) + ` + _, err = ddb.RunScript(ddbScript) + AssertNil(err) + errClose := ddb.Close() + AssertNil(errClose) +} + +func CreateDfsCompoRangeHashdb(dbPath string, tableName1 string, tableName2 string) { + ddb, err := api.NewSimpleDolphinDBClient(context.TODO(), setup.Address, setup.UserName, setup.Password) + AssertNil(err) + ddbScript := ` + dbPath="` + dbPath + `" + if(existsDatabase(dbPath)){dropDatabase(dbPath)} + db1=database('', RANGE, 2010.01M+0..12) + db2=database('', HASH, [INT, 10]) + db=database(dbPath, COMPO, [db1, db2]) + n=100000 + tdata=table(sort(take(2010.01.01..2010.12.31, n)) as date, 1..n as id, take(["AMD", "QWE", "CES", "DOP", "ASZ", "FSD", "BBVC", "AWQ", "DS"], n) as sym, rand(100, n) as val) + db.createPartitionedTable(tdata,"` + tableName1 + `",["date", "id"]).append!(tdata) + db.createPartitionedTable(tdata,"` + tableName2 + `",["date", "id"]).append!(tdata) + ` + _, err = ddb.RunScript(ddbScript) + AssertNil(err) + errClose := ddb.Close() + AssertNil(errClose) +} + +func CreateDfsCompoRangeListdb(dbPath string, tableName1 string, tableName2 string) { + ddb, err := api.NewSimpleDolphinDBClient(context.TODO(), setup.Address, setup.UserName, setup.Password) + AssertNil(err) + ddbScript := ` + dbPath="` + dbPath + `" + if(existsDatabase(dbPath)){dropDatabase(dbPath)} + db1=database('', RANGE, 2010.01M+0..12) + db2=database('', LIST, ["AMD", "QWE", "CES", "DOP", "ASZ", "FSD", "BBVC", "AWQ", "DS"]) + db=database(dbPath, COMPO, [db1, db2]) + n=100000 + tdata=table(sort(take(2010.01.01..2010.12.31, n)) as date, 1..n as id, take(["AMD", "QWE", "CES", "DOP", "ASZ", "FSD", "BBVC", "AWQ", "DS"], n) as sym, rand(100, n) as val) + db.createPartitionedTable(tdata,"` + tableName1 + `",["date", "sym"]).append!(tdata) + db.createPartitionedTable(tdata,"` + tableName2 + `",["date", "sym"]).append!(tdata) + ` + _, err = ddb.RunScript(ddbScript) + AssertNil(err) + errClose := ddb.Close() + AssertNil(errClose) +} + +func CreateDiskUnpartitioneddb(dbPath string, tbName1 string, tbName2 string) { + ddb, err := api.NewSimpleDolphinDBClient(context.TODO(), setup.Address, setup.UserName, setup.Password) + AssertNil(err) + ddbScript := ` + dbPath="` + dbPath + `" + if(exists(dbPath)){rmdir(dbPath, true)} + db=database(dbPath) + n=100000 + tdata=table(sort(take(2010.01.01..2010.12.31, n)) as date, 1..n as id, take(["AMD", "QWE", "CES", "DOP", "ASZ", "FSD", "BBVC", "AWQ", "DS"], n) as sym, rand(100, n) as val) + saveTable(db, tdata, "` + tbName1 + `") + saveTable(db, tdata, "` + tbName2 + `") + ` + _, err = ddb.RunScript(ddbScript) + AssertNil(err) + errClose := ddb.Close() + AssertNil(errClose) +} + +func CreateDiskRangedb(dbPath string, tableName1 string, tableName2 string) { + ddb, err := api.NewSimpleDolphinDBClient(context.TODO(), setup.Address, setup.UserName, setup.Password) + AssertNil(err) + ddbScript := ` + dbPath="` + dbPath + `" + if(exists(dbPath)){rmdir(dbPath, true)} + db=database(dbPath, RANGE, 0..10*10000+1) + n=100000 + tdata=table(sort(take(2010.01.01..2010.12.31, n)) as date, 1..n as id, take(["AMD", "QWE", "CES", "DOP", "ASZ", "FSD", "BBVC", "AWQ", "DS"], n) as sym, rand(100, n) as val) + db.createPartitionedTable(tdata,"` + tableName1 + `","id").append!(tdata) + db.createPartitionedTable(tdata,"` + tableName2 + `","id").append!(tdata) + ` + _, err = ddb.RunScript(ddbScript) + AssertNil(err) + errClose := ddb.Close() + AssertNil(errClose) +} + +func CreateDiskHashdb(dbPath string, tableName1 string, tableName2 string) { + ddb, err := 
api.NewSimpleDolphinDBClient(context.TODO(), setup.Address, setup.UserName, setup.Password) + AssertNil(err) + ddbScript := ` + dbPath="` + dbPath + `" + if(exists(dbPath)){rmdir(dbPath, true)} + db=database(dbPath, HASH, [INT, 10]) + n=100000 + tdata=table(sort(take(2010.01.01..2010.12.31, n)) as date, take(1..10, n) as id, take(["AMD", "QWE", "CES", "DOP", "ASZ", "FSD", "BBVC", "AWQ", "DS"], n) as sym, rand(100, n) as val) + db.createPartitionedTable(tdata,"` + tableName1 + `","id").append!(tdata) + db.createPartitionedTable(tdata,"` + tableName2 + `","id").append!(tdata) + ` + _, err = ddb.RunScript(ddbScript) + AssertNil(err) + errClose := ddb.Close() + AssertNil(errClose) +} + +func CreateDiskValuedb(dbPath string, tableName1 string, tableName2 string) { + ddb, err := api.NewSimpleDolphinDBClient(context.TODO(), setup.Address, setup.UserName, setup.Password) + AssertNil(err) + ddbScript := ` + dbPath="` + dbPath + `" + if(exists(dbPath)){rmdir(dbPath, true)} + db=database(dbPath, VALUE, 2010.01.01..2010.01.30) + n=100000 + tdata=table(sort(take(2010.01.01..2010.01.30, n)) as date, take(1..10, n) as id, take(["AMD", "QWE", "CES", "DOP", "ASZ", "FSD", "BBVC", "AWQ", "DS"], n) as sym, rand(100, n) as val) + db.createPartitionedTable(tdata,"` + tableName1 + `","date").append!(tdata) + db.createPartitionedTable(tdata,"` + tableName2 + `","date").append!(tdata) + ` + _, err = ddb.RunScript(ddbScript) + AssertNil(err) + errClose := ddb.Close() + AssertNil(errClose) +} + +func CreateDiskListdb(dbPath string, tableName1 string, tableName2 string) { + ddb, err := api.NewSimpleDolphinDBClient(context.TODO(), setup.Address, setup.UserName, setup.Password) + AssertNil(err) + ddbScript := ` + dbPath="` + dbPath + `" + if(exists(dbPath)){rmdir(dbPath, true)} + db=database(dbPath,LIST,[["AMD", "QWE", "CES"],["DOP", "ASZ"],["FSD", "BBVC"],["AWQ", "DS"]]) + n=100000 + tdata=table(sort(take(2010.01.01..2010.12.31, n)) as date, take(1..10, n) as id, take(["AMD", "QWE", "CES", "DOP", "ASZ", "FSD", "BBVC", "AWQ", "DS"], n) as sym, rand(100, n) as val) + db.createPartitionedTable(tdata,"` + tableName1 + `","sym").append!(tdata) + db.createPartitionedTable(tdata,"` + tableName2 + `","sym").append!(tdata) + ` + _, err = ddb.RunScript(ddbScript) + AssertNil(err) + errClose := ddb.Close() + AssertNil(errClose) +} + +func CreateDiskCompoRangeRangedb(dbPath string, tableName1 string, tableName2 string) { + ddb, err := api.NewSimpleDolphinDBClient(context.TODO(), setup.Address, setup.UserName, setup.Password) + AssertNil(err) + ddbScript := ` + dbPath="` + dbPath + `" + if(exists(dbPath)){rmdir(dbPath, true)} + db1=database('', RANGE, 2010.01M+0..12) + db2=database('', RANGE, 1 3 5 7 9 11) + db=database(dbPath, COMPO, [db1, db2]) + n=100000 + tdata=table(sort(take(2010.01.01..2010.12.31, n)) as date, take(1..10, n) as id, take(["AMD", "QWE", "CES", "DOP", "ASZ", "FSD", "BBVC", "AWQ", "DS"], n) as sym, rand(100, n) as val) + db.createPartitionedTable(tdata,"` + tableName1 + `",["date", "id"]).append!(tdata) + db.createPartitionedTable(tdata,"` + tableName2 + `",["date", "id"]).append!(tdata) + ` + _, err = ddb.RunScript(ddbScript) + AssertNil(err) + errClose := ddb.Close() + AssertNil(errClose) +} + +func SaveText(ddb api.DolphinDB, obj string, remoteFilePath string) error { + t := new(api.SaveTextRequest). + SetFileName(remoteFilePath). 
+ SetObj(obj) + err := ddb.SaveText(t) + return err +} + +func CreateDBConnectionPool(threadNumCount int, loadbalance bool) *api.DBConnectionPool { + opt := &api.PoolOption{ + Address: setup.Address, + UserID: setup.UserName, + Password: setup.Password, + PoolSize: threadNumCount, + LoadBalance: loadbalance, + } + pool, err := api.NewDBConnectionPool(opt) + AssertNil(err) + return pool +} + +func ClearEnv() { + ddb, err := api.NewSimpleDolphinDBClient(context.TODO(), setup.Address, setup.UserName, setup.Password) + AssertNil(err) + _, err = ddb.RunScript("a = getStreamingStat().pubTables\n" + + "for(i in a){\n" + + "\ttry{stopPublishTable(i.subscriber.split(\":\")[0],int(i.subscriber.split(\":\")[1]),i.tableName,i.actions)}catch(ex){}\n" + + "}") + AssertNil(err) + _, err = ddb.RunScript("def getAllShare(){\n" + + "\treturn select name from objs(true) where shared=1\n" + + "\t}\n" + + "\n" + + "def clearShare(){\n" + + "\tlogin(`admin,`123456)\n" + + "\tallShare=exec name from pnodeRun(getAllShare)\n" + + "\tfor(i in allShare){\n" + + "\t\ttry{\n" + + "\t\t\trpc((exec node from pnodeRun(getAllShare) where name =i)[0],clearTablePersistence,objByName(i))\n" + + "\t\t\t}catch(ex1){}\n" + + "\t\trpc((exec node from pnodeRun(getAllShare) where name =i)[0],undef,i,SHARED)\n" + + "\t}\n" + + "\ttry{\n" + + "\t\tPST_DIR=rpc(getControllerAlias(),getDataNodeConfig{getNodeAlias()})['persistenceDir']\n" + + "\t}catch(ex1){}\n" + + "}\n" + + "clearShare()") + AssertNil(err) + err = ddb.Close() + AssertNil(err) +} + +func ClearStreamTable(tableName string) { + ddb, err := api.NewSimpleDolphinDBClient(context.TODO(), setup.Address, setup.UserName, setup.Password) + AssertNil(err) + script := "login(`admin,`123456);" + + "try{dropStreamTable('" + tableName + "')}catch(ex){};" + _, err = ddb.RunScript(script) + AssertNil(err) + err = ddb.Close() + AssertNil(err) +} + +func CheckmodelTableEqual(t1 *model.Table, t2 *model.Table, n int) bool { + for i := 0; i < t1.Rows(); i++ { + for j := 0; j < len(t1.GetColumnNames()); j++ { + if t1.GetColumnByIndex(j).Get(i).Value() != t2.GetColumnByIndex(j).Get(n+i).Value() { + return false + } + } + } + return true +} diff --git a/test/table_test.go b/test/table_test.go new file mode 100644 index 0000000..311860b --- /dev/null +++ b/test/table_test.go @@ -0,0 +1,604 @@ +package test + +import ( + "context" + "fmt" + "testing" + "time" + + "github.com/dolphindb/api-go/api" + "github.com/dolphindb/api-go/model" + "github.com/dolphindb/api-go/test/setup" + . 
"github.com/smartystreets/goconvey/convey" + "github.com/stretchr/testify/assert" +) + +func StringToBytes(data string) []byte { + return []byte(data) +} + +func TestTableDataType(t *testing.T) { + Convey("Test table prepare", t, func() { + db, err := api.NewSimpleDolphinDBClient(context.TODO(), setup.Address, setup.UserName, setup.Password) + So(err, ShouldBeNil) + Convey("Test table only one rows:", func() { + Convey("Test table integer type:", func() { + s, err := db.RunScript(` + table([68] as intv, + long([-94]) as longv, + short([65]) as shortv, + char([0]) as charv, + [true] as boolv + )`) + So(err, ShouldBeNil) + memTable := s.(*model.Table) + Convey("Test table int type:", func() { + reint := memTable.GetColumnByName(memTable.GetColumnNames()[0]) + So(reint.GetDataType(), ShouldEqual, model.DtInt) + So(reint.GetDataForm(), ShouldResemble, model.DfVector) + So(reint.Rows(), ShouldEqual, 1) + re := reint.Data.Value() + tmp := []int{68} + for i := 0; i < reint.Rows(); i++ { + So(re[i], ShouldEqual, tmp[i]) + } + }) + Convey("Test table long type:", func() { + relong := memTable.GetColumnByName(memTable.GetColumnNames()[1]) + So(relong.GetDataType(), ShouldEqual, model.DtLong) + So(relong.GetDataForm(), ShouldResemble, model.DfVector) + So(relong.Rows(), ShouldEqual, 1) + re := relong.Data.Value() + tmp := []int64{-94} + for i := 0; i < relong.Rows(); i++ { + So(re[i], ShouldEqual, tmp[i]) + } + }) + Convey("Test table short type:", func() { + reshort := memTable.GetColumnByName(memTable.GetColumnNames()[2]) + So(reshort.GetDataType(), ShouldEqual, model.DtShort) + So(reshort.GetDataForm(), ShouldResemble, model.DfVector) + So(reshort.Rows(), ShouldEqual, 1) + re := reshort.Data.Value() + tmp := []int16{65} + for i := 0; i < reshort.Rows(); i++ { + So(re[i], ShouldEqual, tmp[i]) + } + }) + Convey("Test table char type:", func() { + rechar := memTable.GetColumnByName(memTable.GetColumnNames()[3]) + So(rechar.GetDataType(), ShouldEqual, model.DtChar) + So(rechar.GetDataForm(), ShouldResemble, model.DfVector) + So(rechar.Rows(), ShouldEqual, 1) + re := rechar.Data.Value() + tmp := []byte{0} + for i := 0; i < rechar.Rows(); i++ { + So(re[i], ShouldEqual, tmp[i]) + } + }) + Convey("Test table bool type:", func() { + rebool := memTable.GetColumnByName(memTable.GetColumnNames()[4]) + So(rebool.GetDataType(), ShouldEqual, model.DtBool) + So(rebool.GetDataForm(), ShouldResemble, model.DfVector) + So(rebool.Rows(), ShouldEqual, 1) + re := rebool.Data.Value() + tmp := []bool{true} + for i := 0; i < rebool.Rows(); i++ { + So(re[i], ShouldEqual, tmp[i]) + } + }) + }) + Convey("Test table string and symbol type:", func() { + s, err := db.RunScript(` + table(symbol(["AAPL"]) as sym, + "A" + string(1) as stringv)`) + So(err, ShouldBeNil) + memTable := s.(*model.Table) + Convey("Test table symbol type:", func() { + resym := memTable.GetColumnByName(memTable.GetColumnNames()[0]) + So(resym.GetDataType(), ShouldEqual, model.DtSymbol) + So(resym.GetDataForm(), ShouldResemble, model.DfVector) + So(resym.Rows(), ShouldEqual, 1) + re := resym.Data.Value() + tmp := []string{"AAPL"} + for i := 0; i < resym.Rows(); i++ { + So(re[i], ShouldEqual, tmp[i]) + } + }) + Convey("Test table string type:", func() { + reString := memTable.GetColumnByName(memTable.GetColumnNames()[1]) + So(reString.GetDataType(), ShouldEqual, model.DtString) + So(reString.GetDataForm(), ShouldResemble, model.DfVector) + So(reString.Rows(), ShouldEqual, 1) + re := reString.Data.Value() + tmp := []string{"A1"} + for i := 0; i < 
reString.Rows(); i++ { + So(re[i], ShouldEqual, tmp[i]) + } + }) + }) + Convey("Test table temporal type:", func() { + s, err := db.RunScript(` + table([1970.01.06] as datev, + [1970.01.01T00:01:34] as datetimev, + [1969.12.31T23:59:59.946] as timestampv, + [1968.01M] as month, + [00:00:00.007] as timev, + [00:01:02] as secondv, + [00:35m] as minutev, + [00:00:00.000000032] as nanotimev, + [1969.12.31T23:59:59.999999942] as nanotimestampv)`) + So(err, ShouldBeNil) + memTable := s.(*model.Table) + Convey("Test table date type:", func() { + redate := memTable.GetColumnByName(memTable.GetColumnNames()[0]) + So(redate.GetDataType(), ShouldEqual, model.DtDate) + So(redate.GetDataForm(), ShouldResemble, model.DfVector) + So(redate.Rows(), ShouldEqual, 1) + re := redate.Data.Value() + datev := time.Date(1970, time.January, 06, 0, 0, 0, 0, time.UTC) + tmp := []time.Time{datev} + for i := 0; i < redate.Rows(); i++ { + assert.Equal(t, re[i], tmp[i]) + } + }) + Convey("Test table datetime type:", func() { + redatetime := memTable.GetColumnByName(memTable.GetColumnNames()[1]) + So(redatetime.GetDataType(), ShouldEqual, model.DtDatetime) + So(redatetime.GetDataForm(), ShouldResemble, model.DfVector) + So(redatetime.Rows(), ShouldEqual, 1) + re := redatetime.Data.Value() + datetimev := time.Date(1970, time.January, 01, 0, 01, 34, 0, time.UTC) + tmp := []time.Time{datetimev} + for i := 0; i < redatetime.Rows(); i++ { + assert.Equal(t, re[i], tmp[i]) + } + }) + }) + }) + Convey("Test table insert into one rows", func() { + Convey("Test table insert into int and long and short and char and bool rows", func() { + _, err := db.RunScript(`t=table(100:0, ["id", "longv", "shortv", "charv", "boolv"], + [INT, LONG, SHORT, CHAR, BOOL])`) + So(err, ShouldBeNil) + var id int32 = 10 + var longv int64 = 11 + var shortv int16 = 9 + var charv byte = 23 + var boolv bool = true + _, err = db.RunScript(fmt.Sprintf("insert into t values(%v, %v, %v, %v, %v)", id, longv, shortv, charv, boolv)) + So(err, ShouldBeNil) + s, err := db.RunScript("t") + So(err, ShouldBeNil) + memTable := s.(*model.Table) + for _, i := range memTable.GetColumnNames() { + col := memTable.GetColumnByName(i) + So(col.GetDataForm(), ShouldResemble, model.DfVector) + So(col.Rows(), ShouldEqual, 1) + } + reint := memTable.GetColumnByName(memTable.GetColumnNames()[0]) + So(reint.GetDataType(), ShouldEqual, model.DtInt) + assert.Equal(t, reint.String(), "vector([10])") + relong := memTable.GetColumnByName(memTable.GetColumnNames()[1]) + So(relong.GetDataType(), ShouldEqual, model.DtLong) + assert.Equal(t, relong.String(), "vector([11])") + reshort := memTable.GetColumnByName(memTable.GetColumnNames()[2]) + So(reshort.GetDataType(), ShouldEqual, model.DtShort) + assert.Equal(t, reshort.String(), "vector([9])") + rechar := memTable.GetColumnByName(memTable.GetColumnNames()[3]) + So(rechar.GetDataType(), ShouldEqual, model.DtChar) + assert.Equal(t, rechar.String(), "vector([23])") + rebool := memTable.GetColumnByName(memTable.GetColumnNames()[4]) + So(rebool.GetDataType(), ShouldEqual, model.DtBool) + assert.Equal(t, rebool.String(), "vector([true])") + }) + Convey("Test table insert into doublev and floatv rows", func() { + _, err := db.RunScript(`t=table(100:0, ["doublev", "floatv"], [DOUBLE, FLOAT])`) + So(err, ShouldBeNil) + var doublev float64 = 22.8 + var floatv float32 = 10.5 + _, err = db.RunScript(fmt.Sprintf("insert into t values(%v, %v)", doublev, floatv)) + So(err, ShouldBeNil) + s, err := db.RunScript("t") + So(err, ShouldBeNil) + memTable := 
s.(*model.Table) + for _, i := range memTable.GetColumnNames() { + col := memTable.GetColumnByName(i) + So(col.GetDataForm(), ShouldResemble, model.DfVector) + So(col.Rows(), ShouldEqual, 1) + } + redouble := memTable.GetColumnByName(memTable.GetColumnNames()[0]) + So(redouble.GetDataType(), ShouldEqual, model.DtDouble) + assert.Equal(t, redouble.String(), "vector([22.8])") + refloat := memTable.GetColumnByName(memTable.GetColumnNames()[1]) + So(refloat.GetDataType(), ShouldEqual, model.DtFloat) + assert.Equal(t, refloat.String(), "vector([10.5])") + }) + Convey("Test table insert into symbol and string rows", func() { + _, err := db.RunScript(`t=table(100:0, ["sym", "stringv"], [SYMBOL, STRING])`) + So(err, ShouldBeNil) + var symv string = "AAPL" + var colv string = "A1" + _, err = db.RunScript(fmt.Sprintf("insert into t values('%s', '%s')", symv, colv)) + So(err, ShouldBeNil) + s, err := db.RunScript("t") + So(err, ShouldBeNil) + memTable := s.(*model.Table) + for _, i := range memTable.GetColumnNames() { + col := memTable.GetColumnByName(i) + So(col.GetDataForm(), ShouldResemble, model.DfVector) + So(col.Rows(), ShouldEqual, 1) + } + resymbol := memTable.GetColumnByName(memTable.GetColumnNames()[0]) + So(resymbol.GetDataType(), ShouldEqual, model.DtSymbol) + So(resymbol.String(), ShouldEqual, "vector([AAPL])") + reString := memTable.GetColumnByName(memTable.GetColumnNames()[1]) + So(reString.GetDataType(), ShouldEqual, model.DtString) + So(reString.String(), ShouldEqual, "vector([A1])") + }) + Convey("Test table insert into temporal rows", func() { + _, err := db.RunScript(`t=table(100:0, ["datev", "datetimev", "timestampv", "nanotimestampv", "datehourv", "monthv", "timev", "secondv", "minutev", "nanotimev"], + [DATE, DATETIME, TIMESTAMP, NANOTIMESTAMP, DATEHOUR, MONTH, TIME, SECOND, MINUTE, NANOTIME])`) + So(err, ShouldBeNil) + datev := time.Date(1969, time.December, 31, 0, 0, 0, 0, time.UTC) + datec := datev.Format("2006.01.02T15:04:05.000") + datetimev := time.Date(1969, time.December, 31, 23, 56, 59, 0, time.UTC) + datetimec := datetimev.Format("2006.01.02T15:04:05.000") + timestampv := time.Date(1969, time.December, 31, 23, 56, 59, 123*1000000, time.UTC) + timestampc := timestampv.Format("2006.01.02T15:04:05.000") + nanotimestampv := time.Date(1969, time.December, 31, 23, 56, 59, 123000999, time.UTC) + nanotimestampc := nanotimestampv.Format("2006.01.02T15:04:05.000000000") + datehourv := time.Date(1969, time.December, 31, 23, 00, 00, 0, time.UTC) + datehourc := datehourv.Format("2006.01.02T15:00:00.000") + monthv := time.Date(1969, time.December, 31, 0, 0, 0, 0, time.UTC) + monthc := monthv.Format("2006.01.01T00:00:00.000") + timev := time.Date(1970, time.January, 1, 23, 56, 59, 123*1000000, time.UTC) + timec := timev.Format("2006.01.02T15:04:05.000") + secondv := time.Date(1970, time.January, 1, 23, 56, 59, 0, time.UTC) + secondc := secondv.Format("15:04:05.000") + minutev := time.Date(1970, time.January, 1, 23, 56, 0, 0, time.UTC) + minutec := minutev.Format("15:04m") + nanotimev := time.Date(1970, time.January, 1, 23, 56, 59, 123123456, time.UTC) + nanotimec := nanotimev.Format("2006.01.02T15:04:05.000000000") + _, err = db.RunScript(fmt.Sprintf("insert into t values(%s, %s, %s, %s, %s, %s, %s, %s, %s,%s)", + datec, datetimec, timestampc, nanotimestampc, datehourc, monthc, timec, secondc, minutec, nanotimec)) + So(err, ShouldBeNil) + s, err := db.RunScript("t") + So(err, ShouldBeNil) + memTable := s.(*model.Table) + for _, i := range memTable.GetColumnNames() { + col := 
memTable.GetColumnByName(i)
+					So(col.GetDataForm(), ShouldResemble, model.DfVector)
+					So(col.Rows(), ShouldEqual, 1)
+				}
+				redate := memTable.GetColumnByName(memTable.GetColumnNames()[0])
+				So(redate.GetDataType(), ShouldEqual, model.DtDate)
+				re := redate.Data.Value()
+				tmp := []time.Time{datev}
+				assert.Equal(t, re[0], tmp[0])
+				redatetime := memTable.GetColumnByName(memTable.GetColumnNames()[1])
+				So(redatetime.GetDataType(), ShouldEqual, model.DtDatetime)
+				re = redatetime.Data.Value()
+				tmp = []time.Time{datetimev}
+				assert.Equal(t, re[0], tmp[0])
+				retimestamp := memTable.GetColumnByName(memTable.GetColumnNames()[2])
+				So(retimestamp.GetDataType(), ShouldEqual, model.DtTimestamp)
+				re = retimestamp.Data.Value()
+				tmp = []time.Time{timestampv}
+				assert.Equal(t, re[0], tmp[0])
+				renanotimestamp := memTable.GetColumnByName(memTable.GetColumnNames()[3])
+				So(renanotimestamp.GetDataType(), ShouldEqual, model.DtNanoTimestamp)
+				re = renanotimestamp.Data.Value()
+				tmp = []time.Time{nanotimestampv}
+				assert.Equal(t, re[0], tmp[0])
+				redatehour := memTable.GetColumnByName(memTable.GetColumnNames()[4])
+				So(redatehour.GetDataType(), ShouldEqual, model.DtDateHour)
+				re = redatehour.Data.Value()
+				tmp = []time.Time{datehourv}
+				assert.Equal(t, re[0], tmp[0])
+				remonth := memTable.GetColumnByName(memTable.GetColumnNames()[5])
+				So(remonth.GetDataType(), ShouldEqual, model.DtMonth)
+				re = remonth.Data.Value()
+				tmp = []time.Time{time.Date(1969, time.December, 1, 0, 0, 0, 0, time.UTC)}
+				assert.Equal(t, re[0], tmp[0])
+				retime := memTable.GetColumnByName(memTable.GetColumnNames()[6])
+				So(retime.GetDataType(), ShouldEqual, model.DtTime)
+				re = retime.Data.Value()
+				tmp = []time.Time{timev}
+				assert.Equal(t, re[0], tmp[0])
+				resecond := memTable.GetColumnByName(memTable.GetColumnNames()[7])
+				So(resecond.GetDataType(), ShouldEqual, model.DtSecond)
+				re = resecond.Data.Value()
+				tmp = []time.Time{secondv}
+				assert.Equal(t, re[0], tmp[0])
+				reminute := memTable.GetColumnByName(memTable.GetColumnNames()[8])
+				So(reminute.GetDataType(), ShouldEqual, model.DtMinute)
+				re = reminute.Data.Value()
+				tmp = []time.Time{minutev}
+				assert.Equal(t, re[0], tmp[0])
+				renanotime := memTable.GetColumnByName(memTable.GetColumnNames()[9])
+				So(renanotime.GetDataType(), ShouldEqual, model.DtNanoTime)
+				re = renanotime.Data.Value()
+				tmp = []time.Time{nanotimev}
+				assert.Equal(t, re[0], tmp[0])
+			})
+			Convey("Test table insert into special type rows", func() {
+				_, err := db.RunScript(`t=table(100:0, ["uuidv", "int128v", "blobv", "ipv"], [UUID, INT128, BLOB, IPADDR])`)
+				So(err, ShouldBeNil)
+				uuidv := `uuid("7d943e7f-5660-e015-a895-fa4da2b36c43")`
+				int128v := `int128("7667974ea2fb155252559cc28b4a8efa")`
+				ipaddrv := `ipaddr("a9b7:f65:9be1:20fd:741a:97ac:6ce5:1dd")`
+				blobv := `blob("ALMS")`
+				_, err = db.RunScript(fmt.Sprintf("insert into t values(%s, %s, %s, %s)", uuidv, int128v, blobv, ipaddrv))
+				So(err, ShouldBeNil)
+				s, err := db.RunScript("t")
+				So(err, ShouldBeNil)
+				memTable := s.(*model.Table)
+				for _, i := range memTable.GetColumnNames() {
+					col := memTable.GetColumnByName(i)
+					So(col.GetDataForm(), ShouldResemble, model.DfVector)
+					So(col.Rows(), ShouldEqual, 1)
+				}
+				reuuid := memTable.GetColumnByName(memTable.GetColumnNames()[0])
+				So(reuuid.GetDataType(), ShouldEqual, model.DtUUID)
+				So(reuuid.String(), ShouldEqual, "vector([7d943e7f-5660-e015-a895-fa4da2b36c43])")
+				reint128 := memTable.GetColumnByName(memTable.GetColumnNames()[1])
+				So(reint128.GetDataType(), ShouldEqual, model.DtInt128)
+				So(reint128.String(), ShouldEqual, 
"vector([7667974ea2fb155252559cc28b4a8efa])") + reblob := memTable.GetColumnByName(memTable.GetColumnNames()[2]) + So(reblob.GetDataType(), ShouldEqual, model.DtBlob) + re := reblob.Data.Value() + tmp := StringToBytes("ALMS") + So(re[0], ShouldResemble, tmp) + }) + }) + }) +} + +func TestTableWithCapacity(t *testing.T) { + Convey("Test_function_TableWithCapacity_prepare", t, func() { + ddb, err := api.NewSimpleDolphinDBClient(context.TODO(), setup.Address, setup.UserName, setup.Password) + So(err, ShouldBeNil) + Convey("Drop all Databases", func() { + dbPaths := []string{DfsDBPath, DiskDBPath} + for _, dbPath := range dbPaths { + script := ` + if(existsDatabase("` + dbPath + `")){ + dropDatabase("` + dbPath + `") + } + if(exists("` + dbPath + `")){ + rmdir("` + dbPath + `", true) + } + ` + _, err = ddb.RunScript(script) + So(err, ShouldBeNil) + re, err := ddb.RunScript(`existsDatabase("` + dbPath + `")`) + So(err, ShouldBeNil) + isExitsDatabase := re.(*model.Scalar).DataType.Value() + So(isExitsDatabase, ShouldBeFalse) + } + }) + Convey("Test_function_TableWithCapacityRequest_SetSize_10", func() { + l := new(api.TableWithCapacityRequest). + SetTableName(MemTableName).SetCapacity(100).SetSize(10). + SetColNames([]string{"id", "datev", "str"}). + SetColTypes([]string{"INT", "DATE", "STRING"}) + t, err := ddb.TableWithCapacity(l) + So(err, ShouldBeNil) + originID := t.Data.GetColumnByName("id") + originDatev := t.Data.GetColumnByName("datev") + So(originID.String(), ShouldEqual, "vector([0, 0, 0, 0, 0, 0, 0, 0, 0, 0])") + So(originDatev.String(), ShouldEqual, "vector([1970.01.01, 1970.01.01, 1970.01.01, 1970.01.01, 1970.01.01, 1970.01.01, 1970.01.01, 1970.01.01, 1970.01.01, 1970.01.01])") + _, err = ddb.RunScript(`t=table(1..10 as id, 1969.12.26+ 1..10 as datev, "A"+string(1..10) as str); ` + MemTableName + `.append!(t)`) + So(err, ShouldBeNil) + reTmp, err := ddb.RunScript(`select * from ` + MemTableName + ``) + So(err, ShouldBeNil) + reTable := reTmp.(*model.Table) + reID := reTable.GetColumnByName("id") + reDatev := reTable.GetColumnByName("datev") + reStr := reTable.GetColumnByName("str") + So(reID.String(), ShouldEqual, "vector([0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10])") + So(reDatev.String(), ShouldEqual, "vector([1970.01.01, 1970.01.01, 1970.01.01, 1970.01.01, 1970.01.01, 1970.01.01, 1970.01.01, 1970.01.01, 1970.01.01, 1970.01.01, 1969.12.27, 1969.12.28, 1969.12.29, 1969.12.30, 1969.12.31, 1970.01.01, 1970.01.02, 1970.01.03, 1970.01.04, 1970.01.05])") + So(reStr.String(), ShouldEqual, "vector([, , , , , , , , , , A1, A2, A3, A4, A5, A6, A7, A8, A9, A10])") + }) + + Convey("Test_function_TableWithCapacityRequest_SetSize_0", func() { + l := new(api.TableWithCapacityRequest). + SetTableName(MemTableName).SetCapacity(100).SetSize(0). + SetColNames([]string{"id", "datev", "str"}). 
+ SetColTypes([]string{"INT", "DATE", "STRING"}) + t, err := ddb.TableWithCapacity(l) + So(err, ShouldBeNil) + originID := t.Data.GetColumnByName("id") + originDatev := t.Data.GetColumnByName("datev") + originstr := t.Data.GetColumnByName("str") + So(originID.String(), ShouldEqual, "vector([])") + So(originDatev.String(), ShouldEqual, "vector([])") + So(originstr.String(), ShouldEqual, "vector([])") + _, err = ddb.RunScript(`t=table(1..10 as id, 1969.12.26+ 1..10 as datev, "A"+string(1..10) as str); ` + MemTableName + `.append!(t)`) + So(err, ShouldBeNil) + reTmp, err := ddb.RunScript(`select * from ` + MemTableName + ``) + So(err, ShouldBeNil) + reTable := reTmp.(*model.Table) + reID := reTable.GetColumnByName("id") + reDatev := reTable.GetColumnByName("datev") + reStr := reTable.GetColumnByName("str") + So(reID.String(), ShouldEqual, "vector([1, 2, 3, 4, 5, 6, 7, 8, 9, 10])") + So(reDatev.String(), ShouldEqual, "vector([1969.12.27, 1969.12.28, 1969.12.29, 1969.12.30, 1969.12.31, 1970.01.01, 1970.01.02, 1970.01.03, 1970.01.04, 1970.01.05])") + So(reStr.String(), ShouldEqual, "vector([A1, A2, A3, A4, A5, A6, A7, A8, A9, A10])") + }) + Convey("Test_function_TableWithCapacityRequest_SetCapacity_1023", func() { + l := new(api.TableWithCapacityRequest). + SetTableName(MemTableName).SetCapacity(1023).SetSize(0). + SetColNames([]string{"id", "datev", "str"}). + SetColTypes([]string{"INT", "DATE", "STRING"}) + t, err := ddb.TableWithCapacity(l) + So(err, ShouldBeNil) + originID := t.Data.GetColumnByName("id") + originDatev := t.Data.GetColumnByName("datev") + originstr := t.Data.GetColumnByName("str") + So(originID.String(), ShouldEqual, "vector([])") + So(originDatev.String(), ShouldEqual, "vector([])") + So(originstr.String(), ShouldEqual, "vector([])") + _, err = ddb.RunScript(`t=table(1..10 as id, 1969.12.26+ 1..10 as datev, "A"+string(1..10) as str); ` + MemTableName + `.append!(t)`) + So(err, ShouldBeNil) + reTmp, err := ddb.RunScript(`select * from ` + MemTableName + ``) + So(err, ShouldBeNil) + reTable := reTmp.(*model.Table) + reID := reTable.GetColumnByName("id") + reDatev := reTable.GetColumnByName("datev") + reStr := reTable.GetColumnByName("str") + So(reID.String(), ShouldEqual, "vector([1, 2, 3, 4, 5, 6, 7, 8, 9, 10])") + So(reDatev.String(), ShouldEqual, "vector([1969.12.27, 1969.12.28, 1969.12.29, 1969.12.30, 1969.12.31, 1970.01.01, 1970.01.02, 1970.01.03, 1970.01.04, 1970.01.05])") + So(reStr.String(), ShouldEqual, "vector([A1, A2, A3, A4, A5, A6, A7, A8, A9, A10])") + }) + Convey("Test_function_TableWithCapacityRequest_SetCapacity_1025", func() { + l := new(api.TableWithCapacityRequest). + SetTableName(MemTableName).SetCapacity(1025).SetSize(0). + SetColNames([]string{"id", "datev", "str"}). 
+		Convey("Test_function_TableWithCapacityRequest_SetCapacity_1025", func() {
+			l := new(api.TableWithCapacityRequest).
+				SetTableName(MemTableName).SetCapacity(1025).SetSize(0).
+				SetColNames([]string{"id", "datev", "str"}).
+				SetColTypes([]string{"INT", "DATE", "STRING"})
+			tbl, err := ddb.TableWithCapacity(l)
+			So(err, ShouldBeNil)
+			originID := tbl.Data.GetColumnByName("id")
+			originDatev := tbl.Data.GetColumnByName("datev")
+			originStr := tbl.Data.GetColumnByName("str")
+			So(originID.String(), ShouldEqual, "vector([])")
+			So(originDatev.String(), ShouldEqual, "vector([])")
+			So(originStr.String(), ShouldEqual, "vector([])")
+			_, err = ddb.RunScript(`t=table(1..10 as id, 1969.12.26+ 1..10 as datev, "A"+string(1..10) as str); ` + MemTableName + `.append!(t)`)
+			So(err, ShouldBeNil)
+			reTmp, err := ddb.RunScript(`select * from ` + MemTableName)
+			So(err, ShouldBeNil)
+			reTable := reTmp.(*model.Table)
+			reID := reTable.GetColumnByName("id")
+			reDatev := reTable.GetColumnByName("datev")
+			reStr := reTable.GetColumnByName("str")
+			So(reID.String(), ShouldEqual, "vector([1, 2, 3, 4, 5, 6, 7, 8, 9, 10])")
+			So(reDatev.String(), ShouldEqual, "vector([1969.12.27, 1969.12.28, 1969.12.29, 1969.12.30, 1969.12.31, 1970.01.01, 1970.01.02, 1970.01.03, 1970.01.04, 1970.01.05])")
+			So(reStr.String(), ShouldEqual, "vector([A1, A2, A3, A4, A5, A6, A7, A8, A9, A10])")
+		})
+	})
+}
+
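+// TestTable covers api.TableRequest: tables of 3, 1023, 1025 and 3,000,000 rows
+// are created on the server, read back with a SELECT and compared cell by cell,
+// and the GetHandle, GetSession and String accessors are verified.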
+ AddTableParam("x", "take(102.1 33.4 73.6, 3000000)") + origintable, err := ddb.Table(newTable) + So(err, ShouldBeNil) + reTable, err := ddb.RunScript("select * from " + MemTableName + "") + reTablex := reTable.(*model.Table) + So(err, ShouldBeNil) + res := CompareTablesDataformTable(reTablex, origintable) + So(res, ShouldBeTrue) + }) + Convey("Test_function_Table_GetHandle", func() { + newTable := new(api.TableRequest). + SetTableName(MemTableName). + AddTableParam("id", "`XOM`GS`AAPL"). + AddTableParam("x", "102.1 33.4 73.6") + origintable, err := ddb.Table(newTable) + So(err, ShouldBeNil) + reTable, err := ddb.RunScript("select * from " + MemTableName + "") + reTablex := reTable.(*model.Table) + So(err, ShouldBeNil) + res := CompareTablesDataformTable(reTablex, origintable) + So(res, ShouldBeTrue) + rehandle := origintable.GetHandle() + So(rehandle, ShouldEqual, MemTableName) + }) + Convey("Test_function_Table_GetSession", func() { + newTable := new(api.TableRequest). + SetTableName(MemTableName). + AddTableParam("id", "`XOM`GS`AAPL"). + AddTableParam("x", "102.1 33.4 73.6") + origintable, err := ddb.Table(newTable) + So(err, ShouldBeNil) + reTable, err := ddb.RunScript("select * from " + MemTableName + "") + reTablex := reTable.(*model.Table) + So(err, ShouldBeNil) + res := CompareTablesDataformTable(reTablex, origintable) + So(res, ShouldBeTrue) + reSession := origintable.GetSession() + So(reSession, ShouldNotBeNil) + }) + Convey("Test_function_Table_String", func() { + newTable := new(api.TableRequest). + SetTableName(MemTableName). + AddTableParam("id", "`XOM`GS`AAPL"). + AddTableParam("x", "102.1 33.4 73.6") + origintable, err := ddb.Table(newTable) + So(err, ShouldBeNil) + reTable, err := ddb.RunScript("select * from " + MemTableName + "") + reTablex := reTable.(*model.Table) + So(err, ShouldBeNil) + res := CompareTablesDataformTable(reTablex, origintable) + So(res, ShouldBeTrue) + retostring := origintable.String() + So(retostring, ShouldEqual, reTable.String()) + }) + }) +} diff --git a/test/undef_test.go b/test/undef_test.go new file mode 100644 index 0000000..b4ab36e --- /dev/null +++ b/test/undef_test.go @@ -0,0 +1,155 @@ +package test + +import ( + "context" + "fmt" + "testing" + + "github.com/dolphindb/api-go/api" + "github.com/dolphindb/api-go/model" + "github.com/dolphindb/api-go/test/setup" + . "github.com/smartystreets/goconvey/convey" +) + +func CompareTablesTwoDataformTable(tableName1 *model.Table, tableName2 *model.Table) bool { + if tableName1.Columns() == tableName2.Columns() && tableName1.GetDataTypeString() == tableName2.GetDataTypeString() && tableName1.GetDataForm() == tableName2.GetDataForm() { + for i := 0; i < tableName1.Columns(); i++ { + reTable1 := tableName1.GetColumnByName(tableName1.GetColumnNames()[i]).Data.Value() + reTable2 := tableName2.GetColumnByName(tableName2.GetColumnNames()[i]).Data.Value() + for i := 0; i < tableName1.Rows(); i++ { + if reTable1[i] == reTable2[i] { + continue + } else { + return false + } + } + } + return true + } + return false +} +func TestUndef(t *testing.T) { + Convey("Test_func_undef_prepare", t, func() { + ddb, err := api.NewSimpleDolphinDBClient(context.TODO(), setup.Address, setup.UserName, setup.Password) + So(err, ShouldBeNil) + Convey("Test_func_undef_varible_data", func() { + _, err = ddb.RunScript("x=1") + So(err, ShouldBeNil) + undefReq := new(api.UndefRequest). 
+ SetObj("`x").SetObjType("VAR") + err = ddb.Undef(undefReq) + So(err, ShouldBeNil) + _, err := ddb.RunScript("x") + So(err, ShouldNotBeNil) + }) + Convey("Test_func_undef_only_SetObj", func() { + _, err = ddb.RunScript("x=1;") + So(err, ShouldBeNil) + undefReq := new(api.UndefRequest). + SetObj("`x") + err = ddb.Undef(undefReq) + So(err, ShouldBeNil) + _, err = ddb.RunScript("x") + So(err, ShouldNotBeNil) + }) + Convey("Test_func_undef_varible_data_SetObj", func() { + _, err = ddb.RunScript("def f(a){return a+1}") + So(err, ShouldBeNil) + undefReq := new(api.UndefRequest). + SetObj("`f").SetObjType("DEF") + err = ddb.Undef(undefReq) + So(err, ShouldBeNil) + _, err = ddb.RunScript("f") + So(err, ShouldNotBeNil) + }) + + Convey("Test_func_undef_varible_data_list", func() { + _, err = ddb.RunScript("x=1;y=short(10)") + So(err, ShouldBeNil) + undefReq := new(api.UndefRequest). + SetObj("`x`y") + err = ddb.Undef(undefReq) + So(err, ShouldBeNil) + res, err := ddb.RunScript("x") + fmt.Println(res) + So(err, ShouldNotBeNil) + _, err = ddb.RunScript("y") + So(err, ShouldNotBeNil) + }) + }) +} + +func TestUndefAll(t *testing.T) { + Convey("Test_func_UndefAll_prepare", t, func() { + ddb, err := api.NewSimpleDolphinDBClient(context.TODO(), setup.Address, setup.UserName, setup.Password) + So(err, ShouldBeNil) + Convey("Test_func_UndefAll_varible_data", func() { + _, err = ddb.RunScript("x=1") + So(err, ShouldBeNil) + err = ddb.UndefAll() + So(err, ShouldBeNil) + _, err := ddb.RunScript("x") + So(err, ShouldNotBeNil) + }) + Convey("Test_func_undef_table", func() { + _, err = ddb.RunScript("t=table(1..10 as id)") + So(err, ShouldBeNil) + err = ddb.UndefAll() + So(err, ShouldBeNil) + _, err = ddb.RunScript("t") + So(err, ShouldNotBeNil) + }) + }) +} + +func TestClearAllCache(t *testing.T) { + Convey("Test_func_ClearAllCache_prepare", t, func() { + ddb, err := api.NewSimpleDolphinDBClient(context.TODO(), setup.Address, setup.UserName, setup.Password) + So(err, ShouldBeNil) + Convey("Test_func_ClearAllCache_SetIsDFS_true", func() { + _, err := ddb.RunScript( + `dbPath = "dfs://PTA_test" + if(existsDatabase(dbPath)) + dropDatabase(dbPath) + t = table(100:100, ["sym", "id", "datev", "price"],[SYMBOL, INT, DATE, DOUBLE]) + db=database(dbPath, RANGE, symbol("A"+string(1..6))) + pt = db.createPartitionedTable(t, "pt", "sym") + t=table(["A1", "A2", "A3", "A4", "A5"] as sym, 2 7 12 22 24 as id, 1970.01.01 1969.12.02 1970.03.01 1969.10.02 1970.05.01 as datev, 21.2 4.4 5.5 2.3 6.6 as price) + pt.append!(t)`) + So(err, ShouldBeNil) + orginaltable, err := ddb.RunScript("select * from loadTable(dbPath, \"pt\")") + So(err, ShouldBeNil) + c := new(api.ClearAllCacheRequest). 
+
+		Convey("Test_func_ClearAllCache_SetIsDFS_false", func() {
+			_, err := ddb.RunScript(
+				`dbPath = "dfs://PTA_test"
+				if(existsDatabase(dbPath))
+					dropDatabase(dbPath)
+				t = table(100:100, ["sym", "id", "datev", "price"],[SYMBOL, INT, DATE, DOUBLE])
+				db=database(dbPath, RANGE, symbol("A"+string(1..6)))
+				pt = db.createPartitionedTable(t, "pt", "sym")
+				t=table(["A1", "A2", "A3", "A4", "A5"] as sym, 2 7 12 22 24 as id, 1970.01.01 1969.12.02 1970.03.01 1969.10.02 1970.05.01 as datev, 21.2 4.4 5.5 2.3 6.6 as price)
+				pt.append!(t)`)
+			So(err, ShouldBeNil)
+			originalTable, err := ddb.RunScript("select * from loadTable(dbPath, \"pt\")")
+			So(err, ShouldBeNil)
+			c := new(api.ClearAllCacheRequest).
+				SetIsDFS(false)
+			err = ddb.ClearAllCache(c)
+			So(err, ShouldBeNil)
+			resTable, err := ddb.RunScript("select * from loadTable(dbPath, \"pt\")")
+			So(err, ShouldBeNil)
+			re := CompareTablesTwoDataformTable(originalTable.(*model.Table), resTable.(*model.Table))
+			So(re, ShouldBeTrue)
+		})
+	})
+}
diff --git a/test/util.go b/test/util.go
new file mode 100644
index 0000000..b471b0d
--- /dev/null
+++ b/test/util.go
@@ -0,0 +1,703 @@
+package test
+
+import (
+	"context"
+	"fmt"
+	"reflect"
+
+	"github.com/dolphindb/api-go/api"
+	"github.com/dolphindb/api-go/model"
+	"github.com/dolphindb/api-go/test/setup"
+)
+
+const (
+	DfsDBPath    = "dfs://test_dfsTable"
+	TbName1      = "tb1"
+	TbName2      = "tb2"
+	DfsTBName1   = "pt1"
+	DfsTBName2   = "pt2"
+	DiskDBPath   = setup.WORKDIR + `/testTable`
+	DBhandler    = "db"
+	MemTableName = "memTable"
+)
+
+func AssertNil(err error) {
+	if err != nil {
+		panic(fmt.Sprintf("err is not nil: %s", err.Error()))
+	}
+}
+
+func AssertEqual(s, d interface{}) {
+	if !reflect.DeepEqual(s, d) {
+		panic(fmt.Sprintf("%v != %v", s, d))
+	}
+}
+
+func LoadTextFileName(ddb api.DolphinDB, remoteFilePath string) (*api.Table, error) {
+	t := new(api.LoadTextRequest).
+		SetFileName(remoteFilePath)
+	di, err := ddb.LoadText(t)
+	return di, err
+}
+
+func LoadTextDelimiter(ddb api.DolphinDB, remoteFilePath string, delimiter string) (*api.Table, error) {
+	t := new(api.LoadTextRequest).
+		SetFileName(remoteFilePath).SetDelimiter(delimiter)
+	di, err := ddb.LoadText(t)
+	return di, err
+}
+
+func PloadTextFileName(ddb api.DolphinDB, remoteFilePath string) (*api.Table, error) {
+	t := new(api.PloadTextRequest).
+		SetFileName(remoteFilePath)
+	di, err := ddb.PloadText(t)
+	return di, err
+}
+
+func PloadTextDelimiter(ddb api.DolphinDB, remoteFilePath string, delimiter string) (*api.Table, error) {
+	t := new(api.PloadTextRequest).
+		SetFileName(remoteFilePath).SetDelimiter(delimiter)
+	di, err := ddb.PloadText(t)
+	return di, err
+}
+
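+// CompareTablesDataformTable reports whether a model.Table and an api.Table have
+// the same column count, column types and data form, and identical cell values.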
+func CompareTablesDataformTable(tableName1 *model.Table, tableName2 *api.Table) bool {
+	re2 := tableName2.Data
+	if tableName1.Columns() == re2.Columns() && tableName1.GetDataTypeString() == re2.GetDataTypeString() && tableName1.GetDataForm() == re2.GetDataForm() {
+		for i := 0; i < tableName1.Columns(); i++ {
+			reTable1 := tableName1.GetColumnByName(tableName1.GetColumnNames()[i]).Data.Value()
+			reTable2 := tableName2.Data.GetColumnByName(tableName2.Data.GetColumnNames()[i]).Data.Value()
+			for j := 0; j < tableName1.Rows(); j++ {
+				if reTable1[j] != reTable2[j] {
+					return false
+				}
+			}
+		}
+		return true
+	}
+	return false
+}
+
+// CompareTables reports whether two api.Table results hold identical data.
+func CompareTables(tableName1 *api.Table, tableName2 *api.Table) bool {
+	re1 := tableName1.Data
+	re2 := tableName2.Data
+	if re1.Columns() == re2.Columns() && re1.GetDataTypeString() == re2.GetDataTypeString() && re1.GetDataForm() == re2.GetDataForm() {
+		for i := 0; i < re1.Columns(); i++ {
+			reTable1 := re1.GetColumnByName(re1.GetColumnNames()[i]).Data.Value()
+			reTable2 := re2.GetColumnByName(re2.GetColumnNames()[i]).Data.Value()
+			for j := 0; j < re1.Rows(); j++ {
+				if reTable1[j] != reTable2[j] {
+					return false
+				}
+			}
+		}
+		return true
+	}
+	return false
+}
+
+func DropDatabase(ddb api.DolphinDB, dbPath string) error {
+	t := new(api.DropDatabaseRequest).
+		SetDirectory(dbPath)
+	err := ddb.DropDatabase(t)
+	return err
+}
+
+func ExistsDatabase(ddb api.DolphinDB, dbPath string) (bool, error) {
+	d := new(api.ExistsDatabaseRequest).
+		SetPath(dbPath)
+	b, err := ddb.ExistsDatabase(d)
+	return b, err
+}
+
+func CreateMemTable(ddb api.DolphinDB, tableName string, colName1 string, colName2 string, colName3 string, colName4 string, dataList1 string, dataList2 string, dataList3 string, dataList4 string) (*api.Table, error) {
+	l := new(api.TableRequest).
+		SetTableName(tableName).
+		AddTableParam(colName1, dataList1).
+		AddTableParam(colName2, dataList2).
+		AddTableParam(colName3, dataList3).
+		AddTableParam(colName4, dataList4)
+	t, err := ddb.Table(l)
+	return t, err
+}
+
+func CreateTableWithCapacity(ddb api.DolphinDB, tableName string, capacity int32, size int32, colName []string, typeName []string) (*api.Table, error) {
+	l := new(api.TableWithCapacityRequest).
+		SetTableName(tableName).SetCapacity(capacity).SetSize(size).
+		SetColNames(colName).
+		SetColTypes(typeName)
+	t, err := ddb.TableWithCapacity(l)
+	return t, err
+}
+
+func ExistsTable(ddb api.DolphinDB, dbPath string, tableName string) (bool, error) {
+	l := new(api.ExistsTableRequest).
+		SetDBPath(dbPath).
+		SetTableName(tableName)
+	b, err := ddb.ExistsTable(l)
+	return b, err
+}
+
+func SaveTable(ddb api.DolphinDB, dbPath string, tableName string, dbHandler string) error {
+	l := new(api.SaveTableRequest).
+		SetTableName(tableName).
+		SetDBPath(dbPath).
+		SetDBHandle(dbHandler)
+	err := ddb.SaveTable(l)
+	return err
+}
+
+func DropTable(ddb api.DolphinDB, tableName string, dfsDBPath string) error {
+	t := new(api.DropTableRequest).
+		SetTableName(tableName).
+		SetDBPath(dfsDBPath)
+	err := ddb.DropTable(t)
+	return err
+}
+
+func LoadTable(ddb api.DolphinDB, tableName string, dbPath string) (*api.Table, error) {
+	t := new(api.LoadTableRequest).
+		SetTableName(tableName).
+		SetDatabase(dbPath)
+	df, err := ddb.LoadTable(t)
+	return df, err
+}
+
+func LoadTablePartitions(ddb api.DolphinDB, tableName string, dbPath string, partitions string) (*api.Table, error) {
+	t := new(api.LoadTableRequest).
+		SetTableName(tableName).
+		SetDatabase(dbPath).
+		SetPartitions(partitions)
+	df, err := ddb.LoadTable(t)
+	return df, err
+}
+
+func LoadTableMemoryMode(ddb api.DolphinDB, tableName string, dbPath string, memoryMode bool) (*api.Table, error) {
+	t := new(api.LoadTableRequest).
+		SetTableName(tableName).
+		SetDatabase(dbPath).
+		SetMemoryMode(memoryMode)
+	df, err := ddb.LoadTable(t)
+	return df, err
+}
+
+func LoadTableBySQL(ddb api.DolphinDB, na string, loadSQL string, dbPath string, partitionedTableName string) (*api.Table, error) {
+	t := new(api.LoadTableBySQLRequest).
+		SetSQL(fmt.Sprintf(loadSQL, na)).
+		SetDBPath(dbPath).
+		SetTableName(partitionedTableName)
+	df, err := ddb.LoadTableBySQL(t)
+	return df, err
+}
+
+func Database(ddb api.DolphinDB, dbPath string, dbHandler string) (*api.Database, error) {
+	d := new(api.DatabaseRequest).
+		SetDirectory(dbPath).
+		SetDBHandle(dbHandler)
+	dt, err := ddb.Database(d)
+	return dt, err
+}
+
+func CreateDatabase(ddb api.DolphinDB, dbPath string, dbHandler string, partitionType string, partitionScheme string, location string, engineType string, atomic string) (*api.Database, error) {
+	d := new(api.DatabaseRequest).
+		SetDBHandle(dbHandler).
+		SetDirectory(dbPath).
+		SetPartitionType(partitionType).
+		SetPartitionScheme(partitionScheme).
+		SetEngine(engineType).
+		SetLocations(location).
+		SetAtomic(atomic)
+	dt, err := ddb.Database(d)
+	return dt, err
+}
+
+func CreateTable(db *api.Database, tableName string, dimensionTableName string) (*api.Table, error) {
+	c := new(api.CreateTableRequest).
+		SetSrcTable(tableName).
+		SetDimensionTableName(dimensionTableName)
+	t, err := db.CreateTable(c)
+	return t, err
+}
+
+func CreateDefPartitionedTable(ddb *api.Database, tableName string, partitionedTableName string, partitionColumns []string) (*api.Table, error) {
+	c := new(api.CreatePartitionedTableRequest).
+		SetSrcTable(tableName).
+		SetPartitionedTableName(partitionedTableName).
+		SetPartitionColumns(partitionColumns)
+	t, err := ddb.CreatePartitionedTable(c)
+	return t, err
+}
+
+func DropPartition(db api.DolphinDB, partitionedTableName string, dbPath string, partitionPaths string) error {
+	t := new(api.DropPartitionRequest).
+		SetPartitionPaths(partitionPaths).
+		SetDBPath(dbPath).
+		SetTableName(partitionedTableName)
+	err := db.DropPartition(t)
+	return err
+}
+
+func LoadPartitionedTable(db api.DolphinDB, partitionedTableName string, dbPath string) (*api.Table, error) {
+	t := new(api.LoadTableRequest).
+		SetTableName(partitionedTableName).
+		SetDatabase(dbPath)
+	df, err := db.LoadTable(t)
+	return df, err
+}
+
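+// The CreateDfs* and CreateDisk* helpers below each build a throwaway database of
+// one partition type (dimension, RANGE, HASH, VALUE, LIST or a COMPO combination)
+// holding two identically populated tables, so tests can load and drop them freely.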
+func CreateDfsDimensiondb(dbPath string, tableName1 string, tableName2 string) {
+	ddb, err := api.NewSimpleDolphinDBClient(context.TODO(), setup.Address, setup.UserName, setup.Password)
+	AssertNil(err)
+	ddbScript := `
+	dbPath="` + dbPath + `"
+	if(existsDatabase(dbPath))
+		dropDatabase(dbPath)
+	db=database(dbPath, RANGE, 1..10)
+	n=100000
+	tdata=table(sort(take(2010.01.01..2010.12.31, n)) as date, 1..n as id, take(["AMD", "QWE", "CES", "DOP", "ASZ", "FSD", "BBVC", "AWQ", "DS"], n) as sym, rand(100, n) as val)
+	db.createTable(tdata, "` + tableName1 + `").append!(tdata)
+	db.createTable(tdata, "` + tableName2 + `").append!(tdata)
+	`
+	_, err = ddb.RunScript(ddbScript)
+	AssertNil(err)
+	errClose := ddb.Close()
+	AssertNil(errClose)
+}
+
+func CreateDfsRangedb(dbPath string, tableName1 string, tableName2 string) {
+	ddb, err := api.NewSimpleDolphinDBClient(context.TODO(), setup.Address, setup.UserName, setup.Password)
+	AssertNil(err)
+	ddbScript := `
+	dbPath="` + dbPath + `"
+	if(existsDatabase(dbPath))
+		dropDatabase(dbPath)
+	db=database(dbPath, RANGE, 0..10*10000+1)
+	n=100000
+	tdata=table(sort(take(2010.01.01..2010.12.31, n)) as date, 1..n as id, take(["AMD", "QWE", "CES", "DOP", "ASZ", "FSD", "BBVC", "AWQ", "DS"], n) as sym, rand(100, n) as val)
+	db.createPartitionedTable(tdata,"` + tableName1 + `","id").append!(tdata)
+	db.createPartitionedTable(tdata,"` + tableName2 + `","id").append!(tdata)
+	`
+	_, err = ddb.RunScript(ddbScript)
+	AssertNil(err)
+	errClose := ddb.Close()
+	AssertNil(errClose)
+}
+
+func CreateDfsRangedbChunkGranularity(dbPath string, tableName1 string, tableName2 string) {
+	ddb, err := api.NewSimpleDolphinDBClient(context.TODO(), setup.Address, setup.UserName, setup.Password)
+	AssertNil(err)
+	ddbScript := `
+	dbPath="` + dbPath + `"
+	if(existsDatabase(dbPath))
+		dropDatabase(dbPath)
+	db=database(dbPath, RANGE, 0..10*10000+1, chunkGranularity="DATABASE")
+	n=100000
+	tdata=table(sort(take(2010.01.01..2010.12.31, n)) as date, 1..n as id, take(["AMD", "QWE", "CES", "DOP", "ASZ", "FSD", "BBVC", "AWQ", "DS"], n) as sym, rand(100, n) as val)
+	db.createPartitionedTable(tdata,"` + tableName1 + `","id").append!(tdata)
+	db.createPartitionedTable(tdata,"` + tableName2 + `","id").append!(tdata)
+	`
+	_, err = ddb.RunScript(ddbScript)
+	AssertNil(err)
+	errClose := ddb.Close()
+	AssertNil(errClose)
+}
+
+func CreateDfsHashdb(dbPath string, tableName1 string, tableName2 string) {
+	ddb, err := api.NewSimpleDolphinDBClient(context.TODO(), setup.Address, setup.UserName, setup.Password)
+	AssertNil(err)
+	ddbScript := `
+	dbPath="` + dbPath + `"
+	if(existsDatabase(dbPath))
+		dropDatabase(dbPath)
+	db=database(dbPath, HASH, [INT,10])
+	n=100000
+	tdata=table(sort(take(2010.01.01..2010.12.31, n)) as date, take(1..10, n) as id, take(["AMD", "QWE", "CES", "DOP", "ASZ", "FSD", "BBVC", "AWQ", "DS"], n) as sym, rand(100, n) as val)
+	db.createPartitionedTable(tdata,"` + tableName1 + `","id").append!(tdata)
+	db.createPartitionedTable(tdata,"` + tableName2 + `","id").append!(tdata)
+	`
+	_, err = ddb.RunScript(ddbScript)
+	AssertNil(err)
+	errClose := ddb.Close()
+	AssertNil(errClose)
+}
+
+func CreateDfsHashdbChunkGranularity(dbPath string, tableName1 string, tableName2 string) {
+	ddb, err := api.NewSimpleDolphinDBClient(context.TODO(), setup.Address, setup.UserName, setup.Password)
+	AssertNil(err)
+	ddbScript := `
+	dbPath="` + dbPath + `"
+	if(existsDatabase(dbPath))
+		dropDatabase(dbPath)
+	db=database(dbPath, HASH, [INT,10], chunkGranularity="DATABASE")
+	n=100000
+	tdata=table(sort(take(2010.01.01..2010.12.31, n)) as date, take(1..10, n) as id, take(["AMD", "QWE", "CES", "DOP", "ASZ", "FSD", "BBVC", "AWQ", "DS"], n) as sym, rand(100, n) as val)
+	db.createPartitionedTable(tdata,"` + tableName1 + `","id").append!(tdata)
+	db.createPartitionedTable(tdata,"` + tableName2 + `","id").append!(tdata)
+	`
+	_, err = ddb.RunScript(ddbScript)
+	AssertNil(err)
+	errClose := ddb.Close()
+	AssertNil(errClose)
+}
+
+func CreateDfsValuedb(dbPath string, tableName1 string, tableName2 string) {
+	ddb, err := api.NewSimpleDolphinDBClient(context.TODO(), setup.Address, setup.UserName, setup.Password)
+	AssertNil(err)
+	ddbScript := `
+	dbPath="` + dbPath + `"
+	if(existsDatabase(dbPath)){dropDatabase(dbPath)}
+	db=database(dbPath, VALUE, 2010.01.01..2010.01.30)
+	n=100000
+	tdata=table(sort(take(2010.01.01..2010.12.31, n)) as date, 1..n as id, take(["AMD", "QWE", "CES", "DOP", "ASZ", "FSD", "BBVC", "AWQ", "DS"], n) as sym, rand(100, n) as val)
+	db.createPartitionedTable(tdata,"` + tableName1 + `","date").append!(tdata)
+	db.createPartitionedTable(tdata,"` + tableName2 + `","date").append!(tdata)
+	`
+	_, err = ddb.RunScript(ddbScript)
+	AssertNil(err)
+	errClose := ddb.Close()
+	AssertNil(errClose)
+}
+
+func CreateDfsValuedbChunkGranularity(dbPath string, tableName1 string, tableName2 string) {
+	ddb, err := api.NewSimpleDolphinDBClient(context.TODO(), setup.Address, setup.UserName, setup.Password)
+	AssertNil(err)
+	ddbScript := `
+	dbPath="` + dbPath + `"
+	if(existsDatabase(dbPath)){dropDatabase(dbPath)}
+	db=database(dbPath, VALUE, 2010.01.01..2010.01.30, chunkGranularity="DATABASE")
+	n=100000
+	tdata=table(sort(take(2010.01.01..2010.12.31, n)) as date, 1..n as id, take(["AMD", "QWE", "CES", "DOP", "ASZ", "FSD", "BBVC", "AWQ", "DS"], n) as sym, rand(100, n) as val)
+	db.createPartitionedTable(tdata,"` + tableName1 + `","date").append!(tdata)
+	db.createPartitionedTable(tdata,"` + tableName2 + `","date").append!(tdata)
+	`
+	_, err = ddb.RunScript(ddbScript)
+	AssertNil(err)
+	errClose := ddb.Close()
+	AssertNil(errClose)
+}
+
+func CreateDfsListdb(dbPath string, tableName1 string, tableName2 string) {
+	ddb, err := api.NewSimpleDolphinDBClient(context.TODO(), setup.Address, setup.UserName, setup.Password)
+	AssertNil(err)
+	ddbScript := `
+	dbPath="` + dbPath + `"
+	if(existsDatabase(dbPath))
+		dropDatabase(dbPath)
+	db=database(dbPath, LIST, [["AMD", "QWE", "CES"],["DOP", "ASZ"],["FSD", "BBVC"],["AWQ", "DS"]])
+	n=100000
+	tdata=table(sort(take(2010.01.01..2010.12.31, n)) as date, 1..n as id, take(["AMD", "QWE", "CES", "DOP", "ASZ", "FSD", "BBVC", "AWQ", "DS"], n) as sym, rand(100, n) as val)
+	db.createPartitionedTable(tdata,"` + tableName1 + `","sym").append!(tdata)
+	db.createPartitionedTable(tdata,"` + tableName2 + `","sym").append!(tdata)
+	`
+	_, err = ddb.RunScript(ddbScript)
+	AssertNil(err)
+	errClose := ddb.Close()
+	AssertNil(errClose)
+}
+
+func CreateDfsListdbChunkGranularity(dbPath string, tableName1 string, tableName2 string) {
+	ddb, err := api.NewSimpleDolphinDBClient(context.TODO(), setup.Address, setup.UserName, setup.Password)
+	AssertNil(err)
+	ddbScript := `
+	dbPath="` + dbPath + `"
+	if(existsDatabase(dbPath))
+		dropDatabase(dbPath)
+	db=database(dbPath, LIST, [["AMD", "QWE", "CES"],["DOP", "ASZ"],["FSD", "BBVC"],["AWQ", "DS"]], chunkGranularity="DATABASE")
+	n=100000
+	tdata=table(sort(take(2010.01.01..2010.12.31, n)) as date, 1..n as id, take(["AMD", "QWE", "CES", "DOP", "ASZ", "FSD", "BBVC", "AWQ", "DS"], n) as sym, rand(100, n) as val)
+	db.createPartitionedTable(tdata,"` + tableName1 + `","sym").append!(tdata)
+	db.createPartitionedTable(tdata,"` + tableName2 + `","sym").append!(tdata)
+	`
+	_, err = ddb.RunScript(ddbScript)
+	AssertNil(err)
+	errClose := ddb.Close()
+	AssertNil(errClose)
+}
+
"CES", "DOP", "ASZ", "FSD", "BBVC", "AWQ", "DS"], n) as sym, rand(100, n) as val) + db.createPartitionedTable(tdata,"` + tableName1 + `","sym").append!(tdata) + db.createPartitionedTable(tdata,"` + tableName2 + `","sym").append!(tdata) + ` + _, err = ddb.RunScript(ddbScript) + AssertNil(err) + errClose := ddb.Close() + AssertNil(errClose) +} + +func CreateDfsCompoRangeRangedb(dbPath string, tableName1 string, tableName2 string) { + ddb, err := api.NewSimpleDolphinDBClient(context.TODO(), setup.Address, setup.UserName, setup.Password) + AssertNil(err) + ddbScript := ` + dbPath="` + dbPath + `" + if(existsDatabase(dbPath)){dropDatabase(dbPath)} + db1=database('', RANGE, 2010.01M+0..12) + db2=database('', RANGE, 1 3 5 7 9 11) + db=database(dbPath, COMPO, [db1,db2]) + n=100000 + tdata=table(sort(take(2010.01.01..2010.12.31, n)) as date, take(1..10, n) as id, take(["AMD", "QWE", "CES", "DOP", "ASZ", "FSD", "BBVC", "AWQ", "DS"], n) as sym, rand(100, n) as val) + db.createPartitionedTable(tdata,"` + tableName1 + `",["date", "id"]).append!(tdata) + db.createPartitionedTable(tdata,"` + tableName2 + `",["date", "id"]).append!(tdata) + ` + _, err = ddb.RunScript(ddbScript) + AssertNil(err) + errClose := ddb.Close() + AssertNil(errClose) +} + +func CreateDfsCompoRangeRangedbChunkGranularity(dbPath string, tableName1 string, tableName2 string) { + ddb, err := api.NewSimpleDolphinDBClient(context.TODO(), setup.Address, setup.UserName, setup.Password) + AssertNil(err) + ddbScript := ` + dbPath="` + dbPath + `" + if(existsDatabase(dbPath)){dropDatabase(dbPath)} + db1=database('', RANGE, 2010.01M+0..12) + db2=database('', RANGE, 1 3 5 7 9 11) + db=database(dbPath, COMPO, [db1,db2], chunkGranularity="DATABASE") + n=100000 + tdata=table(sort(take(2010.01.01..2010.12.31, n)) as date, take(1..10, n) as id, take(["AMD", "QWE", "CES", "DOP", "ASZ", "FSD", "BBVC", "AWQ", "DS"], n) as sym, rand(100, n) as val) + db.createPartitionedTable(tdata,"` + tableName1 + `",["date", "id"]).append!(tdata) + db.createPartitionedTable(tdata,"` + tableName2 + `",["date", "id"]).append!(tdata) + ` + _, err = ddb.RunScript(ddbScript) + AssertNil(err) + errClose := ddb.Close() + AssertNil(errClose) +} + +func CreateDfsCompoRangeValuedb(dbPath string, tableName1 string, tableName2 string) { + ddb, err := api.NewSimpleDolphinDBClient(context.TODO(), setup.Address, setup.UserName, setup.Password) + AssertNil(err) + ddbScript := ` + dbPath="` + dbPath + `" + if(existsDatabase(dbPath)){dropDatabase(dbPath)} + db1=database('', RANGE, 0..10*10000+1) + db2=database('', VALUE, 2010.01.01..2010.01.30) + db=database(dbPath, COMPO, [db1, db2]) + n=100000 + tdata=table(sort(take(2010.01.01..2010.12.31, n)) as date, 1..n as id, take(["AMD", "QWE", "CES", "DOP", "ASZ", "FSD", "BBVC", "AWQ", "DS"], n) as sym, rand(100, n) as val) + db.createPartitionedTable(tdata,"` + tableName1 + `",["id", "date"]).append!(tdata) + db.createPartitionedTable(tdata,"` + tableName2 + `",["id", "date"]).append!(tdata) + ` + _, err = ddb.RunScript(ddbScript) + AssertNil(err) + errClose := ddb.Close() + AssertNil(errClose) +} + +func CreateDfsCompoRangeHashdb(dbPath string, tableName1 string, tableName2 string) { + ddb, err := api.NewSimpleDolphinDBClient(context.TODO(), setup.Address, setup.UserName, setup.Password) + AssertNil(err) + ddbScript := ` + dbPath="` + dbPath + `" + if(existsDatabase(dbPath)){dropDatabase(dbPath)} + db1=database('', RANGE, 2010.01M+0..12) + db2=database('', HASH, [INT, 10]) + db=database(dbPath, COMPO, [db1, db2]) + n=100000 + 
+	tdata=table(sort(take(2010.01.01..2010.12.31, n)) as date, 1..n as id, take(["AMD", "QWE", "CES", "DOP", "ASZ", "FSD", "BBVC", "AWQ", "DS"], n) as sym, rand(100, n) as val)
+	db.createPartitionedTable(tdata,"` + tableName1 + `",["date", "id"]).append!(tdata)
+	db.createPartitionedTable(tdata,"` + tableName2 + `",["date", "id"]).append!(tdata)
+	`
+	_, err = ddb.RunScript(ddbScript)
+	AssertNil(err)
+	errClose := ddb.Close()
+	AssertNil(errClose)
+}
+
+func CreateDfsCompoRangeListdb(dbPath string, tableName1 string, tableName2 string) {
+	ddb, err := api.NewSimpleDolphinDBClient(context.TODO(), setup.Address, setup.UserName, setup.Password)
+	AssertNil(err)
+	ddbScript := `
+	dbPath="` + dbPath + `"
+	if(existsDatabase(dbPath)){dropDatabase(dbPath)}
+	db1=database('', RANGE, 2010.01M+0..12)
+	db2=database('', LIST, ["AMD", "QWE", "CES", "DOP", "ASZ", "FSD", "BBVC", "AWQ", "DS"])
+	db=database(dbPath, COMPO, [db1, db2])
+	n=100000
+	tdata=table(sort(take(2010.01.01..2010.12.31, n)) as date, 1..n as id, take(["AMD", "QWE", "CES", "DOP", "ASZ", "FSD", "BBVC", "AWQ", "DS"], n) as sym, rand(100, n) as val)
+	db.createPartitionedTable(tdata,"` + tableName1 + `",["date", "sym"]).append!(tdata)
+	db.createPartitionedTable(tdata,"` + tableName2 + `",["date", "sym"]).append!(tdata)
+	`
+	_, err = ddb.RunScript(ddbScript)
+	AssertNil(err)
+	errClose := ddb.Close()
+	AssertNil(errClose)
+}
+
+func CreateDiskUnpartitioneddb(dbPath string, tbName1 string, tbName2 string) {
+	ddb, err := api.NewSimpleDolphinDBClient(context.TODO(), setup.Address, setup.UserName, setup.Password)
+	AssertNil(err)
+	ddbScript := `
+	dbPath="` + dbPath + `"
+	if(exists(dbPath)){rmdir(dbPath, true)}
+	db=database(dbPath)
+	n=100000
+	tdata=table(sort(take(2010.01.01..2010.12.31, n)) as date, 1..n as id, take(["AMD", "QWE", "CES", "DOP", "ASZ", "FSD", "BBVC", "AWQ", "DS"], n) as sym, rand(100, n) as val)
+	saveTable(db, tdata, "` + tbName1 + `")
+	saveTable(db, tdata, "` + tbName2 + `")
+	`
+	_, err = ddb.RunScript(ddbScript)
+	AssertNil(err)
+	errClose := ddb.Close()
+	AssertNil(errClose)
+}
+
+func CreateDiskRangedb(dbPath string, tableName1 string, tableName2 string) {
+	ddb, err := api.NewSimpleDolphinDBClient(context.TODO(), setup.Address, setup.UserName, setup.Password)
+	AssertNil(err)
+	ddbScript := `
+	dbPath="` + dbPath + `"
+	if(exists(dbPath)){rmdir(dbPath, true)}
+	db=database(dbPath, RANGE, 0..10*10000+1)
+	n=100000
+	tdata=table(sort(take(2010.01.01..2010.12.31, n)) as date, 1..n as id, take(["AMD", "QWE", "CES", "DOP", "ASZ", "FSD", "BBVC", "AWQ", "DS"], n) as sym, rand(100, n) as val)
+	db.createPartitionedTable(tdata,"` + tableName1 + `","id").append!(tdata)
+	db.createPartitionedTable(tdata,"` + tableName2 + `","id").append!(tdata)
+	`
+	_, err = ddb.RunScript(ddbScript)
+	AssertNil(err)
+	errClose := ddb.Close()
+	AssertNil(errClose)
+}
+
+func CreateDiskHashdb(dbPath string, tableName1 string, tableName2 string) {
+	ddb, err := api.NewSimpleDolphinDBClient(context.TODO(), setup.Address, setup.UserName, setup.Password)
+	AssertNil(err)
+	ddbScript := `
+	dbPath="` + dbPath + `"
+	if(exists(dbPath)){rmdir(dbPath, true)}
+	db=database(dbPath, HASH, [INT, 10])
+	n=100000
+	tdata=table(sort(take(2010.01.01..2010.12.31, n)) as date, take(1..10, n) as id, take(["AMD", "QWE", "CES", "DOP", "ASZ", "FSD", "BBVC", "AWQ", "DS"], n) as sym, rand(100, n) as val)
+	db.createPartitionedTable(tdata,"` + tableName1 + `","id").append!(tdata)
+	db.createPartitionedTable(tdata,"` + tableName2 + `","id").append!(tdata)
+	`
+	_, err = ddb.RunScript(ddbScript)
+	AssertNil(err)
+	errClose := ddb.Close()
+	AssertNil(errClose)
+}
+
+func CreateDiskValuedb(dbPath string, tableName1 string, tableName2 string) {
+	ddb, err := api.NewSimpleDolphinDBClient(context.TODO(), setup.Address, setup.UserName, setup.Password)
+	AssertNil(err)
+	ddbScript := `
+	dbPath="` + dbPath + `"
+	if(exists(dbPath)){rmdir(dbPath, true)}
+	db=database(dbPath, VALUE, 2010.01.01..2010.01.30)
+	n=100000
+	tdata=table(sort(take(2010.01.01..2010.01.30, n)) as date, take(1..10, n) as id, take(["AMD", "QWE", "CES", "DOP", "ASZ", "FSD", "BBVC", "AWQ", "DS"], n) as sym, rand(100, n) as val)
+	db.createPartitionedTable(tdata,"` + tableName1 + `","date").append!(tdata)
+	db.createPartitionedTable(tdata,"` + tableName2 + `","date").append!(tdata)
+	`
+	_, err = ddb.RunScript(ddbScript)
+	AssertNil(err)
+	errClose := ddb.Close()
+	AssertNil(errClose)
+}
+
+func CreateDiskListdb(dbPath string, tableName1 string, tableName2 string) {
+	ddb, err := api.NewSimpleDolphinDBClient(context.TODO(), setup.Address, setup.UserName, setup.Password)
+	AssertNil(err)
+	ddbScript := `
+	dbPath="` + dbPath + `"
+	if(exists(dbPath)){rmdir(dbPath, true)}
+	db=database(dbPath,LIST,[["AMD", "QWE", "CES"],["DOP", "ASZ"],["FSD", "BBVC"],["AWQ", "DS"]])
+	n=100000
+	tdata=table(sort(take(2010.01.01..2010.12.31, n)) as date, take(1..10, n) as id, take(["AMD", "QWE", "CES", "DOP", "ASZ", "FSD", "BBVC", "AWQ", "DS"], n) as sym, rand(100, n) as val)
+	db.createPartitionedTable(tdata,"` + tableName1 + `","sym").append!(tdata)
+	db.createPartitionedTable(tdata,"` + tableName2 + `","sym").append!(tdata)
+	`
+	_, err = ddb.RunScript(ddbScript)
+	AssertNil(err)
+	errClose := ddb.Close()
+	AssertNil(errClose)
+}
+
+func CreateDiskCompoRangeRangedb(dbPath string, tableName1 string, tableName2 string) {
+	ddb, err := api.NewSimpleDolphinDBClient(context.TODO(), setup.Address, setup.UserName, setup.Password)
+	AssertNil(err)
+	ddbScript := `
+	dbPath="` + dbPath + `"
+	if(exists(dbPath)){rmdir(dbPath, true)}
+	db1=database('', RANGE, 2010.01M+0..12)
+	db2=database('', RANGE, 1 3 5 7 9 11)
+	db=database(dbPath, COMPO, [db1, db2])
+	n=100000
+	tdata=table(sort(take(2010.01.01..2010.12.31, n)) as date, take(1..10, n) as id, take(["AMD", "QWE", "CES", "DOP", "ASZ", "FSD", "BBVC", "AWQ", "DS"], n) as sym, rand(100, n) as val)
+	db.createPartitionedTable(tdata,"` + tableName1 + `",["date", "id"]).append!(tdata)
+	db.createPartitionedTable(tdata,"` + tableName2 + `",["date", "id"]).append!(tdata)
+	`
+	_, err = ddb.RunScript(ddbScript)
+	AssertNil(err)
+	errClose := ddb.Close()
+	AssertNil(errClose)
+}
+
+func SaveText(ddb api.DolphinDB, obj string, remoteFilePath string) error {
+	t := new(api.SaveTextRequest).
+		SetFileName(remoteFilePath).
+		SetObj(obj)
+	err := ddb.SaveText(t)
+	return err
+}
+
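+// CreateDBConnectionPool opens an api.DBConnectionPool of the given size against
+// the test server, optionally with load balancing enabled, and panics on failure.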
+func CreateDBConnectionPool(threadNumCount int, loadBalance bool) *api.DBConnectionPool {
+	opt := &api.PoolOption{
+		Address:     setup.Address,
+		UserID:      setup.UserName,
+		Password:    setup.Password,
+		PoolSize:    threadNumCount,
+		LoadBalance: loadBalance,
+	}
+	pool, err := api.NewDBConnectionPool(opt)
+	AssertNil(err)
+	return pool
+}
+
+// ClearEnv stops publishing on every streaming table and drops all shared tables,
+// returning the test server to a clean state between test runs.
+func ClearEnv() {
+	ddb, err := api.NewSimpleDolphinDBClient(context.TODO(), setup.Address, setup.UserName, setup.Password)
+	AssertNil(err)
+	_, err = ddb.RunScript("a = getStreamingStat().pubTables\n" +
+		"for(i in a){\n" +
+		"\ttry{stopPublishTable(i.subscriber.split(\":\")[0],int(i.subscriber.split(\":\")[1]),i.tableName,i.actions)}catch(ex){}\n" +
+		"}")
+	AssertNil(err)
+	_, err = ddb.RunScript("def getAllShare(){\n" +
+		"\treturn select name from objs(true) where shared=1\n" +
+		"\t}\n" +
+		"\n" +
+		"def clearShare(){\n" +
+		"\tlogin(`admin,`123456)\n" +
+		"\tallShare=exec name from pnodeRun(getAllShare)\n" +
+		"\tfor(i in allShare){\n" +
+		"\t\ttry{\n" +
+		"\t\t\trpc((exec node from pnodeRun(getAllShare) where name =i)[0],clearTablePersistence,objByName(i))\n" +
+		"\t\t\t}catch(ex1){}\n" +
+		"\t\trpc((exec node from pnodeRun(getAllShare) where name =i)[0],undef,i,SHARED)\n" +
+		"\t}\n" +
+		"\ttry{\n" +
+		"\t\tPST_DIR=rpc(getControllerAlias(),getDataNodeConfig{getNodeAlias()})['persistenceDir']\n" +
+		"\t}catch(ex1){}\n" +
+		"}\n" +
+		"clearShare()")
+	AssertNil(err)
+	err = ddb.Close()
+	AssertNil(err)
+}
+
+func ClearStreamTable(tableName string) {
+	ddb, err := api.NewSimpleDolphinDBClient(context.TODO(), setup.Address, setup.UserName, setup.Password)
+	AssertNil(err)
+	script := "login(`admin,`123456);" +
+		"try{dropStreamTable('" + tableName + "')}catch(ex){};"
+	_, err = ddb.RunScript(script)
+	AssertNil(err)
+	err = ddb.Close()
+	AssertNil(err)
+}
+
+// CheckmodelTableEqual reports whether every cell of t1 matches the corresponding
+// cell of t2 offset by n rows, i.e. row i of t1 equals row n+i of t2.
+func CheckmodelTableEqual(t1 *model.Table, t2 *model.Table, n int) bool {
+	for i := 0; i < t1.Rows(); i++ {
+		for j := 0; j < len(t1.GetColumnNames()); j++ {
+			if t1.GetColumnByIndex(j).Get(i).Value() != t2.GetColumnByIndex(j).Get(n+i).Value() {
+				return false
+			}
+		}
+	}
+	return true
+}