diff --git a/.github/workflows/cluster-it-1c1d.yml b/.github/workflows/cluster-it-1c1d.yml index 46f591f9ce67..782b5fd90ac9 100644 --- a/.github/workflows/cluster-it-1c1d.yml +++ b/.github/workflows/cluster-it-1c1d.yml @@ -27,7 +27,7 @@ concurrency: env: MAVEN_OPTS: -Dhttp.keepAlive=false -Dmaven.wagon.http.pool=false -Dmaven.wagon.http.retryHandler.class=standard -Dmaven.wagon.http.retryHandler.count=3 - GRADLE_ENTERPRISE_ACCESS_KEY: ${{ secrets.GE_ACCESS_TOKEN }} + DEVELOCITY_ACCESS_KEY: ${{ secrets.GE_ACCESS_TOKEN }} jobs: Simple: diff --git a/.github/workflows/cluster-it-1c3d.yml b/.github/workflows/cluster-it-1c3d.yml index e84feca528d8..2c62101c9dc3 100644 --- a/.github/workflows/cluster-it-1c3d.yml +++ b/.github/workflows/cluster-it-1c3d.yml @@ -27,7 +27,7 @@ concurrency: env: MAVEN_OPTS: -Dhttp.keepAlive=false -Dmaven.wagon.http.pool=false -Dmaven.wagon.http.retryHandler.class=standard -Dmaven.wagon.http.retryHandler.count=3 - GRADLE_ENTERPRISE_ACCESS_KEY: ${{ secrets.GE_ACCESS_TOKEN }} + DEVELOCITY_ACCESS_KEY: ${{ secrets.GE_ACCESS_TOKEN }} jobs: Simple: diff --git a/.github/workflows/pipe-it-2cluster.yml b/.github/workflows/pipe-it-2cluster.yml index 5b62958d527d..55d4f621cebc 100644 --- a/.github/workflows/pipe-it-2cluster.yml +++ b/.github/workflows/pipe-it-2cluster.yml @@ -25,7 +25,7 @@ concurrency: env: MAVEN_OPTS: -Dhttp.keepAlive=false -Dmaven.wagon.http.pool=false -Dmaven.wagon.http.retryHandler.class=standard -Dmaven.wagon.http.retryHandler.count=3 - GRADLE_ENTERPRISE_ACCESS_KEY: ${{ secrets.GE_ACCESS_TOKEN }} + DEVELOCITY_ACCESS_KEY: ${{ secrets.GE_ACCESS_TOKEN }} jobs: auto-create-schema: diff --git a/.github/workflows/sonar-codecov.yml b/.github/workflows/sonar-codecov.yml index 46fa342af15b..dab6cf493cdc 100644 --- a/.github/workflows/sonar-codecov.yml +++ b/.github/workflows/sonar-codecov.yml @@ -29,7 +29,7 @@ concurrency: env: MAVEN_OPTS: -Dhttp.keepAlive=false -Dmaven.wagon.http.pool=false -Dmaven.wagon.http.retryHandler.class=standard -Dmaven.wagon.http.retryHandler.count=3 PR_NUMBER: ${{ github.event.number }} - GRADLE_ENTERPRISE_ACCESS_KEY: ${{ secrets.GE_ACCESS_TOKEN }} + DEVELOCITY_ACCESS_KEY: ${{ secrets.GE_ACCESS_TOKEN }} jobs: codecov: diff --git a/.github/workflows/unit-test.yml b/.github/workflows/unit-test.yml index 3c6ada72f53c..3763dc6dc9bb 100644 --- a/.github/workflows/unit-test.yml +++ b/.github/workflows/unit-test.yml @@ -30,7 +30,7 @@ concurrency: env: MAVEN_OPTS: -Dhttp.keepAlive=false -Dmaven.wagon.http.pool=false -Dmaven.wagon.http.retryHandler.class=standard -Dmaven.wagon.http.retryHandler.count=3 - GRADLE_ENTERPRISE_ACCESS_KEY: ${{ secrets.GE_ACCESS_TOKEN }} + DEVELOCITY_ACCESS_KEY: ${{ secrets.GE_ACCESS_TOKEN }} jobs: unit-test: diff --git a/.gitignore b/.gitignore index 25f3070aae38..111f5922f1a3 100644 --- a/.gitignore +++ b/.gitignore @@ -122,3 +122,4 @@ iotdb-core/tsfile/src/main/antlr4/org/apache/tsfile/parser/gen/ # Develocity .mvn/.gradle-enterprise/ +.mvn/.develocity/ diff --git a/.mvn/gradle-enterprise.xml b/.mvn/develocity.xml similarity index 69% rename from .mvn/gradle-enterprise.xml rename to .mvn/develocity.xml index cf1a9a0a9e88..b505d1a36664 100644 --- a/.mvn/gradle-enterprise.xml +++ b/.mvn/develocity.xml @@ -19,20 +19,15 @@ under the License. 
--> - + https://ge.apache.org - false - - true - true - true - #{isFalse(env['GITHUB_ACTIONS'])} - ALWAYS - true + + + #{{'0.0.0.0'}} @@ -45,4 +40,4 @@ false - + diff --git a/.mvn/extensions.xml b/.mvn/extensions.xml index c5b001d44862..f3f1983375a6 100644 --- a/.mvn/extensions.xml +++ b/.mvn/extensions.xml @@ -22,12 +22,12 @@ com.gradle - gradle-enterprise-maven-extension - 1.19.2 + develocity-maven-extension + 1.21.3 com.gradle common-custom-user-data-maven-extension - 1.12.4 + 2.0 diff --git a/code-coverage/pom.xml b/code-coverage/pom.xml index 19fec8ef59ec..aa1bd56ff3f6 100644 --- a/code-coverage/pom.xml +++ b/code-coverage/pom.xml @@ -24,7 +24,7 @@ org.apache.iotdb iotdb-parent - 1.3.2-SNAPSHOT + 1.3.3-SNAPSHOT iotdb-code-coverage pom diff --git a/distribution/pom.xml b/distribution/pom.xml index 7ad8a8083d7b..1f112d7fc284 100644 --- a/distribution/pom.xml +++ b/distribution/pom.xml @@ -24,7 +24,7 @@ org.apache.iotdb iotdb-parent - 1.3.2-SNAPSHOT + 1.3.3-SNAPSHOT iotdb-distribution pom @@ -33,25 +33,25 @@ org.apache.iotdb iotdb-server - 1.3.2-SNAPSHOT + 1.3.3-SNAPSHOT zip org.apache.iotdb iotdb-cli - 1.3.2-SNAPSHOT + 1.3.3-SNAPSHOT zip org.apache.iotdb iotdb-confignode - 1.3.2-SNAPSHOT + 1.3.3-SNAPSHOT zip org.apache.iotdb library-udf - 1.3.2-SNAPSHOT + 1.3.3-SNAPSHOT diff --git a/example/client-cpp-example/pom.xml b/example/client-cpp-example/pom.xml index 4e95a297d7b7..29229f707813 100644 --- a/example/client-cpp-example/pom.xml +++ b/example/client-cpp-example/pom.xml @@ -24,7 +24,7 @@ org.apache.iotdb iotdb-examples - 1.3.2-SNAPSHOT + 1.3.3-SNAPSHOT client-cpp-example IoTDB: Example: CPP Client diff --git a/example/jdbc/pom.xml b/example/jdbc/pom.xml index 1d2152b998af..2eb25e0633e6 100644 --- a/example/jdbc/pom.xml +++ b/example/jdbc/pom.xml @@ -24,7 +24,7 @@ org.apache.iotdb iotdb-examples - 1.3.2-SNAPSHOT + 1.3.3-SNAPSHOT jdbc-example IoTDB: Example: JDBC diff --git a/example/jdbc/src/main/java/org/apache/iotdb/JDBCCharsetExample.java b/example/jdbc/src/main/java/org/apache/iotdb/JDBCCharsetExample.java index 79ae881e3017..3ecb9c056462 100644 --- a/example/jdbc/src/main/java/org/apache/iotdb/JDBCCharsetExample.java +++ b/example/jdbc/src/main/java/org/apache/iotdb/JDBCCharsetExample.java @@ -45,7 +45,7 @@ public static void main(String[] args) throws Exception { final IoTDBStatement statement = (IoTDBStatement) connection.createStatement()) { final String insertSQLWithGB18030 = - "insert into root.测试(timestamp, 维语, 彝语, 繁体, 蒙文, 简体, 标点符号, 藏语) values(1, 'ئۇيغۇر تىلى', 'ꆈꌠꉙ', \"繁體\", 'ᠮᠣᠩᠭᠣᠯ ᠬᠡᠯᠡ', '简体', '——?!', \"བོད་སྐད།\");"; + "insert into root.测试(timestamp, 彝语, 繁体, 蒙文, 简体, 标点符号, 藏语) values(1, 'ꆈꌠꉙ', \"繁體\", 'ᠮᠣᠩᠭᠣᠯ ᠬᠡᠯᠡ', '简体', '——?!', \"བོད་སྐད།\");"; final byte[] insertSQLWithGB18030Bytes = insertSQLWithGB18030.getBytes("GB18030"); statement.execute(insertSQLWithGB18030Bytes); } catch (IoTDBSQLException e) { diff --git a/example/mqtt-customize/pom.xml b/example/mqtt-customize/pom.xml index fe01d1640f59..b67be1f44131 100644 --- a/example/mqtt-customize/pom.xml +++ b/example/mqtt-customize/pom.xml @@ -24,7 +24,7 @@ org.apache.iotdb iotdb-examples - 1.3.2-SNAPSHOT + 1.3.3-SNAPSHOT customize-mqtt-example IoTDB: Example: Customized MQTT diff --git a/example/mqtt/pom.xml b/example/mqtt/pom.xml index 7e101fb4c428..62619735c0fc 100644 --- a/example/mqtt/pom.xml +++ b/example/mqtt/pom.xml @@ -24,7 +24,7 @@ org.apache.iotdb iotdb-examples - 1.3.2-SNAPSHOT + 1.3.3-SNAPSHOT mqtt-example IoTDB: Example: MQTT diff --git a/example/pipe-count-point-processor/pom.xml 
b/example/pipe-count-point-processor/pom.xml index c487ea15a771..9b486cd08bd5 100644 --- a/example/pipe-count-point-processor/pom.xml +++ b/example/pipe-count-point-processor/pom.xml @@ -24,7 +24,7 @@ org.apache.iotdb iotdb-examples - 1.3.2-SNAPSHOT + 1.3.3-SNAPSHOT pipe-count-point-processor-example IoTDB: Example: Pipe: Count Point Processor diff --git a/example/pipe-opc-ua-sink/pom.xml b/example/pipe-opc-ua-sink/pom.xml index afa5b6171ced..37107b08de5e 100644 --- a/example/pipe-opc-ua-sink/pom.xml +++ b/example/pipe-opc-ua-sink/pom.xml @@ -23,7 +23,7 @@ org.apache.iotdb iotdb-examples - 1.3.2-SNAPSHOT + 1.3.3-SNAPSHOT 4.0.0 pipe-opc-ua-sink-example diff --git a/example/pom.xml b/example/pom.xml index 3a4f1a22d019..548f7004d0d4 100644 --- a/example/pom.xml +++ b/example/pom.xml @@ -24,7 +24,7 @@ org.apache.iotdb iotdb-parent - 1.3.2-SNAPSHOT + 1.3.3-SNAPSHOT iotdb-examples pom diff --git a/example/rest-java-example/pom.xml b/example/rest-java-example/pom.xml index 9d1a354faac4..970fe626dd9e 100644 --- a/example/rest-java-example/pom.xml +++ b/example/rest-java-example/pom.xml @@ -24,7 +24,7 @@ org.apache.iotdb iotdb-examples - 1.3.2-SNAPSHOT + 1.3.3-SNAPSHOT rest-java-example IoTDB: Example: Java Rest diff --git a/example/schema/pom.xml b/example/schema/pom.xml index 1ac5536b4f6d..07c308576796 100644 --- a/example/schema/pom.xml +++ b/example/schema/pom.xml @@ -24,7 +24,7 @@ iotdb-examples org.apache.iotdb - 1.3.2-SNAPSHOT + 1.3.3-SNAPSHOT schema-example IoTDB: Example: Schema diff --git a/example/session/pom.xml b/example/session/pom.xml index ac5d936f9977..818cebd1ccc0 100644 --- a/example/session/pom.xml +++ b/example/session/pom.xml @@ -24,7 +24,7 @@ org.apache.iotdb iotdb-examples - 1.3.2-SNAPSHOT + 1.3.3-SNAPSHOT client-example IoTDB: Example: Session Client diff --git a/example/trigger/pom.xml b/example/trigger/pom.xml index bf4937557836..1970972306f4 100644 --- a/example/trigger/pom.xml +++ b/example/trigger/pom.xml @@ -24,7 +24,7 @@ org.apache.iotdb iotdb-examples - 1.3.2-SNAPSHOT + 1.3.3-SNAPSHOT trigger-example IoTDB: Example: Trigger diff --git a/example/udf/pom.xml b/example/udf/pom.xml index 080b41807da5..db0cd0088459 100644 --- a/example/udf/pom.xml +++ b/example/udf/pom.xml @@ -24,7 +24,7 @@ org.apache.iotdb iotdb-examples - 1.3.2-SNAPSHOT + 1.3.3-SNAPSHOT udf-example IoTDB: Example: UDF diff --git a/integration-test/pom.xml b/integration-test/pom.xml index e2dd5c6425bd..68806474fc1f 100644 --- a/integration-test/pom.xml +++ b/integration-test/pom.xml @@ -24,7 +24,7 @@ org.apache.iotdb iotdb-parent - 1.3.2-SNAPSHOT + 1.3.3-SNAPSHOT integration-test IoTDB: Integration-Test @@ -72,47 +72,47 @@ org.apache.iotdb iotdb-server - 1.3.2-SNAPSHOT + 1.3.3-SNAPSHOT org.apache.iotdb iotdb-session - 1.3.2-SNAPSHOT + 1.3.3-SNAPSHOT org.apache.iotdb iotdb-jdbc - 1.3.2-SNAPSHOT + 1.3.3-SNAPSHOT org.apache.iotdb trigger-api - 1.3.2-SNAPSHOT + 1.3.3-SNAPSHOT org.apache.iotdb isession - 1.3.2-SNAPSHOT + 1.3.3-SNAPSHOT org.apache.iotdb service-rpc - 1.3.2-SNAPSHOT + 1.3.3-SNAPSHOT org.apache.iotdb iotdb-thrift-confignode - 1.3.2-SNAPSHOT + 1.3.3-SNAPSHOT org.apache.iotdb node-commons - 1.3.2-SNAPSHOT + 1.3.3-SNAPSHOT org.apache.iotdb iotdb-thrift-commons - 1.3.2-SNAPSHOT + 1.3.3-SNAPSHOT org.apache.tsfile @@ -122,7 +122,7 @@ org.apache.iotdb udf-api - 1.3.2-SNAPSHOT + 1.3.3-SNAPSHOT org.apache.iotdb @@ -132,7 +132,7 @@ org.apache.iotdb iotdb-consensus - 1.3.2-SNAPSHOT + 1.3.3-SNAPSHOT org.slf4j @@ -161,17 +161,17 @@ org.apache.iotdb iotdb-confignode - 1.3.2-SNAPSHOT + 1.3.3-SNAPSHOT 
org.apache.iotdb iotdb-thrift - 1.3.2-SNAPSHOT + 1.3.3-SNAPSHOT org.apache.iotdb iotdb-cli - 1.3.2-SNAPSHOT + 1.3.3-SNAPSHOT commons-codec @@ -201,7 +201,7 @@ org.apache.iotdb iotdb-server - 1.3.2-SNAPSHOT + 1.3.3-SNAPSHOT test-jar test diff --git a/integration-test/src/test/java/org/apache/iotdb/db/it/IoTDBRepairDataIT.java b/integration-test/src/test/java/org/apache/iotdb/db/it/IoTDBRepairDataIT.java index fc84978dc906..25d2935b9447 100644 --- a/integration-test/src/test/java/org/apache/iotdb/db/it/IoTDBRepairDataIT.java +++ b/integration-test/src/test/java/org/apache/iotdb/db/it/IoTDBRepairDataIT.java @@ -70,8 +70,7 @@ public void testRepairData() { statement.execute("CREATE DATABASE root.tesgsg"); statement.execute("CREATE TIMESERIES root.testsg.d1.s1 WITH DATATYPE=INT32, ENCODING=PLAIN"); File tsfile = generateUnsortedFile(); - statement.execute( - String.format("load \"%s\" verify=false", tsfile.getParentFile().getAbsolutePath())); + statement.execute(String.format("load \"%s\"", tsfile.getParentFile().getAbsolutePath())); Assert.assertFalse(validate(statement)); statement.execute("START REPAIR DATA"); diff --git a/integration-test/src/test/java/org/apache/iotdb/db/it/orderBy/IoTDBOrderByForDebugIT.java b/integration-test/src/test/java/org/apache/iotdb/db/it/orderBy/IoTDBOrderByForDebugIT.java new file mode 100644 index 000000000000..c6f502bd4b54 --- /dev/null +++ b/integration-test/src/test/java/org/apache/iotdb/db/it/orderBy/IoTDBOrderByForDebugIT.java @@ -0,0 +1,1406 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ + +package org.apache.iotdb.db.it.orderBy; + +import org.apache.iotdb.it.env.EnvFactory; +import org.apache.iotdb.it.framework.IoTDBTestRunner; +import org.apache.iotdb.itbase.category.ClusterIT; +import org.apache.iotdb.itbase.category.LocalStandaloneIT; + +import org.bouncycastle.util.Arrays; +import org.junit.AfterClass; +import org.junit.BeforeClass; +import org.junit.Test; +import org.junit.experimental.categories.Category; +import org.junit.runner.RunWith; + +import java.sql.Connection; +import java.sql.ResultSet; +import java.sql.ResultSetMetaData; +import java.sql.SQLException; +import java.sql.Statement; +import java.util.Objects; + +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertNull; +import static org.junit.Assert.fail; + +@RunWith(IoTDBTestRunner.class) +@Category({LocalStandaloneIT.class, ClusterIT.class}) +public class IoTDBOrderByForDebugIT { + + // the data can be viewed in + // https://docs.google.com/spreadsheets/d/1OWA1bKraArCwWVnuTjuhJ5yLG0PFLdD78gD6FjquepI/edit#gid=0 + private static final String[] sql = + new String[] { + "CREATE DATABASE root.sg", + "CREATE TIMESERIES root.sg.d.num WITH DATATYPE=INT32, ENCODING=RLE", + "CREATE TIMESERIES root.sg.d.bigNum WITH DATATYPE=INT64, ENCODING=RLE", + "CREATE TIMESERIES root.sg.d.floatNum WITH DATATYPE=DOUBLE, ENCODING=RLE, 'MAX_POINT_NUMBER'='5'", + "CREATE TIMESERIES root.sg.d.str WITH DATATYPE=TEXT, ENCODING=PLAIN", + "CREATE TIMESERIES root.sg.d.bool WITH DATATYPE=BOOLEAN, ENCODING=PLAIN", + "insert into root.sg.d(timestamp,num,bigNum,floatNum,str,bool) values(0,3,2947483648,231.2121,\"coconut\",FALSE)", + "insert into root.sg.d(timestamp,num,bigNum,floatNum,str,bool) values(20,2,2147483648,434.12,\"pineapple\",TRUE)", + "insert into root.sg.d(timestamp,num,bigNum,floatNum,str,bool) values(40,1,2247483648,12.123,\"apricot\",TRUE)", + "insert into root.sg.d(timestamp,num,bigNum,floatNum,str,bool) values(80,9,2147483646,43.12,\"apple\",FALSE)", + "insert into root.sg.d(timestamp,num,bigNum,floatNum,str,bool) values(100,8,2147483964,4654.231,\"papaya\",TRUE)", + "insert into root.sg.d(timestamp,num,bigNum,floatNum,str,bool) values(31536000000,6,2147483650,1231.21,\"banana\",TRUE)", + "insert into root.sg.d(timestamp,num,bigNum,floatNum,str,bool) values(31536000100,10,3147483648,231.55,\"pumelo\",FALSE)", + "insert into root.sg.d(timestamp,num,bigNum,floatNum,str,bool) values(31536000500,4,2147493648,213.1,\"peach\",FALSE)", + "insert into root.sg.d(timestamp,num,bigNum,floatNum,str,bool) values(31536001000,5,2149783648,56.32,\"orange\",FALSE)", + "insert into root.sg.d(timestamp,num,bigNum,floatNum,str,bool) values(31536010000,7,2147983648,213.112,\"lemon\",TRUE)", + "insert into root.sg.d(timestamp,num,bigNum,floatNum,str,bool) values(31536100000,11,2147468648,54.121,\"pitaya\",FALSE)", + "insert into root.sg.d(timestamp,num,bigNum,floatNum,str,bool) values(41536000000,12,2146483648,45.231,\"strawberry\",FALSE)", + "insert into root.sg.d(timestamp,num,bigNum,floatNum,str,bool) values(41536000020,14,2907483648,231.34,\"cherry\",FALSE)", + "insert into root.sg.d(timestamp,num,bigNum,floatNum,str,bool) values(41536900000,13,2107483648,54.12,\"lychee\",TRUE)", + "insert into root.sg.d(timestamp,num,bigNum,floatNum,str,bool) values(51536000000,15,3147483648,235.213,\"watermelon\",TRUE)", + // Newly added 'flush' command compared to IoTDBOrderByIT + "flush" + }; + + private static final String[] sql2 = + new String[] { + "CREATE TIMESERIES root.sg.d2.num WITH DATATYPE=INT32, ENCODING=RLE", 
+ "CREATE TIMESERIES root.sg.d2.bigNum WITH DATATYPE=INT64, ENCODING=RLE", + "CREATE TIMESERIES root.sg.d2.floatNum WITH DATATYPE=DOUBLE, ENCODING=RLE, 'MAX_POINT_NUMBER'='5'", + "CREATE TIMESERIES root.sg.d2.str WITH DATATYPE=TEXT, ENCODING=PLAIN", + "CREATE TIMESERIES root.sg.d2.bool WITH DATATYPE=BOOLEAN, ENCODING=PLAIN", + "insert into root.sg.d2(timestamp,num,bigNum,floatNum,str,bool) values(0,3,2947483648,231.2121,\"coconut\",FALSE)", + "insert into root.sg.d2(timestamp,num,bigNum,floatNum,str,bool) values(20,2,2147483648,434.12,\"pineapple\",TRUE)", + "insert into root.sg.d2(timestamp,num,bigNum,floatNum,str,bool) values(40,1,2247483648,12.123,\"apricot\",TRUE)", + "insert into root.sg.d2(timestamp,num,bigNum,floatNum,str,bool) values(80,9,2147483646,43.12,\"apple\",FALSE)", + "insert into root.sg.d2(timestamp,num,bigNum,floatNum,str,bool) values(100,8,2147483964,4654.231,\"papaya\",TRUE)", + "insert into root.sg.d2(timestamp,num,bigNum,floatNum,str,bool) values(31536000000,6,2147483650,1231.21,\"banana\",TRUE)", + "insert into root.sg.d2(timestamp,num,bigNum,floatNum,str,bool) values(31536000100,10,3147483648,231.55,\"pumelo\",FALSE)", + "insert into root.sg.d2(timestamp,num,bigNum,floatNum,str,bool) values(31536000500,4,2147493648,213.1,\"peach\",FALSE)", + "insert into root.sg.d2(timestamp,num,bigNum,floatNum,str,bool) values(31536001000,5,2149783648,56.32,\"orange\",FALSE)", + "insert into root.sg.d2(timestamp,num,bigNum,floatNum,str,bool) values(31536010000,7,2147983648,213.112,\"lemon\",TRUE)", + "insert into root.sg.d2(timestamp,num,bigNum,floatNum,str,bool) values(31536100000,11,2147468648,54.121,\"pitaya\",FALSE)", + "insert into root.sg.d2(timestamp,num,bigNum,floatNum,str,bool) values(41536000000,12,2146483648,45.231,\"strawberry\",FALSE)", + "insert into root.sg.d2(timestamp,num,bigNum,floatNum,str,bool) values(41536000020,14,2907483648,231.34,\"cherry\",FALSE)", + "insert into root.sg.d2(timestamp,num,bigNum,floatNum,str,bool) values(41536900000,13,2107483648,54.12,\"lychee\",TRUE)", + "insert into root.sg.d2(timestamp,num,bigNum,floatNum,str,bool) values(51536000000,15,3147483648,235.213,\"watermelon\",TRUE)", + // Newly added 'flush' command compared to IoTDBOrderByIT + "flush" + }; + + @BeforeClass + public static void setUp() throws Exception { + EnvFactory.getEnv().getConfig().getDataNodeCommonConfig().setSortBufferSize(1024 * 1024L); + EnvFactory.getEnv().initClusterEnvironment(); + insertData(); + } + + @AfterClass + public static void tearDown() throws Exception { + EnvFactory.getEnv().cleanClusterEnvironment(); + } + + protected static void insertData() { + try (Connection connection = EnvFactory.getEnv().getConnection(); + Statement statement = connection.createStatement()) { + for (String sql : sql) { + statement.execute(sql); + } + for (String sql : sql2) { + statement.execute(sql); + } + } catch (Exception e) { + e.printStackTrace(); + } + } + + // ordinal data + String[][] res = + new String[][] { + {"0", "3", "2947483648", "231.2121", "coconut", "false"}, + {"20", "2", "2147483648", "434.12", "pineapple", "true"}, + {"40", "1", "2247483648", "12.123", "apricot", "true"}, + {"80", "9", "2147483646", "43.12", "apple", "false"}, + {"100", "8", "2147483964", "4654.231", "papaya", "true"}, + {"31536000000", "6", "2147483650", "1231.21", "banana", "true"}, + {"31536000100", "10", "3147483648", "231.55", "pumelo", "false"}, + {"31536000500", "4", "2147493648", "213.1", "peach", "false"}, + {"31536001000", "5", "2149783648", "56.32", "orange", "false"}, + 
{"31536010000", "7", "2147983648", "213.112", "lemon", "true"}, + {"31536100000", "11", "2147468648", "54.121", "pitaya", "false"}, + {"41536000000", "12", "2146483648", "45.231", "strawberry", "false"}, + {"41536000020", "14", "2907483648", "231.34", "cherry", "false"}, + {"41536900000", "13", "2107483648", "54.12", "lychee", "true"}, + {"51536000000", "15", "3147483648", "235.213", "watermelon", "true"}, + }; + + private void checkHeader(ResultSetMetaData resultSetMetaData, String[] title) + throws SQLException { + for (int i = 1; i <= resultSetMetaData.getColumnCount(); i++) { + assertEquals(title[i - 1], resultSetMetaData.getColumnName(i)); + } + } + + private void testNormalOrderBy(String sql, int[] ans) { + try (Connection connection = EnvFactory.getEnv().getConnection(); + Statement statement = connection.createStatement()) { + try (ResultSet resultSet = statement.executeQuery(sql)) { + ResultSetMetaData metaData = resultSet.getMetaData(); + checkHeader( + metaData, + new String[] { + "Time", + "root.sg.d.num", + "root.sg.d.bigNum", + "root.sg.d.floatNum", + "root.sg.d.str", + "root.sg.d.bool" + }); + int i = 0; + while (resultSet.next()) { + + String actualTime = resultSet.getString(1); + String actualNum = resultSet.getString(2); + String actualBigNum = resultSet.getString(3); + double actualFloatNum = resultSet.getDouble(4); + String actualStr = resultSet.getString(5); + String actualBool = resultSet.getString(6); + + assertEquals(res[ans[i]][0], actualTime); + assertEquals(res[ans[i]][1], actualNum); + assertEquals(res[ans[i]][2], actualBigNum); + assertEquals(Double.parseDouble(res[ans[i]][3]), actualFloatNum, 0.0001); + assertEquals(res[ans[i]][4], actualStr); + assertEquals(res[ans[i]][5], actualBool); + + i++; + } + assertEquals(i, ans.length); + } + } catch (Exception e) { + e.printStackTrace(); + fail(); + } + } + + // 1. 
One-level order by test + @Test + public void orderByTest1() { + String sql = "select num,bigNum,floatNum,str,bool from root.sg.d order by num"; + int[] ans = {2, 1, 0, 7, 8, 5, 9, 4, 3, 6, 10, 11, 13, 12, 14}; + testNormalOrderBy(sql, ans); + } + + @Test + public void orderByTest2() { + String sql = "select num,bigNum,floatNum,str,bool from root.sg.d order by bigNum,time"; + int[] ans = {13, 11, 10, 3, 1, 5, 4, 7, 9, 8, 2, 12, 0, 6, 14}; + testNormalOrderBy(sql, ans); + } + + @Test + public void orderByTest3() { + String sql = "select num,bigNum,floatNum,str,bool from root.sg.d order by floatNum"; + int[] ans = {2, 3, 11, 13, 10, 8, 7, 9, 0, 12, 6, 14, 1, 5, 4}; + testNormalOrderBy(sql, ans); + } + + @Test + public void orderByTest4() { + String sql = "select num,bigNum,floatNum,str,bool from root.sg.d order by str"; + int[] ans = {3, 2, 5, 12, 0, 9, 13, 8, 4, 7, 1, 10, 6, 11, 14}; + testNormalOrderBy(sql, ans); + } + + @Test + public void orderByTest5() { + String sql = "select num,bigNum,floatNum,str,bool from root.sg.d order by num desc"; + int[] ans = {2, 1, 0, 7, 8, 5, 9, 4, 3, 6, 10, 11, 13, 12, 14}; + testNormalOrderBy(sql, Arrays.reverse(ans)); + } + + @Test + public void orderByTest6() { + String sql = + "select num,bigNum,floatNum,str,bool from root.sg.d order by bigNum desc, time asc"; + int[] ans = {6, 14, 0, 12, 2, 8, 9, 7, 4, 5, 1, 3, 10, 11, 13}; + testNormalOrderBy(sql, ans); + } + + @Test + public void orderByTest7() { + String sql = "select num,bigNum,floatNum,str,bool from root.sg.d order by floatNum desc"; + int[] ans = {2, 3, 11, 13, 10, 8, 7, 9, 0, 12, 6, 14, 1, 5, 4}; + testNormalOrderBy(sql, Arrays.reverse(ans)); + } + + @Test + public void orderByTest8() { + String sql = "select num,bigNum,floatNum,str,bool from root.sg.d order by str desc"; + int[] ans = {3, 2, 5, 12, 0, 9, 13, 8, 4, 7, 1, 10, 6, 11, 14}; + testNormalOrderBy(sql, Arrays.reverse(ans)); + } + + @Test + public void orderByTest15() { + String sql = "select num+bigNum,floatNum from root.sg.d order by str"; + int[] ans = {3, 2, 5, 12, 0, 9, 13, 8, 4, 7, 1, 10, 6, 11, 14}; + + try (Connection connection = EnvFactory.getEnv().getConnection(); + Statement statement = connection.createStatement()) { + try (ResultSet resultSet = statement.executeQuery(sql)) { + ResultSetMetaData metaData = resultSet.getMetaData(); + checkHeader( + metaData, + new String[] {"Time", "root.sg.d.num + root.sg.d.bigNum", "root.sg.d.floatNum"}); + int i = 0; + while (resultSet.next()) { + + String actualTime = resultSet.getString(1); + double actualNum = resultSet.getDouble(2); + double actualFloat = resultSet.getDouble(3); + + assertEquals(res[ans[i]][0], actualTime); + assertEquals( + Long.parseLong(res[ans[i]][1]) + Long.parseLong(res[ans[i]][2]), actualNum, 0.0001); + assertEquals(Double.parseDouble(res[ans[i]][3]), actualFloat, 0.0001); + + i++; + } + assertEquals(i, ans.length); + } + } catch (Exception e) { + e.printStackTrace(); + fail(); + } + } + + // 2. 
Multi-level order by test + @Test + public void orderByTest9() { + String sql = "select num,bigNum,floatNum,str,bool from root.sg.d order by bool asc, str asc"; + int[] ans = {3, 12, 0, 8, 7, 10, 6, 11, 2, 5, 9, 13, 4, 1, 14}; + testNormalOrderBy(sql, ans); + } + + @Test + public void orderByTest10() { + String sql = "select num,bigNum,floatNum,str,bool from root.sg.d order by bool asc, num desc"; + int[] ans = {12, 11, 10, 6, 3, 8, 7, 0, 14, 13, 4, 9, 5, 1, 2}; + testNormalOrderBy(sql, ans); + } + + @Test + public void orderByTest11() { + String sql = + "select num,bigNum,floatNum,str,bool from root.sg.d order by bigNum desc, floatNum desc"; + int[] ans = {14, 6, 0, 12, 2, 8, 9, 7, 4, 5, 1, 3, 10, 11, 13}; + testNormalOrderBy(sql, ans); + } + + @Test + public void orderByTest12() { + String sql = + "select num,bigNum,floatNum,str,bool from root.sg.d order by str desc, floatNum desc"; + int[] ans = {3, 2, 5, 12, 0, 9, 13, 8, 4, 7, 1, 10, 6, 11, 14}; + testNormalOrderBy(sql, Arrays.reverse(ans)); + } + + @Test + public void orderByTest13() { + String sql = + "select num,bigNum,floatNum,str,bool from root.sg.d order by num+floatNum desc, floatNum desc"; + int[] ans = {4, 5, 1, 14, 12, 6, 0, 9, 7, 13, 10, 8, 11, 3, 2}; + testNormalOrderBy(sql, ans); + } + + @Test + public void orderByTest14() { + String sql = "select num+bigNum from root.sg.d order by num+floatNum desc, floatNum desc"; + int[] ans = {4, 5, 1, 14, 12, 6, 0, 9, 7, 13, 10, 8, 11, 3, 2}; + try (Connection connection = EnvFactory.getEnv().getConnection(); + Statement statement = connection.createStatement()) { + try (ResultSet resultSet = statement.executeQuery(sql)) { + ResultSetMetaData metaData = resultSet.getMetaData(); + checkHeader(metaData, new String[] {"Time", "root.sg.d.num + root.sg.d.bigNum"}); + int i = 0; + while (resultSet.next()) { + + String actualTime = resultSet.getString(1); + double actualNum = resultSet.getDouble(2); + + assertEquals(res[ans[i]][0], actualTime); + assertEquals( + Long.parseLong(res[ans[i]][1]) + Long.parseLong(res[ans[i]][2]), actualNum, 0.001); + + i++; + } + assertEquals(i, ans.length); + } + } catch (Exception e) { + e.printStackTrace(); + fail(); + } + } + + @Test + public void orderByTest16() { + String sql = "select num+floatNum from root.sg.d order by floatNum+num desc, floatNum desc"; + int[] ans = {4, 5, 1, 14, 12, 6, 0, 9, 7, 13, 10, 8, 11, 3, 2}; + try (Connection connection = EnvFactory.getEnv().getConnection(); + Statement statement = connection.createStatement()) { + try (ResultSet resultSet = statement.executeQuery(sql)) { + ResultSetMetaData metaData = resultSet.getMetaData(); + checkHeader(metaData, new String[] {"Time", "root.sg.d.num + root.sg.d.floatNum"}); + int i = 0; + while (resultSet.next()) { + + String actualTime = resultSet.getString(1); + double actualNum = resultSet.getDouble(2); + + assertEquals(res[ans[i]][0], actualTime); + assertEquals( + Long.parseLong(res[ans[i]][1]) + Double.parseDouble(res[ans[i]][3]), + actualNum, + 0.001); + + i++; + } + assertEquals(i, ans.length); + } + } catch (Exception e) { + e.printStackTrace(); + fail(); + } + } + + @Test + public void orderByTest17() { + String sql = "select num,bigNum,floatNum,str,bool from root.sg.d order by str desc, str asc"; + int[] ans = {3, 2, 5, 12, 0, 9, 13, 8, 4, 7, 1, 10, 6, 11, 14}; + testNormalOrderBy(sql, Arrays.reverse(ans)); + } + + @Test + public void orderByTest18() { + String sql = "select num,bigNum,floatNum,str,bool from root.sg.d order by str, str"; + int[] ans = {3, 2, 5, 12, 0, 9, 13, 8, 
4, 7, 1, 10, 6, 11, 14}; + testNormalOrderBy(sql, ans); + } + + // limit cannot be pushed down in ORDER BY + @Test + public void orderByTest19() { + String sql = "select num from root.sg.d order by num limit 5"; + int[] ans = {2, 1, 0, 7, 8}; + try (Connection connection = EnvFactory.getEnv().getConnection(); + Statement statement = connection.createStatement()) { + try (ResultSet resultSet = statement.executeQuery(sql)) { + ResultSetMetaData metaData = resultSet.getMetaData(); + checkHeader(metaData, new String[] {"Time", "root.sg.d.num"}); + int i = 0; + while (resultSet.next()) { + String actualTime = resultSet.getString(1); + String actualNum = resultSet.getString(2); + assertEquals(res[ans[i]][0], actualTime); + assertEquals(res[ans[i]][1], actualNum); + i++; + } + assertEquals(i, ans.length); + } + } catch (Exception e) { + e.printStackTrace(); + fail(); + } + } + + // 3. aggregation query + @Test + public void orderByInAggregationTest() { + String sql = "select avg(num) from root.sg.d group by session(10000ms) order by avg(num) desc"; + double[][] ans = new double[][] {{15.0}, {13.0}, {13.0}, {11.0}, {6.4}, {4.6}}; + long[] times = + new long[] {51536000000L, 41536000000L, 41536900000L, 31536100000L, 31536000000L, 0L}; + try (Connection connection = EnvFactory.getEnv().getConnection(); + Statement statement = connection.createStatement()) { + try (ResultSet resultSet = statement.executeQuery(sql)) { + int i = 0; + while (resultSet.next()) { + long actualTime = resultSet.getLong(1); + double actualAvg = resultSet.getDouble(2); + assertEquals(times[i], actualTime); + assertEquals(ans[i][0], actualAvg, 0.0001); + i++; + } + assertEquals(i, ans.length); + } + } catch (Exception e) { + e.printStackTrace(); + fail(); + } + } + + @Test + public void orderByInAggregationTest2() { + String sql = + "select avg(num) from root.sg.d group by session(10000ms) order by max_value(floatNum)"; + double[][] ans = + new double[][] { + {13.0, 54.12}, + {11.0, 54.121}, + {13.0, 231.34}, + {15.0, 235.213}, + {6.4, 1231.21}, + {4.6, 4654.231} + }; + long[] times = + new long[] {41536900000L, 31536100000L, 41536000000L, 51536000000L, 31536000000L, 0L}; + try (Connection connection = EnvFactory.getEnv().getConnection(); + Statement statement = connection.createStatement()) { + try (ResultSet resultSet = statement.executeQuery(sql)) { + int i = 0; + while (resultSet.next()) { + long actualTime = resultSet.getLong(1); + double actualAvg = resultSet.getDouble(2); + assertEquals(times[i], actualTime); + assertEquals(ans[i][0], actualAvg, 0.0001); + i++; + } + assertEquals(i, ans.length); + } + } catch (Exception e) { + e.printStackTrace(); + fail(); + } + } + + @Test + public void orderByInAggregationTest3() { + String sql = + "select avg(num) from root.sg.d group by session(10000ms) order by avg(num) desc,max_value(floatNum)"; + double[] ans = new double[] {15.0, 13.0, 13.0, 11.0, 6.4, 4.6}; + long[] times = + new long[] {51536000000L, 41536900000L, 41536000000L, 31536100000L, 31536000000L, 0L}; + try (Connection connection = EnvFactory.getEnv().getConnection(); + Statement statement = connection.createStatement()) { + try (ResultSet resultSet = statement.executeQuery(sql)) { + int i = 0; + while (resultSet.next()) { + long actualTime = resultSet.getLong(1); + double actualAvg = resultSet.getDouble(2); + assertEquals(times[i], actualTime); + assertEquals(ans[i], actualAvg, 0.0001); + i++; + } + assertEquals(i, ans.length); + } + } catch (Exception e) { + e.printStackTrace(); + fail(); + } + } + + @Test + public 
void orderByInAggregationTest4() { + String sql = + "select avg(num)+avg(floatNum) from root.sg.d group by session(10000ms) order by avg(num)+avg(floatNum)"; + double[][] ans = + new double[][] {{1079.56122}, {395.4584}, {65.121}, {151.2855}, {67.12}, {250.213}}; + long[] times = + new long[] {0L, 31536000000L, 31536100000L, 41536000000L, 41536900000L, 51536000000L}; + int[] order = new int[] {2, 4, 3, 5, 1, 0}; + try (Connection connection = EnvFactory.getEnv().getConnection(); + Statement statement = connection.createStatement()) { + try (ResultSet resultSet = statement.executeQuery(sql)) { + int i = 0; + while (resultSet.next()) { + long actualTime = resultSet.getLong(1); + double actualAvg = resultSet.getDouble(2); + assertEquals(times[order[i]], actualTime); + assertEquals(ans[order[i]][0], actualAvg, 0.0001); + i++; + } + assertEquals(i, ans.length); + } + } catch (Exception e) { + e.printStackTrace(); + fail(); + } + } + + @Test + public void orderByInAggregationTest5() { + String sql = + "select min_value(bigNum) from root.sg.d group by session(10000ms) order by avg(num)+avg(floatNum)"; + long[] ans = + new long[] {2147483646L, 2147483650L, 2147468648L, 2146483648L, 2107483648L, 3147483648L}; + long[] times = + new long[] {0L, 31536000000L, 31536100000L, 41536000000L, 41536900000L, 51536000000L}; + int[] order = new int[] {2, 4, 3, 5, 1, 0}; + try (Connection connection = EnvFactory.getEnv().getConnection(); + Statement statement = connection.createStatement()) { + try (ResultSet resultSet = statement.executeQuery(sql)) { + int i = 0; + while (resultSet.next()) { + long actualTime = resultSet.getLong(1); + long actualMinValue = resultSet.getLong(2); + assertEquals(times[order[i]], actualTime); + assertEquals(ans[order[i]], actualMinValue, 0.0001); + i++; + } + assertEquals(i, ans.length); + } + } catch (Exception e) { + e.printStackTrace(); + fail(); + } + } + + @Test + public void orderByInAggregationTest6() { + String sql = + "select min_value(num)+min_value(bigNum) from root.sg.d group by session(10000ms) order by avg(num)+avg(floatNum)"; + long[] ans = + new long[] {2147483647L, 2147483654L, 2147468659L, 2146483660L, 2107483661L, 3147483663L}; + long[] times = + new long[] {0L, 31536000000L, 31536100000L, 41536000000L, 41536900000L, 51536000000L}; + int[] order = new int[] {2, 4, 3, 5, 1, 0}; + try (Connection connection = EnvFactory.getEnv().getConnection(); + Statement statement = connection.createStatement()) { + try (ResultSet resultSet = statement.executeQuery(sql)) { + int i = 0; + while (resultSet.next()) { + long actualTime = resultSet.getLong(1); + double actualMinValue = resultSet.getDouble(2); + assertEquals(times[order[i]], actualTime); + assertEquals(ans[order[i]], actualMinValue, 0.0001); + i++; + } + assertEquals(i, ans.length); + } + } catch (Exception e) { + e.printStackTrace(); + fail(); + } + } + + @Test + public void orderByInAggregationTest7() { + String sql = + "select avg(num)+min_value(floatNum) from root.sg.d group by session(10000ms) order by max_value(floatNum)"; + double[][] ans = + new double[][] { + {13.0, 54.12, 54.12}, + {11.0, 54.121, 54.121}, + {13.0, 231.34, 45.231}, + {15.0, 235.213, 235.213}, + {6.4, 1231.21, 56.32}, + {4.6, 4654.231, 12.123} + }; + long[] times = + new long[] {41536900000L, 31536100000L, 41536000000L, 51536000000L, 31536000000L, 0L}; + try (Connection connection = EnvFactory.getEnv().getConnection(); + Statement statement = connection.createStatement()) { + try (ResultSet resultSet = statement.executeQuery(sql)) { + int i = 
0; + while (resultSet.next()) { + long actualTime = resultSet.getLong(1); + double actualAvg = resultSet.getDouble(2); + assertEquals(times[i], actualTime); + assertEquals(ans[i][0] + ans[i][2], actualAvg, 0.0001); + i++; + } + assertEquals(i, ans.length); + } + } catch (Exception e) { + e.printStackTrace(); + fail(); + } + } + + @Test + public void orderByInAggregationTest8() { + String sql = + "select avg(num)+avg(floatNum) from root.sg.d group by session(10000ms) order by avg(floatNum)+avg(num)"; + double[][] ans = + new double[][] {{1079.56122}, {395.4584}, {65.121}, {151.2855}, {67.12}, {250.213}}; + long[] times = + new long[] {0L, 31536000000L, 31536100000L, 41536000000L, 41536900000L, 51536000000L}; + int[] order = new int[] {2, 4, 3, 5, 1, 0}; + try (Connection connection = EnvFactory.getEnv().getConnection(); + Statement statement = connection.createStatement()) { + try (ResultSet resultSet = statement.executeQuery(sql)) { + int i = 0; + while (resultSet.next()) { + long actualTime = resultSet.getLong(1); + double actualAvg = resultSet.getDouble(2); + assertEquals(times[order[i]], actualTime); + assertEquals(ans[order[i]][0], actualAvg, 0.0001); + i++; + } + assertEquals(i, ans.length); + } + } catch (Exception e) { + e.printStackTrace(); + fail(); + } + } + + // 4. raw data query with align by device + private void testNormalOrderByAlignByDevice(String sql, int[] ans) { + try (Connection connection = EnvFactory.getEnv().getConnection(); + Statement statement = connection.createStatement()) { + try (ResultSet resultSet = statement.executeQuery(sql)) { + ResultSetMetaData metaData = resultSet.getMetaData(); + checkHeader( + metaData, new String[] {"Time", "Device", "num", "bigNum", "floatNum", "str", "bool"}); + int i = 0; + int total = 0; + String device = "root.sg.d"; + while (resultSet.next()) { + + String actualTime = resultSet.getString(1); + String actualDevice = resultSet.getString(2); + String actualNum = resultSet.getString(3); + String actualBigNum = resultSet.getString(4); + double actualFloatNum = resultSet.getDouble(5); + String actualStr = resultSet.getString(6); + String actualBool = resultSet.getString(7); + + assertEquals(device, actualDevice); + assertEquals(res[ans[i]][0], actualTime); + assertEquals(res[ans[i]][1], actualNum); + assertEquals(res[ans[i]][2], actualBigNum); + assertEquals(Double.parseDouble(res[ans[i]][3]), actualFloatNum, 0.0001); + assertEquals(res[ans[i]][4], actualStr); + assertEquals(res[ans[i]][5], actualBool); + + if (device.equals("root.sg.d")) { + device = "root.sg.d2"; + } else { + device = "root.sg.d"; + i++; + } + total++; + } + assertEquals(i, ans.length); + assertEquals(total, ans.length * 2); + } + } catch (Exception e) { + e.printStackTrace(); + fail(); + } + } + + @Test + public void alignByDeviceOrderByTest1() { + String sql = + "select num+bigNum from root.** order by num+floatNum desc, floatNum desc align by device"; + int[] ans = {4, 5, 1, 14, 12, 6, 0, 9, 7, 13, 10, 8, 11, 3, 2}; + String device = "root.sg.d"; + try (Connection connection = EnvFactory.getEnv().getConnection(); + Statement statement = connection.createStatement()) { + try (ResultSet resultSet = statement.executeQuery(sql)) { + int i = 0; + while (resultSet.next()) { + String actualTime = resultSet.getString(1); + String actualDevice = resultSet.getString(2); + double actualNum = resultSet.getDouble(3); + + assertEquals(device, actualDevice); + assertEquals(res[ans[i]][0], actualTime); + assertEquals( + Long.parseLong(res[ans[i]][1]) + 
Long.parseLong(res[ans[i]][2]), actualNum, 0.0001); + if (device.equals("root.sg.d")) { + device = "root.sg.d2"; + } else { + device = "root.sg.d"; + i++; + } + } + assertEquals(i, ans.length); + } + } catch (Exception e) { + e.printStackTrace(); + fail(); + } + } + + @Test + public void alignByDeviceOrderByTest2() { + String sql = "select num,bigNum,floatNum,str,bool from root.** order by num align by device"; + int[] ans = {2, 1, 0, 7, 8, 5, 9, 4, 3, 6, 10, 11, 13, 12, 14}; + testNormalOrderByAlignByDevice(sql, ans); + } + + @Test + public void alignByDeviceOrderByTest3() { + String sql = + "select num,bigNum,floatNum,str,bool from root.** order by floatNum align by device"; + int[] ans = {2, 3, 11, 13, 10, 8, 7, 9, 0, 12, 6, 14, 1, 5, 4}; + testNormalOrderByAlignByDevice(sql, ans); + } + + @Test + public void alignByDeviceOrderByTest4() { + String sql = "select num,bigNum,floatNum,str,bool from root.** order by str align by device"; + int[] ans = {3, 2, 5, 12, 0, 9, 13, 8, 4, 7, 1, 10, 6, 11, 14}; + testNormalOrderByAlignByDevice(sql, ans); + } + + @Test + public void alignByDeviceOrderByTest5() { + String sql = + "select num,bigNum,floatNum,str,bool from root.** order by num desc align by device"; + int[] ans = {2, 1, 0, 7, 8, 5, 9, 4, 3, 6, 10, 11, 13, 12, 14}; + testNormalOrderByAlignByDevice(sql, Arrays.reverse(ans)); + } + + @Test + public void alignByDeviceOrderByTest6() { + String sql = + "select num,bigNum,floatNum,str,bool from root.** order by str desc align by device"; + int[] ans = {3, 2, 5, 12, 0, 9, 13, 8, 4, 7, 1, 10, 6, 11, 14}; + testNormalOrderByAlignByDevice(sql, Arrays.reverse(ans)); + } + + @Test + public void alignByDeviceOrderByTest7() { + String sql = + "select num,bigNum,floatNum,str,bool from root.** order by bool asc, num desc align by device"; + int[] ans = {12, 11, 10, 6, 3, 8, 7, 0, 14, 13, 4, 9, 5, 1, 2}; + testNormalOrderByAlignByDevice(sql, ans); + } + + @Test + public void alignByDeviceOrderByTest8() { + String sql = + "select num,bigNum,floatNum,str,bool from root.** order by bigNum desc, floatNum desc align by device"; + int[] ans = {14, 6, 0, 12, 2, 8, 9, 7, 4, 5, 1, 3, 10, 11, 13}; + testNormalOrderByAlignByDevice(sql, ans); + } + + @Test + public void alignByDeviceOrderByTest9() { + String sql = + "select num,bigNum,floatNum,str,bool from root.** order by str desc, floatNum desc align by device"; + int[] ans = {3, 2, 5, 12, 0, 9, 13, 8, 4, 7, 1, 10, 6, 11, 14}; + testNormalOrderByAlignByDevice(sql, Arrays.reverse(ans)); + } + + private void testNormalOrderByMixAlignBy(String sql, int[] ans) { + try (Connection connection = EnvFactory.getEnv().getConnection(); + Statement statement = connection.createStatement()) { + try (ResultSet resultSet = statement.executeQuery(sql)) { + ResultSetMetaData metaData = resultSet.getMetaData(); + checkHeader( + metaData, new String[] {"Time", "Device", "num", "bigNum", "floatNum", "str", "bool"}); + int i = 0; + int total = 0; + String device = "root.sg.d"; + while (resultSet.next()) { + + String actualTime = resultSet.getString(1); + String actualDevice = resultSet.getString(2); + String actualNum = resultSet.getString(3); + String actualBigNum = resultSet.getString(4); + double actualFloatNum = resultSet.getDouble(5); + String actualStr = resultSet.getString(6); + String actualBool = resultSet.getString(7); + + assertEquals(device, actualDevice); + assertEquals(res[ans[i]][0], actualTime); + assertEquals(res[ans[i]][1], actualNum); + assertEquals(res[ans[i]][2], actualBigNum); + 
assertEquals(Double.parseDouble(res[ans[i]][3]), actualFloatNum, 0.0001); + assertEquals(res[ans[i]][4], actualStr); + assertEquals(res[ans[i]][5], actualBool); + + if (device.equals("root.sg.d2")) { + i++; + device = "root.sg.d"; + } else { + device = "root.sg.d2"; + } + + total++; + } + assertEquals(total, ans.length * 2); + assertEquals(i, ans.length); + } + } catch (Exception e) { + e.printStackTrace(); + fail(); + } + } + + private void testDeviceViewOrderByMixAlignBy(String sql, int[] ans) { + try (Connection connection = EnvFactory.getEnv().getConnection(); + Statement statement = connection.createStatement()) { + try (ResultSet resultSet = statement.executeQuery(sql)) { + ResultSetMetaData metaData = resultSet.getMetaData(); + checkHeader( + metaData, new String[] {"Time", "Device", "num", "bigNum", "floatNum", "str", "bool"}); + int i = 0; + int total = 0; + String device = "root.sg.d2"; + while (resultSet.next()) { + + String actualTime = resultSet.getString(1); + String actualDevice = resultSet.getString(2); + String actualNum = resultSet.getString(3); + String actualBigNum = resultSet.getString(4); + double actualFloatNum = resultSet.getDouble(5); + String actualStr = resultSet.getString(6); + String actualBool = resultSet.getString(7); + + assertEquals(device, actualDevice); + assertEquals(res[ans[i]][0], actualTime); + assertEquals(res[ans[i]][1], actualNum); + assertEquals(res[ans[i]][2], actualBigNum); + assertEquals(Double.parseDouble(res[ans[i]][3]), actualFloatNum, 0.0001); + assertEquals(res[ans[i]][4], actualStr); + assertEquals(res[ans[i]][5], actualBool); + + i++; + total++; + if (total == ans.length) { + i = 0; + if (device.equals("root.sg.d2")) { + device = "root.sg.d"; + } else { + device = "root.sg.d2"; + } + } + } + assertEquals(total, ans.length * 2); + assertEquals(i, ans.length); + } + } catch (Exception e) { + e.printStackTrace(); + fail(); + } + } + + private void orderByBigNumAlignByDevice(String sql, int[] ans) { + try (Connection connection = EnvFactory.getEnv().getConnection(); + Statement statement = connection.createStatement()) { + try (ResultSet resultSet = statement.executeQuery(sql)) { + ResultSetMetaData metaData = resultSet.getMetaData(); + checkHeader( + metaData, new String[] {"Time", "Device", "num", "bigNum", "floatNum", "str", "bool"}); + int i = 0; + int total = 0; + String device = "root.sg.d"; + while (resultSet.next()) { + + String actualTime = resultSet.getString(1); + String actualDevice = resultSet.getString(2); + String actualNum = resultSet.getString(3); + String actualBigNum = resultSet.getString(4); + double actualFloatNum = resultSet.getDouble(5); + String actualStr = resultSet.getString(6); + String actualBool = resultSet.getString(7); + + if (total < 4) { + i = total % 2; + if (total < 2) { + device = "root.sg.d2"; + } else { + device = "root.sg.d"; + } + } + + assertEquals(device, actualDevice); + assertEquals(res[ans[i]][0], actualTime); + assertEquals(res[ans[i]][1], actualNum); + assertEquals(res[ans[i]][2], actualBigNum); + assertEquals(Double.parseDouble(res[ans[i]][3]), actualFloatNum, 0.0001); + assertEquals(res[ans[i]][4], actualStr); + assertEquals(res[ans[i]][5], actualBool); + + if (device.equals("root.sg.d2")) { + device = "root.sg.d"; + } else { + i++; + device = "root.sg.d2"; + } + + total++; + } + assertEquals(i, ans.length); + assertEquals(total, ans.length * 2); + } + } catch (Exception e) { + e.printStackTrace(); + fail(); + } + } + + @Test + public void alignByDeviceOrderByTest12() { + String sql = + 
"select num,bigNum,floatNum,str,bool from root.** order by bigNum desc, device desc, time asc align by device"; + int[] ans = {6, 14, 0, 12, 2, 8, 9, 7, 4, 5, 1, 3, 10, 11, 13}; + orderByBigNumAlignByDevice(sql, ans); + } + + @Test + public void alignByDeviceOrderByTest13() { + String sql = + "select num,bigNum,floatNum,str,bool from root.** order by bigNum desc, time desc, device asc align by device"; + int[] ans = {14, 6, 0, 12, 2, 8, 9, 7, 4, 5, 1, 3, 10, 11, 13}; + testNormalOrderByMixAlignBy(sql, ans); + } + + @Test + public void alignByDeviceOrderByTest14() { + int[] ans = {14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0}; + String sql = + "select num,bigNum,floatNum,str,bool from root.** order by time desc, bigNum desc, device asc align by device"; + testNormalOrderByMixAlignBy(sql, ans); + } + + @Test + public void alignByDeviceOrderByTest15() { + int[] ans = {6, 14, 0, 12, 2, 8, 9, 7, 4, 5, 1, 3, 10, 11, 13}; + String sql = + "select num,bigNum,floatNum,str,bool from root.** order by device desc, bigNum desc, time asc align by device"; + testDeviceViewOrderByMixAlignBy(sql, ans); + } + + @Test + public void alignByDeviceOrderByTest16() { + String sql = + "select num,bigNum,floatNum,str,bool from root.** order by device desc, time asc, bigNum desc align by device"; + int[] ans = {0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14}; + testDeviceViewOrderByMixAlignBy(sql, ans); + } + + @Test + public void alignByDeviceOrderByTest17() { + String sql = + "select num,bigNum,floatNum,str,bool from root.** order by bigNum desc, device desc, num asc, time asc align by device"; + int[] ans = {6, 14, 0, 12, 2, 8, 9, 7, 4, 5, 1, 3, 10, 11, 13}; + orderByBigNumAlignByDevice(sql, ans); + } + + // 5. aggregation query align by device + @Test + public void orderByInAggregationAlignByDeviceTest() { + String sql = + "select avg(num) from root.** group by session(10000ms) order by avg(num) align by device"; + + double[] ans = {4.6, 4.6, 6.4, 6.4, 11.0, 11.0, 13.0, 13.0, 13.0, 13.0, 15.0, 15.0}; + long[] times = + new long[] { + 0L, + 0L, + 31536000000L, + 31536000000L, + 31536100000L, + 31536100000L, + 41536000000L, + 41536900000L, + 41536000000L, + 41536900000L, + 51536000000L, + 51536000000L + }; + String[] device = + new String[] { + "root.sg.d", + "root.sg.d2", + "root.sg.d", + "root.sg.d2", + "root.sg.d", + "root.sg.d2", + "root.sg.d", + "root.sg.d", + "root.sg.d2", + "root.sg.d2", + "root.sg.d", + "root.sg.d2" + }; + try (Connection connection = EnvFactory.getEnv().getConnection(); + Statement statement = connection.createStatement()) { + try (ResultSet resultSet = statement.executeQuery(sql)) { + int i = 0; + while (resultSet.next()) { + long actualTime = resultSet.getLong(1); + String actualDevice = resultSet.getString(2); + double actualAvg = resultSet.getDouble(3); + + assertEquals(device[i], actualDevice); + assertEquals(times[i], actualTime); + assertEquals(ans[i], actualAvg, 0.0001); + i++; + } + assertEquals(i, ans.length); + } + } catch (Exception e) { + e.printStackTrace(); + fail(); + } + } + + @Test + public void orderByInAggregationAlignByDeviceTest2() { + String sql = "select avg(num) from root.** order by avg(num) align by device"; + String value = "8"; + checkSingleDouble(sql, value, true); + } + + private void checkSingleDouble(String sql, Object value, boolean deviceAsc) { + String device = "root.sg.d"; + if (!deviceAsc) device = "root.sg.d2"; + try (Connection connection = EnvFactory.getEnv().getConnection(); + Statement statement = connection.createStatement()) { + try 
(ResultSet resultSet = statement.executeQuery(sql)) { + int i = 0; + while (resultSet.next()) { + String deviceName = resultSet.getString(1); + double actualVal = resultSet.getDouble(2); + assertEquals(deviceName, device); + assertEquals(Double.parseDouble(value.toString()), actualVal, 1); + if (device.equals("root.sg.d")) device = "root.sg.d2"; + else device = "root.sg.d"; + i++; + } + assertEquals(i, 2); + } + } catch (Exception e) { + e.printStackTrace(); + fail(); + } + } + + @Test + public void orderByInAggregationAlignByDeviceTest3() { + String sql = + "select avg(num)+avg(bigNum) from root.** order by max_value(floatNum) align by device"; + long value = 2388936669L + 8; + checkSingleDouble(sql, value, true); + } + + @Test + public void orderByInAggregationAlignByDeviceTest4() { + + String sql = + "select avg(num)+avg(bigNum) from root.** order by max_value(floatNum)+min_value(num) align by device"; + long value = 2388936669L + 8; + checkSingleDouble(sql, value, true); + } + + @Test + public void orderByInAggregationAlignByDeviceTest5() { + String sql = + "select avg(num) from root.** order by max_value(floatNum)+avg(num) align by device"; + String value = "8"; + checkSingleDouble(sql, value, true); + } + + @Test + public void orderByInAggregationAlignByDeviceTest6() { + String sql = + "select avg(num) from root.** order by max_value(floatNum)+avg(num), device asc, time desc align by device"; + String value = "8"; + checkSingleDouble(sql, value, true); + } + + @Test + public void orderByInAggregationAlignByDeviceTest7() { + String sql = + "select avg(num) from root.** order by max_value(floatNum)+avg(num), time asc, device desc align by device"; + String value = "8"; + checkSingleDouble(sql, value, false); + } + + @Test + public void orderByInAggregationAlignByDeviceTest8() { + String sql = + "select avg(num) from root.** order by time asc, max_value(floatNum)+avg(num), device desc align by device"; + String value = "8"; + checkSingleDouble(sql, value, false); + } + + @Test + public void orderByInAggregationAlignByDeviceTest9() { + String sql = + "select avg(num) from root.** order by device asc, max_value(floatNum)+avg(num), time desc align by device"; + String value = "8"; + checkSingleDouble(sql, value, true); + } + + @Test + public void orderByInAggregationAlignByDeviceTest10() { + String sql = + "select avg(num) from root.** order by max_value(floatNum) desc,time asc, avg(num) asc, device desc align by device"; + String value = "8"; + checkSingleDouble(sql, value, false); + } + + @Test + public void orderByInAggregationAlignByDeviceTest11() { + String sql = + "select avg(num) from root.** order by max_value(floatNum) desc,device asc, avg(num) asc, time desc align by device"; + String value = "8"; + checkSingleDouble(sql, value, true); + } + + @Test + public void orderByInAggregationAlignByDeviceTest12() { + String sql = + "select avg(num+floatNum) from root.** order by time,avg(num+floatNum) align by device"; + String value = "537.34154"; + checkSingleDouble(sql, value, true); + } + + @Test + public void orderByInAggregationAlignByDeviceTest13() { + String sql = "select avg(num) from root.** order by time,avg(num+floatNum) align by device"; + String value = "8"; + checkSingleDouble(sql, value, true); + } + + @Test + public void orderByInAggregationAlignByDeviceTest14() { + String sql = "select avg(num+floatNum) from root.** order by time,avg(num) align by device"; + String value = "537.34154"; + checkSingleDouble(sql, value, true); + } + + String[][] UDFRes = + new String[][] { 
+ {"0", "3", "0", "0"}, + {"20", "2", "0", "0"}, + {"40", "1", "0", "0"}, + {"80", "9", "0", "0"}, + {"100", "8", "0", "0"}, + {"31536000000", "6", "0", "0"}, + {"31536000100", "10", "0", "0"}, + {"31536000500", "4", "0", "0"}, + {"31536001000", "5", "0", "0"}, + {"31536010000", "7", "0", "0"}, + {"31536100000", "11", "0", "0"}, + {"41536000000", "12", "2146483648", "0"}, + {"41536000020", "14", "0", "14"}, + {"41536900000", "13", "2107483648", "0"}, + {"51536000000", "15", "0", "15"}, + }; + + // UDF Test + private void orderByUDFTest(String sql, int[] ans) { + try (Connection connection = EnvFactory.getEnv().getConnection(); + Statement statement = connection.createStatement()) { + try (ResultSet resultSet = statement.executeQuery(sql)) { + int i = 0; + while (resultSet.next()) { + String time = resultSet.getString(1); + String num = resultSet.getString(2); + String topK = resultSet.getString(3); + String bottomK = resultSet.getString(4); + + assertEquals(time, UDFRes[ans[i]][0]); + assertEquals(num, UDFRes[ans[i]][1]); + if (Objects.equals(UDFRes[ans[i]][3], "0")) { + assertNull(topK); + } else { + assertEquals(topK, UDFRes[ans[i]][3]); + } + + if (Objects.equals(UDFRes[ans[i]][2], "0")) { + assertNull(bottomK); + } else { + assertEquals(bottomK, UDFRes[ans[i]][2]); + } + + i++; + } + assertEquals(i, 15); + } + } catch (Exception e) { + e.printStackTrace(); + fail(); + } + } + + @Test + public void orderByUDFTest1() { + String sql = + "select num, top_k(num, 'k'='2'), bottom_k(bigNum, 'k'='2') from root.sg.d order by top_k(num, 'k'='2') nulls first, bottom_k(bigNum, 'k'='2') nulls first"; + int[] ans = {0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 13, 11, 12, 14}; + orderByUDFTest(sql, ans); + } + + @Test + public void orderByUDFTest2() { + String sql = + "select num, top_k(num, 'k'='2'), bottom_k(bigNum, 'k'='2') from root.sg.d order by top_k(num, 'k'='2'), bottom_k(bigNum, 'k'='2')"; + int[] ans = {12, 14, 13, 11, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10}; + orderByUDFTest(sql, ans); + } + + private void errorTest(String sql, String error) { + try (Connection connection = EnvFactory.getEnv().getConnection(); + Statement statement = connection.createStatement()) { + statement.executeQuery(sql); + } catch (Exception e) { + assertEquals(error, e.getMessage()); + } + } + + @Test + public void errorTest1() { + errorTest( + "select num from root.sg.d order by avg(bigNum)", + "701: Raw data and aggregation hybrid query is not supported."); + } + + @Test + public void errorTest2() { + errorTest( + "select avg(num) from root.sg.d order by bigNum", + "701: Raw data and aggregation hybrid query is not supported."); + } + + @Test + public void errorTest3() { + errorTest( + "select bigNum,floatNum from root.sg.d order by s1", + "701: root.sg.d.s1 in order by clause doesn't exist."); + } + + @Test + public void errorTest4() { + errorTest( + "select bigNum,floatNum from root.** order by bigNum", + "701: root.**.bigNum in order by clause shouldn't refer to more than one timeseries."); + } + + @Test + public void errorTest5() { + errorTest( + "select bigNum,floatNum from root.** order by s1 align by device", + "701: s1 in order by clause doesn't exist."); + } + + @Test + public void errorTest6() { + errorTest( + "select bigNum,floatNum from root.** order by root.sg.d.bigNum align by device", + "701: ALIGN BY DEVICE: the suffix paths can only be measurement or one-level wildcard"); + } + + @Test + public void errorTest7() { + errorTest( + "select last bigNum,floatNum from root.** order by root.sg.d.bigNum", + "701: 
root.sg.d.bigNum in order by clause doesn't exist in the result of last query."); + } + + // last query + public void testLastQueryOrderBy(String sql, String[][] ans) { + try (Connection connection = EnvFactory.getEnv().getConnection(); + Statement statement = connection.createStatement()) { + try (ResultSet resultSet = statement.executeQuery(sql)) { + int i = 0; + while (resultSet.next()) { + String time = resultSet.getString(1); + String num = resultSet.getString(2); + String value = resultSet.getString(3); + String dataType = resultSet.getString(4); + + assertEquals(time, ans[0][i]); + assertEquals(num, ans[1][i]); + assertEquals(value, ans[2][i]); + assertEquals(dataType, ans[3][i]); + + i++; + } + assertEquals(i, 4); + } + } catch (Exception e) { + e.printStackTrace(); + fail(); + } + } + + @Test + public void lastQueryOrderBy() { + String[][] ans = + new String[][] { + {"51536000000", "51536000000", "51536000000", "51536000000"}, + {"root.sg.d.num", "root.sg.d2.num", "root.sg.d.bigNum", "root.sg.d2.bigNum"}, + {"15", "15", "3147483648", "3147483648"}, + {"INT32", "INT32", "INT64", "INT64"} + }; + String sql = "select last bigNum,num from root.** order by value, timeseries"; + testLastQueryOrderBy(sql, ans); + } + + @Test + public void lastQueryOrderBy2() { + String[][] ans = + new String[][] { + {"51536000000", "51536000000", "51536000000", "51536000000"}, + {"root.sg.d2.num", "root.sg.d2.bigNum", "root.sg.d.num", "root.sg.d.bigNum"}, + {"15", "3147483648", "15", "3147483648"}, + {"INT32", "INT64", "INT32", "INT64"} + }; + String sql = "select last bigNum,num from root.** order by timeseries desc"; + testLastQueryOrderBy(sql, ans); + } + + @Test + public void lastQueryOrderBy3() { + String[][] ans = + new String[][] { + {"51536000000", "51536000000", "51536000000", "51536000000"}, + {"root.sg.d2.num", "root.sg.d2.bigNum", "root.sg.d.num", "root.sg.d.bigNum"}, + {"15", "3147483648", "15", "3147483648"}, + {"INT32", "INT64", "INT32", "INT64"} + }; + String sql = "select last bigNum,num from root.** order by timeseries desc, value asc"; + testLastQueryOrderBy(sql, ans); + } + + @Test + public void lastQueryOrderBy4() { + String[][] ans = + new String[][] { + {"51536000000", "51536000000", "51536000000", "51536000000"}, + {"root.sg.d2.num", "root.sg.d.num", "root.sg.d2.bigNum", "root.sg.d.bigNum"}, + {"15", "15", "3147483648", "3147483648"}, + {"INT32", "INT32", "INT64", "INT64"} + }; + String sql = "select last bigNum,num from root.** order by value, timeseries desc"; + testLastQueryOrderBy(sql, ans); + } + + @Test + public void lastQueryOrderBy5() { + String[][] ans = + new String[][] { + {"51536000000", "51536000000", "51536000000", "51536000000"}, + {"root.sg.d2.num", "root.sg.d.num", "root.sg.d2.bigNum", "root.sg.d.bigNum"}, + {"15", "15", "3147483648", "3147483648"}, + {"INT32", "INT32", "INT64", "INT64"} + }; + String sql = "select last bigNum,num from root.** order by datatype, timeseries desc"; + testLastQueryOrderBy(sql, ans); + } +} diff --git a/integration-test/src/test/java/org/apache/iotdb/db/it/regionscan/IoTDBActiveRegionScanIT.java b/integration-test/src/test/java/org/apache/iotdb/db/it/regionscan/IoTDBActiveRegionScanIT.java index bfbba1b0f6b9..c608bf3e24c0 100644 --- a/integration-test/src/test/java/org/apache/iotdb/db/it/regionscan/IoTDBActiveRegionScanIT.java +++ b/integration-test/src/test/java/org/apache/iotdb/db/it/regionscan/IoTDBActiveRegionScanIT.java @@ -285,6 +285,28 @@ public void showActiveDeviceTest10() { basicShowActiveDeviceTest(sql, 
SHOW_DEVICES_COLUMN_NAMES, retArray); } + @Test + public void showActiveDeviceEmptyTest() { + String sql = "show devices root.empty where time < 50"; + String[] retArray = new String[] {}; + basicShowActiveDeviceTest(sql, SHOW_DEVICES_COLUMN_NAMES, retArray); + + sql = "count devices root.empty where time < 50"; + long value = 0; + basicCountActiveDeviceTest(sql, COUNT_DEVICES_COLUMN_NAMES, value); + } + + @Test + public void showActiveTimeseriesEmptyTest() { + String sql = "show timeseries root.empty where time < 50"; + String[] retArray = new String[] {}; + basicShowActiveDeviceTest(sql, SHOW_TIMESERIES_COLUMN_NAMES, retArray); + + sql = "count timeseries root.empty where time < 50"; + long value = 0; + basicCountActiveDeviceTest(sql, COUNT_TIMESERIES_COLUMN_NAMES, value); + } + @Test public void showActiveTimeseriesTest() { String sql = "show timeseries where time = 4"; @@ -461,7 +483,6 @@ public static void basicCountActiveDeviceTest(String sql, String columnName, lon try (ResultSet resultSet = statement.executeQuery(sql)) { ResultSetMetaData resultSetMetaData = resultSet.getMetaData(); - Map map = new HashMap<>(); assertEquals(1, resultSetMetaData.getColumnCount()); assertEquals(columnName, resultSetMetaData.getColumnName(1)); int cnt = 0; diff --git a/integration-test/src/test/java/org/apache/iotdb/db/it/schema/IoTDBCreateTimeseriesIT.java b/integration-test/src/test/java/org/apache/iotdb/db/it/schema/IoTDBCreateTimeseriesIT.java index fe9832422c0b..9bb22ea599a4 100644 --- a/integration-test/src/test/java/org/apache/iotdb/db/it/schema/IoTDBCreateTimeseriesIT.java +++ b/integration-test/src/test/java/org/apache/iotdb/db/it/schema/IoTDBCreateTimeseriesIT.java @@ -38,6 +38,7 @@ import java.util.HashSet; import java.util.Set; +import static org.junit.Assert.assertThrows; import static org.junit.Assert.fail; /** @@ -264,4 +265,34 @@ public void testQueryDataFromTimeSeriesWithoutData() { } Assert.assertEquals(0, cnt); } + + @Test + public void testIllegalInput() { + try (Connection connection = EnvFactory.getEnv().getConnection(); + Statement statement = connection.createStatement()) { + statement.execute("create timeseries root.sg2.d.s1 with datatype=INT64"); + assertThrows( + "Unsupported datatype: UNKNOWN", + SQLException.class, + () -> statement.execute("create timeseries root.sg2.d.s1 with datatype=UNKNOWN")); + assertThrows( + "Unsupported datatype: VECTOR", + SQLException.class, + () -> statement.execute("create timeseries root.sg2.d.s1 with datatype=VECTOR")); + assertThrows( + "Unsupported datatype: YES", + SQLException.class, + () -> statement.execute("create timeseries root.sg2.d.s1 with datatype=YES")); + assertThrows( + "Unsupported datatype: UNKNOWN", + SQLException.class, + () -> statement.execute("create device template t1 (s1 UNKNOWN, s2 boolean)")); + assertThrows( + "Unsupported datatype: VECTOR", + SQLException.class, + () -> statement.execute("create device template t1 (s1 VECTOR, s2 boolean)")); + } catch (SQLException ignored) { + fail(); + } + } } diff --git a/integration-test/src/test/java/org/apache/iotdb/db/it/selectinto/IoTDBSelectIntoIT.java b/integration-test/src/test/java/org/apache/iotdb/db/it/selectinto/IoTDBSelectIntoIT.java index 0f8ca0df38a9..fc3f71df0aee 100644 --- a/integration-test/src/test/java/org/apache/iotdb/db/it/selectinto/IoTDBSelectIntoIT.java +++ b/integration-test/src/test/java/org/apache/iotdb/db/it/selectinto/IoTDBSelectIntoIT.java @@ -91,7 +91,7 @@ public class IoTDBSelectIntoIT { SELECT_INTO_SQL_LIST.add("CREATE DATABASE root.sg_type"); 
for (int deviceId = 0; deviceId < 6; deviceId++) { for (TSDataType dataType : TSDataType.values()) { - if (!dataType.equals(TSDataType.VECTOR)) { + if (!dataType.equals(TSDataType.VECTOR) && !dataType.equals(TSDataType.UNKNOWN)) { SELECT_INTO_SQL_LIST.add( String.format( "CREATE TIMESERIES root.sg_type.d_%d.s_%s %s", diff --git a/integration-test/src/test/java/org/apache/iotdb/db/it/udaf/IoTDBUDAFOrderByIT.java b/integration-test/src/test/java/org/apache/iotdb/db/it/udaf/IoTDBUDAFOrderByIT.java index 5114ba42d5bc..46f0a35a249e 100644 --- a/integration-test/src/test/java/org/apache/iotdb/db/it/udaf/IoTDBUDAFOrderByIT.java +++ b/integration-test/src/test/java/org/apache/iotdb/db/it/udaf/IoTDBUDAFOrderByIT.java @@ -53,7 +53,7 @@ public class IoTDBUDAFOrderByIT { "CREATE DATABASE root.sg", "CREATE TIMESERIES root.sg.d.num WITH DATATYPE=INT32, ENCODING=RLE", "CREATE TIMESERIES root.sg.d.bigNum WITH DATATYPE=INT64, ENCODING=RLE", - "CREATE TIMESERIES root.sg.d.floatNum WITH DATATYPE=DOUBLE, ENCODING=RLE, 'MAX_POINT_NUMBER'='5'", + "CREATE TIMESERIES root.sg.d.floatNum WITH DATATYPE=DOUBLE, ENCODING=PLAIN", "CREATE TIMESERIES root.sg.d.str WITH DATATYPE=TEXT, ENCODING=PLAIN", "CREATE TIMESERIES root.sg.d.bool WITH DATATYPE=BOOLEAN, ENCODING=PLAIN", "insert into root.sg.d(timestamp,num,bigNum,floatNum,str,bool) values(0,3,2947483648,231.2121,\"coconut\",FALSE)", @@ -74,7 +74,7 @@ public class IoTDBUDAFOrderByIT { "flush", "CREATE TIMESERIES root.sg.d2.num WITH DATATYPE=INT32, ENCODING=RLE", "CREATE TIMESERIES root.sg.d2.bigNum WITH DATATYPE=INT64, ENCODING=RLE", - "CREATE TIMESERIES root.sg.d2.floatNum WITH DATATYPE=DOUBLE, ENCODING=RLE, 'MAX_POINT_NUMBER'='5'", + "CREATE TIMESERIES root.sg.d2.floatNum WITH DATATYPE=DOUBLE, ENCODING=PLAIN", "CREATE TIMESERIES root.sg.d2.str WITH DATATYPE=TEXT, ENCODING=PLAIN", "CREATE TIMESERIES root.sg.d2.bool WITH DATATYPE=BOOLEAN, ENCODING=PLAIN", "insert into root.sg.d2(timestamp,num,bigNum,floatNum,str,bool) values(0,3,2947483648,231.2121,\"coconut\",FALSE)", @@ -92,7 +92,6 @@ public class IoTDBUDAFOrderByIT { "insert into root.sg.d2(timestamp,num,bigNum,floatNum,str,bool) values(41536000020,14,2907483648,231.34,\"cherry\",FALSE)", "insert into root.sg.d2(timestamp,num,bigNum,floatNum,str,bool) values(41536900000,13,2107483648,54.12,\"lychee\",TRUE)", "insert into root.sg.d2(timestamp,num,bigNum,floatNum,str,bool) values(51536000000,15,3147483648,235.213,\"watermelon\",TRUE)", - "flush", }; @BeforeClass diff --git a/integration-test/src/test/java/org/apache/iotdb/pipe/it/autocreate/IoTDBPipeAutoConflictIT.java b/integration-test/src/test/java/org/apache/iotdb/pipe/it/autocreate/IoTDBPipeAutoConflictIT.java index 1c021b074215..c00814cab0d4 100644 --- a/integration-test/src/test/java/org/apache/iotdb/pipe/it/autocreate/IoTDBPipeAutoConflictIT.java +++ b/integration-test/src/test/java/org/apache/iotdb/pipe/it/autocreate/IoTDBPipeAutoConflictIT.java @@ -188,8 +188,13 @@ public void testDoubleLivingAutoConflict() throws Exception { TestUtils.assertDataEventuallyOnEnv( receiverEnv, "select * from root.**", "Time,root.db.d1.s1,", expectedResSet); - TestUtils.restartCluster(senderEnv); - TestUtils.restartCluster(receiverEnv); + try { + TestUtils.restartCluster(senderEnv); + TestUtils.restartCluster(receiverEnv); + } catch (final Throwable e) { + e.printStackTrace(); + return; + } for (int i = 400; i < 500; ++i) { if (!TestUtils.tryExecuteNonQueryWithRetry( diff --git a/integration-test/src/test/java/org/apache/iotdb/pipe/it/autocreate/IoTDBPipeAutoDropIT.java 
b/integration-test/src/test/java/org/apache/iotdb/pipe/it/autocreate/IoTDBPipeAutoDropIT.java index ecd0b2d5775a..60c0ba37d4b2 100644 --- a/integration-test/src/test/java/org/apache/iotdb/pipe/it/autocreate/IoTDBPipeAutoDropIT.java +++ b/integration-test/src/test/java/org/apache/iotdb/pipe/it/autocreate/IoTDBPipeAutoDropIT.java @@ -34,6 +34,7 @@ import java.util.Map; public class IoTDBPipeAutoDropIT extends AbstractPipeDualAutoIT { + @Test public void testAutoDropInHistoricalTransfer() throws Exception { final DataNodeWrapper receiverDataNode = receiverEnv.getDataNodeWrapper(0); @@ -70,10 +71,69 @@ public void testAutoDropInHistoricalTransfer() throws Exception { Assert.assertEquals( TSStatusCode.SUCCESS_STATUS.getStatusCode(), client.startPipe("p1").getCode()); + TestUtils.assertDataEventuallyOnEnv( + receiverEnv, + "select count(*) from root.**", + "count(root.db.d1.s1),", + Collections.singleton("1,")); + + TestUtils.assertDataEventuallyOnEnv( + senderEnv, + "show pipes", + "ID,CreationTime,State,PipeSource,PipeProcessor,PipeSink,ExceptionMessage,RemainingEventCount,EstimatedRemainingSeconds,", + Collections.emptySet()); + } + } + + @Test + public void testAutoDropInHistoricalTransferWithTimeRange() throws Exception { + final DataNodeWrapper receiverDataNode = receiverEnv.getDataNodeWrapper(0); + + final String receiverIp = receiverDataNode.getIp(); + final int receiverPort = receiverDataNode.getPort(); + + try (final SyncConfigNodeIServiceClient client = + (SyncConfigNodeIServiceClient) senderEnv.getLeaderConfigNodeConnection()) { + + if (!TestUtils.tryExecuteNonQueryWithRetry( + senderEnv, + "insert into root.db.d1(time, s1) values (1000, 1), (2000, 2), (3000, 3), (4000, 4), (5000, 5)")) { + return; + } + + final Map extractorAttributes = new HashMap<>(); + final Map processorAttributes = new HashMap<>(); + final Map connectorAttributes = new HashMap<>(); + + extractorAttributes.put("extractor.mode", "query"); + extractorAttributes.put("extractor.start-time", "1970-01-01T08:00:02+08:00"); + extractorAttributes.put("extractor.end-time", "1970-01-01T08:00:04+08:00"); + + connectorAttributes.put("connector", "iotdb-thrift-connector"); + connectorAttributes.put("connector.batch.enable", "false"); + connectorAttributes.put("connector.ip", receiverIp); + connectorAttributes.put("connector.port", Integer.toString(receiverPort)); + + final TSStatus status = + client.createPipe( + new TCreatePipeReq("p1", connectorAttributes) + .setExtractorAttributes(extractorAttributes) + .setProcessorAttributes(processorAttributes)); + + Assert.assertEquals(TSStatusCode.SUCCESS_STATUS.getStatusCode(), status.getCode()); + Assert.assertEquals( + TSStatusCode.SUCCESS_STATUS.getStatusCode(), client.startPipe("p1").getCode()); + + TestUtils.assertDataEventuallyOnEnv( + receiverEnv, + "select count(*) from root.**", + "count(root.db.d1.s1),", + Collections.singleton("3,")); + TestUtils.assertDataEventuallyOnEnv( senderEnv, "show pipes", - "ID,CreationTime,State,PipeSource,PipeProcessor,PipeSink,ExceptionMessage,", + "ID,CreationTime,State,PipeSource,PipeProcessor,PipeSink,ExceptionMessage,RemainingEventCount,EstimatedRemainingSeconds,", Collections.emptySet()); } } diff --git a/integration-test/src/test/java/org/apache/iotdb/pipe/it/autocreate/IoTDBPipeClusterIT.java b/integration-test/src/test/java/org/apache/iotdb/pipe/it/autocreate/IoTDBPipeClusterIT.java index aeab3f74463f..a017cf112d7a 100644 --- a/integration-test/src/test/java/org/apache/iotdb/pipe/it/autocreate/IoTDBPipeClusterIT.java +++ 
b/integration-test/src/test/java/org/apache/iotdb/pipe/it/autocreate/IoTDBPipeClusterIT.java @@ -226,7 +226,7 @@ public void testPipeAfterDataRegionLeaderStop() throws Exception { leaderIndex = i; try { senderEnv.shutdownDataNode(i); - } catch (final Exception e) { + } catch (final Throwable e) { e.printStackTrace(); return; } @@ -237,7 +237,7 @@ public void testPipeAfterDataRegionLeaderStop() throws Exception { try { senderEnv.startDataNode(i); ((AbstractEnv) senderEnv).checkClusterStatusWithoutUnknown(); - } catch (final Exception e) { + } catch (final Throwable e) { e.printStackTrace(); return; } @@ -261,8 +261,13 @@ public void testPipeAfterDataRegionLeaderStop() throws Exception { Collections.singleton("2,")); } - TestUtils.restartCluster(senderEnv); - TestUtils.restartCluster(receiverEnv); + try { + TestUtils.restartCluster(senderEnv); + TestUtils.restartCluster(receiverEnv); + } catch (final Throwable e) { + e.printStackTrace(); + return; + } try (final SyncConfigNodeIServiceClient client = (SyncConfigNodeIServiceClient) senderEnv.getLeaderConfigNodeConnection()) { @@ -338,7 +343,7 @@ public void testPipeAfterRegisterNewDataNode() throws Exception { try { senderEnv.registerNewDataNode(true); - } catch (final Exception e) { + } catch (final Throwable e) { e.printStackTrace(); return; } @@ -357,8 +362,13 @@ public void testPipeAfterRegisterNewDataNode() throws Exception { Collections.singleton("2,")); } - TestUtils.restartCluster(senderEnv); - TestUtils.restartCluster(receiverEnv); + try { + TestUtils.restartCluster(senderEnv); + TestUtils.restartCluster(receiverEnv); + } catch (final Throwable e) { + e.printStackTrace(); + return; + } try (final SyncConfigNodeIServiceClient client = (SyncConfigNodeIServiceClient) senderEnv.getLeaderConfigNodeConnection()) { @@ -439,7 +449,7 @@ public void testCreatePipeWhenRegisteringNewDataNode() throws Exception { t.start(); try { senderEnv.registerNewDataNode(true); - } catch (final Exception e) { + } catch (final Throwable e) { e.printStackTrace(); return; } @@ -500,7 +510,7 @@ public void testRegisteringNewDataNodeWhenTransferringData() throws Exception { t.start(); try { senderEnv.registerNewDataNode(true); - } catch (final Exception e) { + } catch (final Throwable e) { e.printStackTrace(); return; } @@ -518,7 +528,7 @@ public void testRegisteringNewDataNodeWhenTransferringData() throws Exception { try { senderEnv.shutdownDataNode(senderEnv.getDataNodeWrapperList().size() - 1); senderEnv.getDataNodeWrapperList().remove(senderEnv.getDataNodeWrapperList().size() - 1); - } catch (final Exception e) { + } catch (final Throwable e) { e.printStackTrace(); } } @@ -562,7 +572,7 @@ public void testRegisteringNewDataNodeAfterTransferringData() throws Exception { try { senderEnv.registerNewDataNode(true); - } catch (final Exception e) { + } catch (final Throwable e) { e.printStackTrace(); return; } @@ -580,7 +590,7 @@ public void testRegisteringNewDataNodeAfterTransferringData() throws Exception { try { senderEnv.shutdownDataNode(senderEnv.getDataNodeWrapperList().size() - 1); senderEnv.getDataNodeWrapperList().remove(senderEnv.getDataNodeWrapperList().size() - 1); - } catch (final Exception e) { + } catch (final Throwable e) { e.printStackTrace(); } } @@ -634,7 +644,7 @@ public void testNewDataNodeFailureParallelToTransferringData() throws Exception senderEnv.shutdownDataNode(senderEnv.getDataNodeWrapperList().size() - 1); senderEnv.getDataNodeWrapperList().remove(senderEnv.getDataNodeWrapperList().size() - 1); ((AbstractEnv) 
senderEnv).checkClusterStatusWithoutUnknown(); - } catch (final Exception e) { + } catch (final Throwable e) { e.printStackTrace(); return; } @@ -695,7 +705,13 @@ public void testSenderRestartWhenTransferring() throws Exception { return; } - TestUtils.restartCluster(senderEnv); + try { + TestUtils.restartCluster(senderEnv); + } catch (final Throwable e) { + e.printStackTrace(); + return; + } + TestUtils.assertDataEventuallyOnEnv( receiverEnv, "select count(*) from root.**", diff --git a/integration-test/src/test/java/org/apache/iotdb/pipe/it/autocreate/IoTDBPipeConnectorCompressionIT.java b/integration-test/src/test/java/org/apache/iotdb/pipe/it/autocreate/IoTDBPipeConnectorCompressionIT.java index be4aa458d9fb..f7c0c63b842e 100644 --- a/integration-test/src/test/java/org/apache/iotdb/pipe/it/autocreate/IoTDBPipeConnectorCompressionIT.java +++ b/integration-test/src/test/java/org/apache/iotdb/pipe/it/autocreate/IoTDBPipeConnectorCompressionIT.java @@ -22,6 +22,8 @@ import org.apache.iotdb.common.rpc.thrift.TSStatus; import org.apache.iotdb.commons.client.sync.SyncConfigNodeIServiceClient; import org.apache.iotdb.confignode.rpc.thrift.TCreatePipeReq; +import org.apache.iotdb.confignode.rpc.thrift.TShowPipeInfo; +import org.apache.iotdb.confignode.rpc.thrift.TShowPipeReq; import org.apache.iotdb.consensus.ConsensusFactory; import org.apache.iotdb.db.it.utils.TestUtils; import org.apache.iotdb.it.env.MultiEnvFactory; @@ -36,11 +38,17 @@ import org.junit.experimental.categories.Category; import org.junit.runner.RunWith; +import java.sql.Connection; +import java.sql.SQLException; +import java.sql.Statement; import java.util.Arrays; import java.util.Collections; import java.util.HashMap; +import java.util.List; import java.util.Map; +import static org.junit.Assert.fail; + @RunWith(IoTDBTestRunner.class) @Category({MultiClusterIT2AutoCreateSchema.class}) public class IoTDBPipeConnectorCompressionIT extends AbstractPipeDualAutoIT { @@ -179,4 +187,122 @@ private void doTest( Collections.singleton("8,")); } } + + @Test + public void testZstdCompressorLevel() throws Exception { + final DataNodeWrapper receiverDataNode = receiverEnv.getDataNodeWrapper(0); + + final String receiverIp = receiverDataNode.getIp(); + final int receiverPort = receiverDataNode.getPort(); + + try (final SyncConfigNodeIServiceClient client = + (SyncConfigNodeIServiceClient) senderEnv.getLeaderConfigNodeConnection()) { + if (!TestUtils.tryExecuteNonQueriesWithRetry( + senderEnv, + Arrays.asList( + "insert into root.db.d1(time, s1) values (1, 1)", + "insert into root.db.d1(time, s2) values (1, 1)", + "insert into root.db.d1(time, s3) values (1, 1)", + "insert into root.db.d1(time, s4) values (1, 1)", + "insert into root.db.d1(time, s5) values (1, 1)", + "flush"))) { + return; + } + + // Create 5 pipes with different zstd compression levels, p4 and p5 should fail. 
+ + try (final Connection connection = senderEnv.getConnection(); + final Statement statement = connection.createStatement()) { + statement.execute( + String.format( + "create pipe p1" + + " with extractor ('extractor.pattern'='root.db.d1.s1')" + + " with connector (" + + "'connector.ip'='%s'," + + "'connector.port'='%s'," + + "'connector.compressor'='zstd, zstd'," + + "'connector.compressor.zstd.level'='3')", + receiverIp, receiverPort)); + } catch (SQLException e) { + e.printStackTrace(); + fail(e.getMessage()); + } + + try (final Connection connection = senderEnv.getConnection(); + final Statement statement = connection.createStatement()) { + statement.execute( + String.format( + "create pipe p2" + + " with extractor ('extractor.pattern'='root.db.d1.s2')" + + " with connector (" + + "'connector.ip'='%s'," + + "'connector.port'='%s'," + + "'connector.compressor'='zstd, zstd'," + + "'connector.compressor.zstd.level'='22')", + receiverIp, receiverPort)); + } catch (SQLException e) { + e.printStackTrace(); + fail(e.getMessage()); + } + + try (final Connection connection = senderEnv.getConnection(); + final Statement statement = connection.createStatement()) { + statement.execute( + String.format( + "create pipe p3" + + " with extractor ('extractor.pattern'='root.db.d1.s3')" + + " with connector (" + + "'connector.ip'='%s'," + + "'connector.port'='%s'," + + "'connector.compressor'='zstd, zstd'," + + "'connector.compressor.zstd.level'='-131072')", + receiverIp, receiverPort)); + } catch (SQLException e) { + e.printStackTrace(); + fail(e.getMessage()); + } + + try (final Connection connection = senderEnv.getConnection(); + final Statement statement = connection.createStatement()) { + statement.execute( + String.format( + "create pipe p4" + + " with extractor ('extractor.pattern'='root.db.d1.s4')" + + " with connector (" + + "'connector.ip'='%s'," + + "'connector.port'='%s'," + + "'connector.compressor'='zstd, zstd'," + + "'connector.compressor.zstd.level'='-131073')", + receiverIp, receiverPort)); + fail(); + } catch (SQLException e) { + // Make sure the error message in IoTDBConnector.java is returned + Assert.assertTrue(e.getMessage().contains("Zstd compression level should be in the range")); + } + + try (final Connection connection = senderEnv.getConnection(); + final Statement statement = connection.createStatement()) { + statement.execute( + String.format( + "create pipe p5" + + " with extractor ('extractor.pattern'='root.db.d1.s5')" + + " with connector (" + + "'connector.ip'='%s'," + + "'connector.port'='%s'," + + "'connector.compressor'='zstd, zstd'," + + "'connector.compressor.zstd.level'='23')", + receiverIp, receiverPort)); + fail(); + } catch (SQLException e) { + // Make sure the error message in IoTDBConnector.java is returned + Assert.assertTrue(e.getMessage().contains("Zstd compression level should be in the range")); + } + + final List showPipeResult = client.showPipe(new TShowPipeReq()).pipeInfoList; + Assert.assertEquals(3, showPipeResult.size()); + + TestUtils.assertDataEventuallyOnEnv( + receiverEnv, "count timeseries", "count(timeseries),", Collections.singleton("3,")); + } + } } diff --git a/integration-test/src/test/java/org/apache/iotdb/pipe/it/autocreate/IoTDBPipeLifeCycleIT.java b/integration-test/src/test/java/org/apache/iotdb/pipe/it/autocreate/IoTDBPipeLifeCycleIT.java index 745376aa9733..87e00151db15 100644 --- a/integration-test/src/test/java/org/apache/iotdb/pipe/it/autocreate/IoTDBPipeLifeCycleIT.java +++ 
b/integration-test/src/test/java/org/apache/iotdb/pipe/it/autocreate/IoTDBPipeLifeCycleIT.java @@ -465,8 +465,13 @@ public void testLifeCycleWithClusterRestart() throws Exception { receiverEnv, "select * from root.**", "Time,root.db.d1.s1,", expectedResSet); } - TestUtils.restartCluster(senderEnv); - TestUtils.restartCluster(receiverEnv); + try { + TestUtils.restartCluster(senderEnv); + TestUtils.restartCluster(receiverEnv); + } catch (final Throwable e) { + e.printStackTrace(); + return; + } try (final SyncConfigNodeIServiceClient ignored = (SyncConfigNodeIServiceClient) senderEnv.getLeaderConfigNodeConnection()) { @@ -529,7 +534,18 @@ public void testReceiverRestartWhenTransferring() throws Exception { }); t.start(); - TestUtils.restartCluster(receiverEnv); + try { + TestUtils.restartCluster(receiverEnv); + } catch (final Throwable e) { + e.printStackTrace(); + try { + t.interrupt(); + t.join(); + } catch (Throwable ignored) { + } + return; + } + t.join(); if (!TestUtils.tryExecuteNonQueryWithRetry(senderEnv, "flush")) { return; @@ -711,8 +727,13 @@ public void testDoubleLiving() throws Exception { TestUtils.assertDataEventuallyOnEnv( receiverEnv, "select * from root.**", "Time,root.db.d1.s1,", expectedResSet); - TestUtils.restartCluster(senderEnv); - TestUtils.restartCluster(receiverEnv); + try { + TestUtils.restartCluster(senderEnv); + TestUtils.restartCluster(receiverEnv); + } catch (final Throwable e) { + e.printStackTrace(); + return; + } for (int i = 400; i < 500; ++i) { if (!TestUtils.tryExecuteNonQueryWithRetry( diff --git a/integration-test/src/test/java/org/apache/iotdb/pipe/it/manual/IoTDBPipeMetaLeaderChangeIT.java b/integration-test/src/test/java/org/apache/iotdb/pipe/it/manual/IoTDBPipeMetaLeaderChangeIT.java index fb70e8e37838..b05e695ad3bb 100644 --- a/integration-test/src/test/java/org/apache/iotdb/pipe/it/manual/IoTDBPipeMetaLeaderChangeIT.java +++ b/integration-test/src/test/java/org/apache/iotdb/pipe/it/manual/IoTDBPipeMetaLeaderChangeIT.java @@ -173,7 +173,7 @@ public void testSchemaRegionLeaderChange() throws Exception { try { index = senderEnv.getFirstLeaderSchemaRegionDataNodeIndex(); senderEnv.shutdownDataNode(index); - } catch (final Exception e) { + } catch (final Throwable e) { e.printStackTrace(); return; } diff --git a/integration-test/src/test/java/org/apache/iotdb/pipe/it/manual/IoTDBPipeMetaRestartIT.java b/integration-test/src/test/java/org/apache/iotdb/pipe/it/manual/IoTDBPipeMetaRestartIT.java index 8c19a5e2160c..76cd4a90e8b8 100644 --- a/integration-test/src/test/java/org/apache/iotdb/pipe/it/manual/IoTDBPipeMetaRestartIT.java +++ b/integration-test/src/test/java/org/apache/iotdb/pipe/it/manual/IoTDBPipeMetaRestartIT.java @@ -84,8 +84,13 @@ public void testAutoRestartSchemaTask() throws Exception { } } - TestUtils.restartCluster(senderEnv); - TestUtils.restartCluster(receiverEnv); + try { + TestUtils.restartCluster(senderEnv); + TestUtils.restartCluster(receiverEnv); + } catch (Throwable e) { + e.printStackTrace(); + return; + } for (int i = 10; i < 20; ++i) { if (!TestUtils.tryExecuteNonQueryWithRetry( @@ -142,8 +147,13 @@ public void testAutoRestartConfigTask() throws Exception { } } - TestUtils.restartCluster(senderEnv); - TestUtils.restartCluster(receiverEnv); + try { + TestUtils.restartCluster(senderEnv); + TestUtils.restartCluster(receiverEnv); + } catch (final Throwable e) { + e.printStackTrace(); + return; + } for (int i = 10; i < 20; ++i) { if (!TestUtils.tryExecuteNonQueryWithRetry( diff --git 
a/integration-test/src/test/java/org/apache/iotdb/subscription/it/dual/AbstractSubscriptionDualIT.java b/integration-test/src/test/java/org/apache/iotdb/subscription/it/dual/AbstractSubscriptionDualIT.java index 1d0ef260ed48..9aa01ad4acfb 100644 --- a/integration-test/src/test/java/org/apache/iotdb/subscription/it/dual/AbstractSubscriptionDualIT.java +++ b/integration-test/src/test/java/org/apache/iotdb/subscription/it/dual/AbstractSubscriptionDualIT.java @@ -36,19 +36,20 @@ public void setUp() { senderEnv = MultiEnvFactory.getEnv(0); receiverEnv = MultiEnvFactory.getEnv(1); + setUpConfig(); + + senderEnv.initClusterEnvironment(); + receiverEnv.initClusterEnvironment(); + } + + void setUpConfig() { // enable auto create schema senderEnv.getConfig().getCommonConfig().setAutoCreateSchemaEnabled(true); receiverEnv.getConfig().getCommonConfig().setAutoCreateSchemaEnabled(true); - // for IoTDBSubscriptionConsumerGroupIT - receiverEnv.getConfig().getCommonConfig().setPipeAirGapReceiverEnabled(true); - // 10 min, assert that the operations will not time out senderEnv.getConfig().getCommonConfig().setCnConnectionTimeoutMs(600000); receiverEnv.getConfig().getCommonConfig().setCnConnectionTimeoutMs(600000); - - senderEnv.initClusterEnvironment(); - receiverEnv.initClusterEnvironment(); } @After diff --git a/integration-test/src/test/java/org/apache/iotdb/subscription/it/dual/IoTDBSubscriptionConsumerGroupIT.java b/integration-test/src/test/java/org/apache/iotdb/subscription/it/dual/IoTDBSubscriptionConsumerGroupIT.java index 07d5b4ed791b..992d151520f9 100644 --- a/integration-test/src/test/java/org/apache/iotdb/subscription/it/dual/IoTDBSubscriptionConsumerGroupIT.java +++ b/integration-test/src/test/java/org/apache/iotdb/subscription/it/dual/IoTDBSubscriptionConsumerGroupIT.java @@ -108,6 +108,15 @@ static final class SubscriptionInfo { } } + @Override + void setUpConfig() { + super.setUpConfig(); + + // Enable air gap receiver + receiverEnv.getConfig().getCommonConfig().setPipeAirGapReceiverEnabled(true); + } + + @Override @Before public void setUp() { super.setUp(); diff --git a/integration-test/src/test/java/org/apache/iotdb/subscription/it/dual/IoTDBSubscriptionTimePrecisionIT.java b/integration-test/src/test/java/org/apache/iotdb/subscription/it/dual/IoTDBSubscriptionTimePrecisionIT.java new file mode 100644 index 000000000000..2b1cc407b7f7 --- /dev/null +++ b/integration-test/src/test/java/org/apache/iotdb/subscription/it/dual/IoTDBSubscriptionTimePrecisionIT.java @@ -0,0 +1,195 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ + +package org.apache.iotdb.subscription.it.dual; + +import org.apache.iotdb.db.it.utils.TestUtils; +import org.apache.iotdb.isession.ISession; +import org.apache.iotdb.it.framework.IoTDBTestRunner; +import org.apache.iotdb.itbase.category.MultiClusterIT2Subscription; +import org.apache.iotdb.rpc.subscription.config.TopicConstant; +import org.apache.iotdb.session.subscription.SubscriptionSession; +import org.apache.iotdb.session.subscription.consumer.SubscriptionPullConsumer; +import org.apache.iotdb.session.subscription.payload.SubscriptionMessage; +import org.apache.iotdb.subscription.it.IoTDBSubscriptionITConstant; + +import org.apache.tsfile.write.record.Tablet; +import org.awaitility.Awaitility; +import org.junit.Test; +import org.junit.experimental.categories.Category; +import org.junit.runner.RunWith; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +import java.sql.Connection; +import java.sql.Statement; +import java.util.HashMap; +import java.util.Iterator; +import java.util.List; +import java.util.Properties; +import java.util.concurrent.TimeUnit; +import java.util.concurrent.atomic.AtomicBoolean; +import java.util.concurrent.locks.LockSupport; + +import static org.junit.Assert.fail; + +@RunWith(IoTDBTestRunner.class) +@Category({MultiClusterIT2Subscription.class}) +public class IoTDBSubscriptionTimePrecisionIT extends AbstractSubscriptionDualIT { + + private static final Logger LOGGER = + LoggerFactory.getLogger(IoTDBSubscriptionTimePrecisionIT.class); + + @Override + void setUpConfig() { + super.setUpConfig(); + + // Set timestamp precision to nanosecond + senderEnv.getConfig().getCommonConfig().setTimestampPrecision("ns"); + receiverEnv.getConfig().getCommonConfig().setTimestampPrecision("ns"); + } + + @Test + public void testTopicTimePrecision() throws Exception { + final String host = senderEnv.getIP(); + final int port = Integer.parseInt(senderEnv.getPort()); + + // Insert some historical data on sender + final long currentTime1 = System.currentTimeMillis() * 1000_000L; // in nanosecond + try (final ISession session = senderEnv.getSessionConnection()) { + for (int i = 0; i < 100; ++i) { + session.executeNonQueryStatement( + String.format("insert into root.db.d1(time, s1) values (%s, 1)", i)); + session.executeNonQueryStatement( + String.format("insert into root.db.d1(time, s2) values (%s, 1)", currentTime1 - i)); + } + session.executeNonQueryStatement("flush"); + } catch (final Exception e) { + e.printStackTrace(); + fail(e.getMessage()); + } + + // Create topic on sender + final String topic1 = "topic1"; + final String topic2 = "topic2"; + try (final SubscriptionSession session = new SubscriptionSession(host, port)) { + session.open(); + { + final Properties config = new Properties(); + config.put(TopicConstant.START_TIME_KEY, currentTime1 - 99); + config.put( + TopicConstant.END_TIME_KEY, + TopicConstant.NOW_TIME_VALUE); // now should be strictly larger than current time 1 + session.createTopic(topic1, config); + } + { + final Properties config = new Properties(); + config.put( + TopicConstant.START_TIME_KEY, + TopicConstant.NOW_TIME_VALUE); // now should be strictly smaller than current time 2 + session.createTopic(topic2, config); + } + } catch (final Exception e) { + e.printStackTrace(); + fail(e.getMessage()); + } + + // Insert some historical data on sender again + final long currentTime2 = System.currentTimeMillis() * 1000_000L; // in nanosecond + try (final ISession session = senderEnv.getSessionConnection()) { + for (int i = 0; i < 100; ++i) { + 
session.executeNonQueryStatement( + String.format("insert into root.db.d2(time, s1) values (%s, 1)", currentTime2 + i)); + } + session.executeNonQueryStatement("flush"); + } catch (final Exception e) { + e.printStackTrace(); + fail(e.getMessage()); + } + + // Subscribe on sender and insert on receiver + final AtomicBoolean isClosed = new AtomicBoolean(false); + final Thread thread = + new Thread( + () -> { + try (final SubscriptionPullConsumer consumer = + new SubscriptionPullConsumer.Builder() + .host(host) + .port(port) + .consumerId("c1") + .consumerGroupId("cg1") + .autoCommit(false) + .buildPullConsumer(); + final ISession session = receiverEnv.getSessionConnection()) { + consumer.open(); + consumer.subscribe(topic1, topic2); + while (!isClosed.get()) { + LockSupport.parkNanos(IoTDBSubscriptionITConstant.SLEEP_NS); // wait some time + final List messages = + consumer.poll(IoTDBSubscriptionITConstant.POLL_TIMEOUT_MS); + for (final SubscriptionMessage message : messages) { + for (final Iterator it = + message.getSessionDataSetsHandler().tabletIterator(); + it.hasNext(); ) { + final Tablet tablet = it.next(); + session.insertTablet(tablet); + } + } + consumer.commitSync(messages); + } + // Auto unsubscribe topics + } catch (final Exception e) { + e.printStackTrace(); + // Avoid failure + } finally { + LOGGER.info("consumer exiting..."); + } + }); + thread.start(); + + // Check data on receiver + try { + try (final Connection connection = receiverEnv.getConnection(); + final Statement statement = connection.createStatement()) { + // Keep retrying if there are execution failures + Awaitility.await() + .pollDelay(IoTDBSubscriptionITConstant.AWAITILITY_POLL_DELAY_SECOND, TimeUnit.SECONDS) + .pollInterval( + IoTDBSubscriptionITConstant.AWAITILITY_POLL_INTERVAL_SECOND, TimeUnit.SECONDS) + .atMost(IoTDBSubscriptionITConstant.AWAITILITY_AT_MOST_SECOND, TimeUnit.SECONDS) + .untilAsserted( + () -> + TestUtils.assertSingleResultSetEqual( + TestUtils.executeQueryWithRetry(statement, "select count(*) from root.**"), + new HashMap() { + { + put("count(root.db.d1.s2)", "100"); + put("count(root.db.d2.s1)", "100"); + } + })); + } + } catch (final Exception e) { + e.printStackTrace(); + fail(e.getMessage()); + } finally { + isClosed.set(true); + thread.join(); + } + } +} diff --git a/integration-test/src/test/java/org/apache/iotdb/subscription/it/dual/IoTDBSubscriptionTopicIT.java b/integration-test/src/test/java/org/apache/iotdb/subscription/it/dual/IoTDBSubscriptionTopicIT.java index 7091c93b4dbc..0cd6bc0d0a10 100644 --- a/integration-test/src/test/java/org/apache/iotdb/subscription/it/dual/IoTDBSubscriptionTopicIT.java +++ b/integration-test/src/test/java/org/apache/iotdb/subscription/it/dual/IoTDBSubscriptionTopicIT.java @@ -506,7 +506,7 @@ public void testTopicInvalidTimeRangeConfig() throws Exception { session.open(); final Properties properties = new Properties(); properties.put(TopicConstant.START_TIME_KEY, "2024-01-32"); - properties.put(TopicConstant.END_TIME_KEY, "now"); + properties.put(TopicConstant.END_TIME_KEY, TopicConstant.NOW_TIME_VALUE); session.createTopic("topic1", properties); fail(); } catch (final Exception ignored) { diff --git a/integration-test/src/test/java/org/apache/iotdb/subscription/it/local/IoTDBSubscriptionBasicIT.java b/integration-test/src/test/java/org/apache/iotdb/subscription/it/local/IoTDBSubscriptionBasicIT.java index 46b697e44db6..13a5e8abc261 100644 --- a/integration-test/src/test/java/org/apache/iotdb/subscription/it/local/IoTDBSubscriptionBasicIT.java +++ 
b/integration-test/src/test/java/org/apache/iotdb/subscription/it/local/IoTDBSubscriptionBasicIT.java @@ -60,6 +60,7 @@ import java.util.concurrent.TimeUnit; import java.util.concurrent.atomic.AtomicBoolean; import java.util.concurrent.atomic.AtomicInteger; +import java.util.concurrent.atomic.AtomicLong; import java.util.concurrent.locks.LockSupport; import java.util.stream.Collectors; @@ -181,7 +182,7 @@ public void testBasicSubscribeTsFile() throws Exception { } // Create topic - final String topicName = "topic2"; + final String topicName = "topic1"; final String host = EnvFactory.getEnv().getIP(); final int port = Integer.parseInt(EnvFactory.getEnv().getPort()); try (final SubscriptionSession session = new SubscriptionSession(host, port)) { @@ -297,11 +298,12 @@ public void testBasicPullConsumerWithCommitAsync() throws Exception { } // Create topic + final String topicName = "topic1"; final String host = EnvFactory.getEnv().getIP(); final int port = Integer.parseInt(EnvFactory.getEnv().getPort()); try (final SubscriptionSession session = new SubscriptionSession(host, port)) { session.open(); - session.createTopic("topic1"); + session.createTopic(topicName); } catch (final Exception e) { e.printStackTrace(); fail(e.getMessage()); @@ -325,7 +327,7 @@ public void testBasicPullConsumerWithCommitAsync() throws Exception { .autoCommit(false) .buildPullConsumer()) { consumer.open(); - consumer.subscribe("topic1"); + consumer.subscribe(topicName); while (!isClosed.get()) { LockSupport.parkNanos(IoTDBSubscriptionITConstant.SLEEP_NS); // wait some time final List messages = @@ -370,7 +372,7 @@ public void onFailure(final Throwable e) { } }); } - consumer.unsubscribe("topic1"); + consumer.unsubscribe(topicName); } catch (final Exception e) { e.printStackTrace(); // avoid fail @@ -450,11 +452,12 @@ public void testBasicPushConsumer() { } // Create topic + final String topicName = "topic1"; final String host = EnvFactory.getEnv().getIP(); final int port = Integer.parseInt(EnvFactory.getEnv().getPort()); try (final SubscriptionSession session = new SubscriptionSession(host, port)) { session.open(); - session.createTopic("topic1"); + session.createTopic(topicName); } catch (final Exception e) { e.printStackTrace(); fail(e.getMessage()); @@ -480,7 +483,7 @@ public void testBasicPushConsumer() { .buildPushConsumer()) { consumer.open(); - consumer.subscribe("topic1"); + consumer.subscribe(topicName); // The push consumer should automatically poll 10 rows of data by 1 onReceive() Awaitility.await() @@ -537,11 +540,112 @@ public void testBasicPushConsumer() { Assert.assertTrue(onReceiveCount.get() > lastOnReceiveCount.get()); }); - consumer.unsubscribe("topic1"); + consumer.unsubscribe(topicName); + } catch (final Exception e) { + e.printStackTrace(); + fail(e.getMessage()); + } + } + @Test + public void testPollUnsubscribedTopics() throws Exception { + // Insert some historical data + try (final ISession session = EnvFactory.getEnv().getSessionConnection()) { + for (int i = 0; i < 100; ++i) { + session.executeNonQueryStatement( + String.format("insert into root.db.d1(time, s1) values (%s, 1)", i)); + } + for (int i = 100; i < 200; ++i) { + session.executeNonQueryStatement( + String.format("insert into root.db.d1(time, s1) values (%s, 1)", i)); + } + session.executeNonQueryStatement("flush"); } catch (final Exception e) { e.printStackTrace(); fail(e.getMessage()); } + + // Create topic + final String host = EnvFactory.getEnv().getIP(); + final int port = Integer.parseInt(EnvFactory.getEnv().getPort()); + 
try (final SubscriptionSession session = new SubscriptionSession(host, port)) { + session.open(); + { + final Properties properties = new Properties(); + properties.put(TopicConstant.END_TIME_KEY, 99); + session.createTopic("topic1", properties); + } + { + final Properties properties = new Properties(); + properties.put(TopicConstant.START_TIME_KEY, 100); + session.createTopic("topic2", properties); + } + } catch (final Exception e) { + e.printStackTrace(); + fail(e.getMessage()); + } + + // Subscription + final AtomicInteger rowCount = new AtomicInteger(); + final AtomicLong timestampSum = new AtomicLong(); + final AtomicBoolean isClosed = new AtomicBoolean(false); + final Thread thread = + new Thread( + () -> { + try (final SubscriptionPullConsumer consumer = + new SubscriptionPullConsumer.Builder() + .host(host) + .port(port) + .consumerId("c1") + .consumerGroupId("cg1") + .autoCommit(false) + .buildPullConsumer()) { + consumer.open(); + consumer.subscribe("topic2"); // only subscribe topic2 + while (!isClosed.get()) { + LockSupport.parkNanos(IoTDBSubscriptionITConstant.SLEEP_NS); // wait some time + final List messages = + consumer.poll(IoTDBSubscriptionITConstant.POLL_TIMEOUT_MS); + for (final SubscriptionMessage message : messages) { + for (final SubscriptionSessionDataSet dataSet : + message.getSessionDataSetsHandler()) { + while (dataSet.hasNext()) { + timestampSum.getAndAdd(dataSet.next().getTimestamp()); + rowCount.addAndGet(1); + } + } + } + consumer.commitSync(messages); + } + // automatically unsubscribe topics when closing + } catch (final Exception e) { + e.printStackTrace(); + // Avoid failure + } finally { + LOGGER.info("consumer exiting..."); + } + }); + thread.start(); + + // Check row count + try { + // Keep retrying if there are execution failures + Awaitility.await() + .pollDelay(IoTDBSubscriptionITConstant.AWAITILITY_POLL_DELAY_SECOND, TimeUnit.SECONDS) + .pollInterval( + IoTDBSubscriptionITConstant.AWAITILITY_POLL_INTERVAL_SECOND, TimeUnit.SECONDS) + .atMost(IoTDBSubscriptionITConstant.AWAITILITY_AT_MOST_SECOND, TimeUnit.SECONDS) + .untilAsserted( + () -> { + Assert.assertEquals(100, rowCount.get()); + Assert.assertEquals((100 + 199) * 100 / 2, timestampSum.get()); + }); + } catch (final Exception e) { + e.printStackTrace(); + fail(e.getMessage()); + } finally { + isClosed.set(true); + thread.join(); + } } } diff --git a/integration-test/src/test/java/org/apache/iotdb/subscription/it/local/IoTDBSubscriptionRestartIT.java b/integration-test/src/test/java/org/apache/iotdb/subscription/it/local/IoTDBSubscriptionRestartIT.java index 231845921e13..2764fad56b83 100644 --- a/integration-test/src/test/java/org/apache/iotdb/subscription/it/local/IoTDBSubscriptionRestartIT.java +++ b/integration-test/src/test/java/org/apache/iotdb/subscription/it/local/IoTDBSubscriptionRestartIT.java @@ -119,7 +119,12 @@ public void testSubscriptionAfterRestartCluster() throws Exception { } // Restart cluster - TestUtils.restartCluster(EnvFactory.getEnv()); + try { + TestUtils.restartCluster(EnvFactory.getEnv()); + } catch (final Throwable e) { + e.printStackTrace(); + return; + } // Show topics and subscriptions try (final SyncConfigNodeIServiceClient client = @@ -254,9 +259,14 @@ public void testSubscriptionAfterRestartDataNode() throws Exception { } // Shutdown DN 1 & DN 2 - Thread.sleep(10000); // wait some time - EnvFactory.getEnv().shutdownDataNode(1); - EnvFactory.getEnv().shutdownDataNode(2); + try { + Thread.sleep(10000); // wait some time + EnvFactory.getEnv().shutdownDataNode(1); + 
EnvFactory.getEnv().shutdownDataNode(2); + } catch (final Throwable e) { + e.printStackTrace(); + return; + } // Subscription again final Map timestamps = new HashMap<>(); @@ -297,10 +307,15 @@ public void testSubscriptionAfterRestartDataNode() throws Exception { thread.start(); // Start DN 1 & DN 2 - Thread.sleep(10000); // wait some time - EnvFactory.getEnv().startDataNode(1); - EnvFactory.getEnv().startDataNode(2); - ((AbstractEnv) EnvFactory.getEnv()).checkClusterStatusWithoutUnknown(); + try { + Thread.sleep(10000); // wait some time + EnvFactory.getEnv().startDataNode(1); + EnvFactory.getEnv().startDataNode(2); + ((AbstractEnv) EnvFactory.getEnv()).checkClusterStatusWithoutUnknown(); + } catch (final Throwable e) { + e.printStackTrace(); + return; + } // Insert some realtime data try (final ISession session = EnvFactory.getEnv().getSessionConnection()) { diff --git a/integration-test/src/test/java/org/apache/iotdb/tools/it/ExportDataTestIT.java b/integration-test/src/test/java/org/apache/iotdb/tools/it/ExportDataTestIT.java index ec53326f4274..f04f387fd381 100644 --- a/integration-test/src/test/java/org/apache/iotdb/tools/it/ExportDataTestIT.java +++ b/integration-test/src/test/java/org/apache/iotdb/tools/it/ExportDataTestIT.java @@ -89,7 +89,7 @@ protected void testOnWindows() throws IOException { "root", "-pw", "root", - "-td", + "-t", "target", "-q", "select * from root.test.t2 where time > 1 and time < 1000000000000", @@ -115,7 +115,7 @@ protected void testOnWindows() throws IOException { "root", "-pw", "root", - "-td", + "-t", "target", "-q", "select * from root.test.t2 where time > 1 and time < 1000000000000", @@ -141,7 +141,7 @@ protected void testOnWindows() throws IOException { "root", "-pw", "root", - "-td", + "-t", "target", "-type", "sql", @@ -170,7 +170,7 @@ protected void testOnUnix() throws IOException { "root", "-pw", "root", - "-td", + "-t", "target", "-q", "select * from root.**"); @@ -193,7 +193,7 @@ protected void testOnUnix() throws IOException { "root", "-pw", "root", - "-td", + "-t", "target", "-q", "select * from root.**"); @@ -216,7 +216,7 @@ protected void testOnUnix() throws IOException { "root", "-pw", "root", - "-td", + "-t", "target", "-type", "sql", diff --git a/integration-test/src/test/java/org/apache/iotdb/tools/it/ImportDataTestIT.java b/integration-test/src/test/java/org/apache/iotdb/tools/it/ImportDataTestIT.java index 625d2b9481c1..391b726f6d73 100644 --- a/integration-test/src/test/java/org/apache/iotdb/tools/it/ImportDataTestIT.java +++ b/integration-test/src/test/java/org/apache/iotdb/tools/it/ImportDataTestIT.java @@ -25,8 +25,8 @@ import org.apache.iotdb.itbase.category.ClusterIT; import org.apache.iotdb.itbase.category.LocalStandaloneIT; -import org.junit.After; -import org.junit.Before; +import org.junit.AfterClass; +import org.junit.BeforeClass; import org.junit.Test; import org.junit.experimental.categories.Category; import org.junit.runner.RunWith; @@ -46,7 +46,7 @@ public class ImportDataTestIT extends AbstractScript { private static String libPath; - @Before + @BeforeClass public static void setUp() { EnvFactory.getEnv().initClusterEnvironment(); ip = EnvFactory.getEnv().getIP(); @@ -55,7 +55,7 @@ public static void setUp() { libPath = EnvFactory.getEnv().getLibPath(); } - @After + @AfterClass public static void tearDown() { EnvFactory.getEnv().cleanClusterEnvironment(); } @@ -74,7 +74,7 @@ public void test() throws IOException { @Override protected void testOnWindows() throws IOException { final String[] output = { - "The file name 
must end with \"csv\" or \"txt\" or \"sql\"!", + "The file name must end with \"csv\" or \"txt\"!", }; ProcessBuilder builder = new ProcessBuilder( @@ -89,7 +89,7 @@ protected void testOnWindows() throws IOException { "root", "-pw", "root", - "-f", + "-s", "./", "&", "exit", @@ -101,7 +101,7 @@ protected void testOnWindows() throws IOException { @Override protected void testOnUnix() throws IOException { final String[] output = { - "The file name must end with \"csv\" or \"txt\" or \"sql\"!", + "The file name must end with \"csv\" or \"txt\"!", }; ProcessBuilder builder = new ProcessBuilder( @@ -115,7 +115,7 @@ protected void testOnUnix() throws IOException { "root", "-pw", "root", - "-f", + "-s", "./"); builder.environment().put("CLASSPATH", libPath); testOutput(builder, output, 0); diff --git a/iotdb-api/external-api/pom.xml b/iotdb-api/external-api/pom.xml index 791019515461..860e1453757c 100644 --- a/iotdb-api/external-api/pom.xml +++ b/iotdb-api/external-api/pom.xml @@ -24,7 +24,7 @@ org.apache.iotdb iotdb-api - 1.3.2-SNAPSHOT + 1.3.3-SNAPSHOT external-api IoTDB: API: External API diff --git a/iotdb-api/pipe-api/pom.xml b/iotdb-api/pipe-api/pom.xml index 10944b63e75c..c0e4338b3ef1 100644 --- a/iotdb-api/pipe-api/pom.xml +++ b/iotdb-api/pipe-api/pom.xml @@ -24,7 +24,7 @@ org.apache.iotdb iotdb-api - 1.3.2-SNAPSHOT + 1.3.3-SNAPSHOT pipe-api IoTDB: API: Pipe API diff --git a/iotdb-api/pom.xml b/iotdb-api/pom.xml index 3744f1f5b1e4..f29b64894b6f 100644 --- a/iotdb-api/pom.xml +++ b/iotdb-api/pom.xml @@ -24,7 +24,7 @@ org.apache.iotdb iotdb-parent - 1.3.2-SNAPSHOT + 1.3.3-SNAPSHOT iotdb-api pom diff --git a/iotdb-api/trigger-api/pom.xml b/iotdb-api/trigger-api/pom.xml index 88cb4ed0bab6..49b192aebe70 100644 --- a/iotdb-api/trigger-api/pom.xml +++ b/iotdb-api/trigger-api/pom.xml @@ -24,7 +24,7 @@ org.apache.iotdb iotdb-api - 1.3.2-SNAPSHOT + 1.3.3-SNAPSHOT trigger-api IoTDB: API: Trigger API diff --git a/iotdb-api/udf-api/pom.xml b/iotdb-api/udf-api/pom.xml index d11ae4dbceb2..f6b5897c826b 100644 --- a/iotdb-api/udf-api/pom.xml +++ b/iotdb-api/udf-api/pom.xml @@ -24,7 +24,7 @@ org.apache.iotdb iotdb-api - 1.3.2-SNAPSHOT + 1.3.3-SNAPSHOT udf-api IoTDB: API: UDF API diff --git a/iotdb-api/udf-api/src/main/java/org/apache/iotdb/udf/api/utils/RowImpl.java b/iotdb-api/udf-api/src/main/java/org/apache/iotdb/udf/api/utils/RowImpl.java index e0ca3a53fc65..d7d3a8848690 100644 --- a/iotdb-api/udf-api/src/main/java/org/apache/iotdb/udf/api/utils/RowImpl.java +++ b/iotdb-api/udf-api/src/main/java/org/apache/iotdb/udf/api/utils/RowImpl.java @@ -45,36 +45,57 @@ public long getTime() { @Override public int getInt(int columnIndex) { + if (columnIndex >= size()) { + throw new IndexOutOfBoundsException("Index out of bound error!"); + } return (int) rowRecord[columnIndex]; } @Override public long getLong(int columnIndex) { + if (columnIndex >= size()) { + throw new IndexOutOfBoundsException("Index out of bound error!"); + } return (long) rowRecord[columnIndex]; } @Override public float getFloat(int columnIndex) { + if (columnIndex >= size()) { + throw new IndexOutOfBoundsException("Index out of bound error!"); + } return (float) rowRecord[columnIndex]; } @Override public double getDouble(int columnIndex) { + if (columnIndex >= size()) { + throw new IndexOutOfBoundsException("Index out of bound error!"); + } return (double) rowRecord[columnIndex]; } @Override public boolean getBoolean(int columnIndex) { + if (columnIndex >= size()) { + throw new IndexOutOfBoundsException("Index out of bound error!"); + } return 
(boolean) rowRecord[columnIndex]; } @Override public Binary getBinary(int columnIndex) { + if (columnIndex >= size()) { + throw new IndexOutOfBoundsException("Index out of bound error!"); + } return transformToUDFBinary((org.apache.tsfile.utils.Binary) rowRecord[columnIndex]); } @Override public String getString(int columnIndex) { + if (columnIndex >= size()) { + throw new IndexOutOfBoundsException("Index out of bound error!"); + } return rowRecord[columnIndex].toString(); } diff --git a/iotdb-client/cli/pom.xml b/iotdb-client/cli/pom.xml index c8451aac10ed..cc62e9e8d8d5 100644 --- a/iotdb-client/cli/pom.xml +++ b/iotdb-client/cli/pom.xml @@ -24,7 +24,7 @@ org.apache.iotdb iotdb-client - 1.3.2-SNAPSHOT + 1.3.3-SNAPSHOT iotdb-cli IoTDB: Client: CLI @@ -37,37 +37,37 @@ org.apache.iotdb iotdb-session - 1.3.2-SNAPSHOT + 1.3.3-SNAPSHOT org.apache.iotdb iotdb-jdbc - 1.3.2-SNAPSHOT + 1.3.3-SNAPSHOT org.apache.iotdb iotdb-antlr - 1.3.2-SNAPSHOT + 1.3.3-SNAPSHOT org.apache.iotdb node-commons - 1.3.2-SNAPSHOT + 1.3.3-SNAPSHOT org.apache.iotdb iotdb-server - 1.3.2-SNAPSHOT + 1.3.3-SNAPSHOT org.apache.iotdb isession - 1.3.2-SNAPSHOT + 1.3.3-SNAPSHOT org.apache.iotdb service-rpc - 1.3.2-SNAPSHOT + 1.3.3-SNAPSHOT org.apache.tsfile @@ -82,7 +82,7 @@ org.apache.iotdb iotdb-thrift - 1.3.2-SNAPSHOT + 1.3.3-SNAPSHOT org.slf4j diff --git a/iotdb-client/cli/src/main/java/org/apache/iotdb/tool/AbstractDataTool.java b/iotdb-client/cli/src/main/java/org/apache/iotdb/tool/AbstractDataTool.java index 6ae1a1791dcc..1e689e903c1a 100644 --- a/iotdb-client/cli/src/main/java/org/apache/iotdb/tool/AbstractDataTool.java +++ b/iotdb-client/cli/src/main/java/org/apache/iotdb/tool/AbstractDataTool.java @@ -31,6 +31,7 @@ import org.apache.commons.csv.CSVFormat; import org.apache.commons.csv.CSVPrinter; import org.apache.commons.csv.QuoteMode; +import org.apache.commons.lang3.StringUtils; import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -43,17 +44,21 @@ public abstract class AbstractDataTool { protected static final String HOST_ARGS = "h"; protected static final String HOST_NAME = "host"; + protected static final String HOST_DEFAULT_VALUE = "127.0.0.1"; protected static final String HELP_ARGS = "help"; protected static final String PORT_ARGS = "p"; protected static final String PORT_NAME = "port"; + protected static final String PORT_DEFAULT_VALUE = "6667"; protected static final String PW_ARGS = "pw"; protected static final String PW_NAME = "password"; + protected static final String PW_DEFAULT_VALUE = "root"; protected static final String USERNAME_ARGS = "u"; protected static final String USERNAME_NAME = "username"; + protected static final String USERNAME_DEFAULT_VALUE = "root"; protected static final String TIME_FORMAT_ARGS = "tf"; protected static final String TIME_FORMAT_NAME = "timeformat"; @@ -61,7 +66,7 @@ public abstract class AbstractDataTool { protected static final String TIME_ZONE_ARGS = "tz"; protected static final String TIME_ZONE_NAME = "timeZone"; - protected static final String TIMEOUT_ARGS = "t"; + protected static final String TIMEOUT_ARGS = "timeout"; protected static final String TIMEOUT_NAME = "timeout"; protected static final int MAX_HELP_CONSOLE_WIDTH = 92; protected static final String[] TIME_FORMAT = @@ -125,10 +130,14 @@ public abstract class AbstractDataTool { protected AbstractDataTool() {} - protected static String checkRequiredArg(String arg, String name, CommandLine commandLine) + protected static String checkRequiredArg( + String arg, String name, CommandLine commandLine, String 
defaultValue) throws ArgsErrorException { String str = commandLine.getOptionValue(arg); if (str == null) { + if (StringUtils.isNotBlank(defaultValue)) { + return defaultValue; + } String msg = String.format("Required values for option '%s' not provided", name); LOGGER.info(msg); LOGGER.info("Use -help for more information"); @@ -145,11 +154,10 @@ protected static void setTimeZone() throws IoTDBConnectionException, StatementEx } protected static void parseBasicParams(CommandLine commandLine) throws ArgsErrorException { - host = checkRequiredArg(HOST_ARGS, HOST_NAME, commandLine); - port = checkRequiredArg(PORT_ARGS, PORT_NAME, commandLine); - username = checkRequiredArg(USERNAME_ARGS, USERNAME_NAME, commandLine); - - password = commandLine.getOptionValue(PW_ARGS); + host = checkRequiredArg(HOST_ARGS, HOST_NAME, commandLine, HOST_DEFAULT_VALUE); + port = checkRequiredArg(PORT_ARGS, PORT_NAME, commandLine, PORT_DEFAULT_VALUE); + username = checkRequiredArg(USERNAME_ARGS, USERNAME_NAME, commandLine, USERNAME_DEFAULT_VALUE); + password = commandLine.getOptionValue(PW_ARGS, PW_DEFAULT_VALUE); } protected static boolean checkTimeFormat() { @@ -176,30 +184,27 @@ protected static Options createNewOptions() { Option opHost = Option.builder(HOST_ARGS) .longOpt(HOST_NAME) - .required() .argName(HOST_NAME) .hasArg() - .desc("Host Name (required)") + .desc("Host Name (optional)") .build(); options.addOption(opHost); Option opPort = Option.builder(PORT_ARGS) .longOpt(PORT_NAME) - .required() .argName(PORT_NAME) .hasArg() - .desc("Port (required)") + .desc("Port (optional)") .build(); options.addOption(opPort); Option opUsername = Option.builder(USERNAME_ARGS) .longOpt(USERNAME_NAME) - .required() .argName(USERNAME_NAME) .hasArg() - .desc("Username (required)") + .desc("Username (optional)") .build(); options.addOption(opUsername); @@ -209,7 +214,7 @@ protected static Options createNewOptions() { .optionalArg(true) .argName(PW_NAME) .hasArg() - .desc("Password (required)") + .desc("Password (optional)") .build(); options.addOption(opPassword); return options; diff --git a/iotdb-client/cli/src/main/java/org/apache/iotdb/tool/ExportData.java b/iotdb-client/cli/src/main/java/org/apache/iotdb/tool/ExportData.java index bbf8750000ed..68f140535a55 100644 --- a/iotdb-client/cli/src/main/java/org/apache/iotdb/tool/ExportData.java +++ b/iotdb-client/cli/src/main/java/org/apache/iotdb/tool/ExportData.java @@ -67,14 +67,14 @@ */ public class ExportData extends AbstractDataTool { - private static final String TARGET_DIR_ARGS = "td"; + private static final String TARGET_DIR_ARGS = "t"; private static final String TARGET_DIR_NAME = "targetDirectory"; - private static final String TARGET_FILE_ARGS = "f"; - private static final String TARGET_FILE_NAME = "targetFile"; + private static final String TARGET_FILE_ARGS = "tfn"; + private static final String TARGET_FILE_NAME = "targetFileName"; private static final String SQL_FILE_ARGS = "s"; - private static final String SQL_FILE_NAME = "sqlfile"; + private static final String SQL_FILE_NAME = "sourceSqlFile"; private static final String DATA_TYPE_ARGS = "datatype"; private static final String DATA_TYPE_NAME = "datatype"; @@ -90,8 +90,8 @@ public class ExportData extends AbstractDataTool { private static final String ALIGNED_ARGS = "aligned"; private static final String ALIGNED_NAME = "export aligned insert sql"; - private static final String LINES_PER_FILE_ARGS = "linesPerFile"; - private static final String LINES_PER_FILE_ARGS_NAME = "Lines Per File"; + private static final 
String LINES_PER_FILE_ARGS = "lpf"; + private static final String LINES_PER_FILE_ARGS_NAME = "linesPerFile"; private static final String TSFILEDB_CLI_PREFIX = "ExportData"; @@ -207,7 +207,7 @@ public static void main(String[] args) { } private static void parseSpecialParams(CommandLine commandLine) throws ArgsErrorException { - targetDirectory = checkRequiredArg(TARGET_DIR_ARGS, TARGET_DIR_NAME, commandLine); + targetDirectory = checkRequiredArg(TARGET_DIR_ARGS, TARGET_DIR_NAME, commandLine, null); targetFile = commandLine.getOptionValue(TARGET_FILE_ARGS); needDataTypePrinted = Boolean.valueOf(commandLine.getOptionValue(DATA_TYPE_ARGS)); queryCommand = commandLine.getOptionValue(QUERY_COMMAND_ARGS); diff --git a/iotdb-client/cli/src/main/java/org/apache/iotdb/tool/ImportData.java b/iotdb-client/cli/src/main/java/org/apache/iotdb/tool/ImportData.java index 647bd2a497fc..fac86ac07fd0 100644 --- a/iotdb-client/cli/src/main/java/org/apache/iotdb/tool/ImportData.java +++ b/iotdb-client/cli/src/main/java/org/apache/iotdb/tool/ImportData.java @@ -75,8 +75,8 @@ public class ImportData extends AbstractDataTool { - private static final String FILE_ARGS = "f"; - private static final String FILE_NAME = "file or folder"; + private static final String FILE_ARGS = "s"; + private static final String FILE_NAME = "sourceFileOrFolder"; private static final String FAILED_FILE_ARGS = "fd"; private static final String FAILED_FILE_NAME = "failed file directory"; @@ -98,8 +98,8 @@ public class ImportData extends AbstractDataTool { private static final String TYPE_INFER_ARGS = "typeInfer"; private static final String TYPE_INFER_ARGS_NAME = "type infer"; - private static final String LINES_PER_FAILED_FILE_ARGS = "linesPerFailedFile"; - private static final String LINES_PER_FAILED_FILE_ARGS_NAME = "Lines Per FailedFile"; + private static final String LINES_PER_FAILED_FILE_ARGS = "lpf"; + private static final String LINES_PER_FAILED_FILE_ARGS_NAME = "linesPerFailedFile"; private static final String TSFILEDB_CLI_PREFIX = "ImportData"; diff --git a/iotdb-client/cli/src/main/java/org/apache/iotdb/tool/ImportSchema.java b/iotdb-client/cli/src/main/java/org/apache/iotdb/tool/ImportSchema.java index acd55b3eb42b..610c9e1f7566 100644 --- a/iotdb-client/cli/src/main/java/org/apache/iotdb/tool/ImportSchema.java +++ b/iotdb-client/cli/src/main/java/org/apache/iotdb/tool/ImportSchema.java @@ -115,7 +115,6 @@ private static Options createOptions() { Option opFailedFile = Option.builder(FAILED_FILE_ARGS) - .required(false) .longOpt(FAILED_FILE_NAME) .hasArg() .argName(FAILED_FILE_ARGS_NAME) @@ -124,13 +123,6 @@ private static Options createOptions() { .build(); options.addOption(opFailedFile); - Option opAligned = - Option.builder(ALIGNED_ARGS) - .longOpt(ALIGNED_ARGS) - .desc("Whether import schema as aligned timeseries(optional)") - .build(); - options.addOption(opAligned); - Option opBatchPointSize = Option.builder(BATCH_POINT_SIZE_ARGS) .longOpt(BATCH_POINT_SIZE_NAME) @@ -341,35 +333,35 @@ private static void writeScheme( hasStarted.set(true); } else if (pointSize.get() >= batchPointSize) { try { - writeAndEmptyDataSet( - paths, - dataTypes, - encodings, - compressors, - null, - null, - null, - measurementAlias, - 3); - writeAndEmptyDataSet( - pathsWithAlias, - dataTypesWithAlias, - encodingsWithAlias, - compressorsWithAlias, - null, - null, - null, - null, - 3); - paths.clear(); - dataTypes.clear(); - encodings.clear(); - compressors.clear(); - measurementAlias.clear(); - pointSize.set(0); + if 
(CollectionUtils.isNotEmpty(paths)) { + writeAndEmptyDataSet( + paths, dataTypes, encodings, compressors, null, null, null, null, 3); + } + } catch (Exception e) { + paths.forEach(t -> failedRecords.add(Collections.singletonList(t))); + } + try { + if (CollectionUtils.isNotEmpty(pathsWithAlias)) { + writeAndEmptyDataSet( + pathsWithAlias, + dataTypesWithAlias, + encodingsWithAlias, + compressorsWithAlias, + null, + null, + null, + measurementAlias, + 3); + } } catch (Exception e) { - failedRecords.add((List) (List) paths); + paths.forEach(t -> failedRecords.add(Collections.singletonList(t))); } + paths.clear(); + dataTypes.clear(); + encodings.clear(); + compressors.clear(); + measurementAlias.clear(); + pointSize.set(0); } } else { paths.clear(); diff --git a/iotdb-client/client-cpp/pom.xml b/iotdb-client/client-cpp/pom.xml index 726e54bd5cbe..ca2440933dad 100644 --- a/iotdb-client/client-cpp/pom.xml +++ b/iotdb-client/client-cpp/pom.xml @@ -24,7 +24,7 @@ org.apache.iotdb iotdb-client - 1.3.2-SNAPSHOT + 1.3.3-SNAPSHOT client-cpp pom @@ -42,7 +42,7 @@ org.apache.iotdb iotdb-thrift-commons - 1.3.2-SNAPSHOT + 1.3.3-SNAPSHOT provided diff --git a/iotdb-client/client-py/pom.xml b/iotdb-client/client-py/pom.xml index 7d23a6da94e5..13636635cd52 100644 --- a/iotdb-client/client-py/pom.xml +++ b/iotdb-client/client-py/pom.xml @@ -24,7 +24,7 @@ org.apache.iotdb iotdb-client - 1.3.2-SNAPSHOT + 1.3.3-SNAPSHOT iotdb-python-api IoTDB: Client: Python-API @@ -34,7 +34,7 @@ org.apache.iotdb iotdb-thrift-commons - 1.3.2-SNAPSHOT + 1.3.3-SNAPSHOT provided diff --git a/iotdb-client/client-py/requirements.txt b/iotdb-client/client-py/requirements.txt index ba23aeea0715..23c97837a798 100644 --- a/iotdb-client/client-py/requirements.txt +++ b/iotdb-client/client-py/requirements.txt @@ -21,5 +21,5 @@ pandas>=1.3.5 numpy>=1.21.4 thrift>=0.14.1 # SQLAlchemy Dialect -sqlalchemy == 1.3.20 -sqlalchemy-utils == 0.36.8 \ No newline at end of file +sqlalchemy<1.5,>=1.4 +sqlalchemy-utils>=0.37.8 diff --git a/iotdb-client/client-py/resources/setup.py b/iotdb-client/client-py/resources/setup.py index 3af2705af265..acf5857327ae 100644 --- a/iotdb-client/client-py/resources/setup.py +++ b/iotdb-client/client-py/resources/setup.py @@ -40,10 +40,9 @@ url="https://github.com/apache/iotdb", packages=setuptools.find_packages(), install_requires=[ - "thrift>=0.13.0", - "pandas>=1.0.0,<1.99.99", - "numpy>=1.0.0", - "testcontainers>=2.0.0", + "thrift>=0.14.1", + "pandas>=1.3.5", + "numpy>=1.21.4", "sqlalchemy<1.5,>=1.4", "sqlalchemy-utils>=0.37.8", ], diff --git a/iotdb-client/isession/pom.xml b/iotdb-client/isession/pom.xml index c6f139346d8e..ed15ef7d98fe 100644 --- a/iotdb-client/isession/pom.xml +++ b/iotdb-client/isession/pom.xml @@ -24,7 +24,7 @@ org.apache.iotdb iotdb-client - 1.3.2-SNAPSHOT + 1.3.3-SNAPSHOT isession IoTDB: Client: isession @@ -32,7 +32,7 @@ org.apache.iotdb service-rpc - 1.3.2-SNAPSHOT + 1.3.3-SNAPSHOT org.apache.tsfile @@ -47,12 +47,12 @@ org.apache.iotdb iotdb-thrift - 1.3.2-SNAPSHOT + 1.3.3-SNAPSHOT org.apache.iotdb iotdb-thrift-commons - 1.3.2-SNAPSHOT + 1.3.3-SNAPSHOT org.apache.thrift diff --git a/iotdb-client/jdbc/pom.xml b/iotdb-client/jdbc/pom.xml index bd9bf106631d..34634e26d1d2 100644 --- a/iotdb-client/jdbc/pom.xml +++ b/iotdb-client/jdbc/pom.xml @@ -24,7 +24,7 @@ org.apache.iotdb iotdb-client - 1.3.2-SNAPSHOT + 1.3.3-SNAPSHOT iotdb-jdbc IoTDB: Client: Jdbc @@ -38,12 +38,12 @@ org.apache.iotdb service-rpc - 1.3.2-SNAPSHOT + 1.3.3-SNAPSHOT org.apache.iotdb iotdb-thrift-commons - 1.3.2-SNAPSHOT + 
1.3.3-SNAPSHOT org.apache.tsfile @@ -58,7 +58,7 @@ org.apache.iotdb iotdb-thrift - 1.3.2-SNAPSHOT + 1.3.3-SNAPSHOT org.apache.thrift diff --git a/iotdb-client/jdbc/src/main/java/org/apache/iotdb/jdbc/IoTDBJDBCResultSet.java b/iotdb-client/jdbc/src/main/java/org/apache/iotdb/jdbc/IoTDBJDBCResultSet.java index 60fa9c98373c..ab6009343aa5 100644 --- a/iotdb-client/jdbc/src/main/java/org/apache/iotdb/jdbc/IoTDBJDBCResultSet.java +++ b/iotdb-client/jdbc/src/main/java/org/apache/iotdb/jdbc/IoTDBJDBCResultSet.java @@ -377,7 +377,11 @@ public byte getByte(String columnName) throws SQLException { @Override public byte[] getBytes(int columnIndex) throws SQLException { try { - return ioTDBRpcDataSet.getDataType(columnIndex).equals(TSDataType.BLOB) + final TSDataType dataType = ioTDBRpcDataSet.getDataType(columnIndex); + if (dataType == null) { + return null; + } + return dataType.equals(TSDataType.BLOB) ? ioTDBRpcDataSet.getBinary(columnIndex).getValues() : ioTDBRpcDataSet.getString(columnIndex).getBytes(charset); } catch (StatementExecutionException e) { @@ -388,7 +392,11 @@ public byte[] getBytes(int columnIndex) throws SQLException { @Override public byte[] getBytes(String columnName) throws SQLException { try { - return ioTDBRpcDataSet.getDataType(columnName).equals(TSDataType.BLOB) + final TSDataType dataType = ioTDBRpcDataSet.getDataType(columnName); + if (dataType == null) { + return null; + } + return dataType.equals(TSDataType.BLOB) ? ioTDBRpcDataSet.getBinary(columnName).getValues() : ioTDBRpcDataSet.getString(columnName).getBytes(charset); } catch (StatementExecutionException e) { diff --git a/iotdb-client/pom.xml b/iotdb-client/pom.xml index ee34d7e76a00..fe67109c928f 100644 --- a/iotdb-client/pom.xml +++ b/iotdb-client/pom.xml @@ -24,7 +24,7 @@ org.apache.iotdb iotdb-parent - 1.3.2-SNAPSHOT + 1.3.3-SNAPSHOT iotdb-client pom diff --git a/iotdb-client/service-rpc/pom.xml b/iotdb-client/service-rpc/pom.xml index 550bc3233cb5..693a97e18556 100644 --- a/iotdb-client/service-rpc/pom.xml +++ b/iotdb-client/service-rpc/pom.xml @@ -24,7 +24,7 @@ org.apache.iotdb iotdb-client - 1.3.2-SNAPSHOT + 1.3.3-SNAPSHOT service-rpc IoTDB: Client: Service-RPC @@ -60,12 +60,12 @@ org.apache.iotdb iotdb-thrift - 1.3.2-SNAPSHOT + 1.3.3-SNAPSHOT org.apache.iotdb iotdb-thrift-commons - 1.3.2-SNAPSHOT + 1.3.3-SNAPSHOT org.apache.thrift diff --git a/iotdb-client/service-rpc/src/main/java/org/apache/iotdb/rpc/IoTDBRpcDataSet.java b/iotdb-client/service-rpc/src/main/java/org/apache/iotdb/rpc/IoTDBRpcDataSet.java index c32e310b65f4..014c5eedae1f 100644 --- a/iotdb-client/service-rpc/src/main/java/org/apache/iotdb/rpc/IoTDBRpcDataSet.java +++ b/iotdb-client/service-rpc/src/main/java/org/apache/iotdb/rpc/IoTDBRpcDataSet.java @@ -528,7 +528,13 @@ public TSDataType getDataType(int columnIndex) throws StatementExecutionExceptio } public TSDataType getDataType(String columnName) throws StatementExecutionException { - return columnTypeDeduplicatedList.get(columnOrdinalMap.get(columnName) - START_INDEX); + if (columnName.equals(TIMESTAMP_STR)) { + return TSDataType.INT64; + } + final int index = columnOrdinalMap.get(columnName) - START_INDEX; + return index < 0 || index >= columnTypeDeduplicatedList.size() + ? 
null + : columnTypeDeduplicatedList.get(index); } public int findColumn(String columnName) { diff --git a/iotdb-client/session/pom.xml b/iotdb-client/session/pom.xml index 82b24c39d3bc..4e514bc3265b 100644 --- a/iotdb-client/session/pom.xml +++ b/iotdb-client/session/pom.xml @@ -24,7 +24,7 @@ org.apache.iotdb iotdb-client - 1.3.2-SNAPSHOT + 1.3.3-SNAPSHOT iotdb-session IoTDB: Client: Session @@ -37,17 +37,17 @@ org.apache.iotdb service-rpc - 1.3.2-SNAPSHOT + 1.3.3-SNAPSHOT org.apache.iotdb isession - 1.3.2-SNAPSHOT + 1.3.3-SNAPSHOT org.apache.iotdb iotdb-thrift-commons - 1.3.2-SNAPSHOT + 1.3.3-SNAPSHOT org.apache.tsfile @@ -62,7 +62,7 @@ org.apache.iotdb iotdb-thrift - 1.3.2-SNAPSHOT + 1.3.3-SNAPSHOT org.slf4j diff --git a/iotdb-core/antlr/pom.xml b/iotdb-core/antlr/pom.xml index 185d9134c5ce..5247d212fe3c 100644 --- a/iotdb-core/antlr/pom.xml +++ b/iotdb-core/antlr/pom.xml @@ -24,7 +24,7 @@ org.apache.iotdb iotdb-core - 1.3.2-SNAPSHOT + 1.3.3-SNAPSHOT iotdb-antlr IoTDB: Core: Antlr-Parser diff --git a/iotdb-core/confignode/pom.xml b/iotdb-core/confignode/pom.xml index 896799b3824f..168815c89dff 100644 --- a/iotdb-core/confignode/pom.xml +++ b/iotdb-core/confignode/pom.xml @@ -24,7 +24,7 @@ org.apache.iotdb iotdb-core - 1.3.2-SNAPSHOT + 1.3.3-SNAPSHOT iotdb-confignode IoTDB: Core: ConfigNode @@ -42,57 +42,57 @@ org.apache.iotdb service-rpc - 1.3.2-SNAPSHOT + 1.3.3-SNAPSHOT org.apache.iotdb iotdb-consensus - 1.3.2-SNAPSHOT + 1.3.3-SNAPSHOT org.apache.iotdb iotdb-server - 1.3.2-SNAPSHOT + 1.3.3-SNAPSHOT org.apache.iotdb pipe-api - 1.3.2-SNAPSHOT + 1.3.3-SNAPSHOT org.apache.iotdb trigger-api - 1.3.2-SNAPSHOT + 1.3.3-SNAPSHOT org.apache.iotdb metrics-interface - 1.3.2-SNAPSHOT + 1.3.3-SNAPSHOT org.apache.iotdb iotdb-thrift - 1.3.2-SNAPSHOT + 1.3.3-SNAPSHOT org.apache.iotdb iotdb-thrift-commons - 1.3.2-SNAPSHOT + 1.3.3-SNAPSHOT org.apache.iotdb iotdb-thrift-confignode - 1.3.2-SNAPSHOT + 1.3.3-SNAPSHOT org.apache.iotdb node-commons - 1.3.2-SNAPSHOT + 1.3.3-SNAPSHOT org.apache.iotdb udf-api - 1.3.2-SNAPSHOT + 1.3.3-SNAPSHOT org.apache.tsfile diff --git a/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/manager/pipe/metric/PipeConfigRegionExtractorMetrics.java b/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/manager/pipe/metric/PipeConfigRegionExtractorMetrics.java index 403fb8a2b6c3..b736b2a82e87 100644 --- a/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/manager/pipe/metric/PipeConfigRegionExtractorMetrics.java +++ b/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/manager/pipe/metric/PipeConfigRegionExtractorMetrics.java @@ -122,10 +122,8 @@ public void deregister(final String taskID) { public long getRemainingEventCount(final String pipeName, final long creationTime) { final String taskID = pipeName + "_" + creationTime; final IoTDBConfigRegionExtractor extractor = extractorMap.get(taskID); + // Do not print log to allow collection when config region extractor does not exists if (Objects.isNull(extractor)) { - LOGGER.warn( - "Failed to get remaining event count, IoTDBConfigRegionExtractor({}) does not exist", - taskID); return 0; } return extractor.getUnTransferredEventCount(); diff --git a/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/procedure/env/RegionMaintainHandler.java b/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/procedure/env/RegionMaintainHandler.java index 66a8ffd3001b..ee3f8855d336 100644 --- 
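The getDataType/getBytes hunks above make the JDBC result set tolerate unresolved columns: the type lookup now returns null (and answers INT64 for the time column) instead of throwing, and callers null-check the result before dereferencing it. Below is a minimal, dependency-free sketch of that defensive lookup; the class name, the String return type (standing in for TSDataType), and the constant values are illustrative assumptions, not project code.

import java.util.List;
import java.util.Map;

final class ColumnTypeLookup {
  private static final String TIMESTAMP_STR = "Time"; // assumed constant value
  private static final int START_INDEX = 2;           // assumed offset, as in the RPC data set

  private final Map<String, Integer> columnOrdinalMap;
  private final List<String> columnTypeDeduplicatedList;

  ColumnTypeLookup(Map<String, Integer> ordinals, List<String> types) {
    this.columnOrdinalMap = ordinals;
    this.columnTypeDeduplicatedList = types;
  }

  /** Returns the column's type name, or null when the column cannot be resolved. */
  String getDataType(String columnName) {
    if (TIMESTAMP_STR.equals(columnName)) {
      return "INT64"; // the time column always has a fixed type
    }
    Integer ordinal = columnOrdinalMap.get(columnName);
    if (ordinal == null) {
      return null; // unknown column name
    }
    int index = ordinal - START_INDEX;
    return index < 0 || index >= columnTypeDeduplicatedList.size()
        ? null // ordinal points outside the deduplicated type list
        : columnTypeDeduplicatedList.get(index);
  }
}

Callers then branch on null exactly as getBytes does above, returning null to the JDBC client rather than hitting a NullPointerException.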
a/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/procedure/env/RegionMaintainHandler.java +++ b/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/procedure/env/RegionMaintainHandler.java @@ -380,6 +380,7 @@ public TRegionMigrateResult waitTaskFinish(long taskId, TDataNodeLocation dataNo try (SyncDataNodeInternalServiceClient dataNodeClient = dataNodeClientManager.borrowClient(dataNodeLocation.getInternalEndPoint())) { TRegionMigrateResult report = dataNodeClient.getRegionMaintainResult(taskId); + lastReportTime = System.nanoTime(); if (report.getTaskStatus() != TRegionMaintainTaskStatus.PROCESSING) { return report; } diff --git a/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/procedure/impl/subscription/topic/CreateTopicProcedure.java b/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/procedure/impl/subscription/topic/CreateTopicProcedure.java index ed3d59bd3d48..afdbfe244d72 100644 --- a/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/procedure/impl/subscription/topic/CreateTopicProcedure.java +++ b/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/procedure/impl/subscription/topic/CreateTopicProcedure.java @@ -20,7 +20,9 @@ package org.apache.iotdb.confignode.procedure.impl.subscription.topic; import org.apache.iotdb.common.rpc.thrift.TSStatus; +import org.apache.iotdb.commons.conf.CommonDescriptor; import org.apache.iotdb.commons.subscription.meta.topic.TopicMeta; +import org.apache.iotdb.commons.utils.CommonDateTimeUtils; import org.apache.iotdb.confignode.consensus.request.write.subscription.topic.CreateTopicPlan; import org.apache.iotdb.confignode.consensus.request.write.subscription.topic.DropTopicPlan; import org.apache.iotdb.confignode.procedure.env.ConfigNodeProcedureEnv; @@ -76,7 +78,9 @@ protected void executeFromValidate(ConfigNodeProcedureEnv env) throws Subscripti topicMeta = new TopicMeta( createTopicReq.getTopicName(), - System.currentTimeMillis(), + CommonDateTimeUtils.convertMilliTimeWithPrecision( + System.currentTimeMillis(), + CommonDescriptor.getInstance().getConfig().getTimestampPrecision()), createTopicReq.getTopicAttributes()); } diff --git a/iotdb-core/consensus/pom.xml b/iotdb-core/consensus/pom.xml index f6622cfd3673..2768096316a9 100644 --- a/iotdb-core/consensus/pom.xml +++ b/iotdb-core/consensus/pom.xml @@ -24,7 +24,7 @@ org.apache.iotdb iotdb-core - 1.3.2-SNAPSHOT + 1.3.3-SNAPSHOT iotdb-consensus IoTDB: Core: Consensus @@ -39,32 +39,32 @@ org.apache.iotdb node-commons - 1.3.2-SNAPSHOT + 1.3.3-SNAPSHOT org.apache.iotdb metrics-interface - 1.3.2-SNAPSHOT + 1.3.3-SNAPSHOT org.apache.iotdb iotdb-thrift-commons - 1.3.2-SNAPSHOT + 1.3.3-SNAPSHOT org.apache.iotdb iotdb-thrift-consensus - 1.3.2-SNAPSHOT + 1.3.3-SNAPSHOT org.apache.iotdb service-rpc - 1.3.2-SNAPSHOT + 1.3.3-SNAPSHOT org.apache.iotdb pipe-api - 1.3.2-SNAPSHOT + 1.3.3-SNAPSHOT org.apache.ratis diff --git a/iotdb-core/consensus/src/main/java/org/apache/iotdb/consensus/iot/logdispatcher/LogDispatcher.java b/iotdb-core/consensus/src/main/java/org/apache/iotdb/consensus/iot/logdispatcher/LogDispatcher.java index d7424f136d77..f5de226cc41d 100644 --- a/iotdb-core/consensus/src/main/java/org/apache/iotdb/consensus/iot/logdispatcher/LogDispatcher.java +++ b/iotdb-core/consensus/src/main/java/org/apache/iotdb/consensus/iot/logdispatcher/LogDispatcher.java @@ -46,12 +46,9 @@ import java.util.OptionalLong; import java.util.concurrent.ArrayBlockingQueue; import java.util.concurrent.BlockingQueue; -import 
java.util.concurrent.CancellationException; -import java.util.concurrent.ExecutionException; import java.util.concurrent.ExecutorService; -import java.util.concurrent.Future; +import java.util.concurrent.Semaphore; import java.util.concurrent.TimeUnit; -import java.util.concurrent.TimeoutException; import java.util.concurrent.atomic.AtomicLong; import java.util.stream.Collectors; @@ -101,7 +98,7 @@ private void initLogSyncThreadPool() { public synchronized void start() { if (!threads.isEmpty()) { - threads.forEach(thread -> thread.setFuture(executorService.submit(thread))); + threads.forEach(executorService::submit); } } @@ -133,7 +130,7 @@ public synchronized void addLogDispatcherThread(Peer peer, long initialSyncIndex if (this.executorService == null) { initLogSyncThreadPool(); } - thread.setFuture(executorService.submit(thread)); + executorService.submit(thread); } public synchronized void removeLogDispatcherThread(Peer peer) throws IOException { @@ -231,7 +228,7 @@ public class LogDispatcherThread implements Runnable { private final LogDispatcherThreadMetrics logDispatcherThreadMetrics; - private Future future; + private Semaphore threadSemaphore = new Semaphore(0); public LogDispatcherThread(Peer peer, IoTConsensusConfig config, long initialSyncIndex) { this.peer = peer; @@ -257,10 +254,6 @@ public long getCurrentSyncIndex() { return controller.getCurrentIndex(); } - public void setFuture(Future future) { - this.future = future; - } - public long getLastFlushedSyncIndex() { return controller.getLastFlushedIndex(); } @@ -308,16 +301,12 @@ private void releaseReservedMemory(IndexedConsensusRequest indexedConsensusReque public void stop() { stopped = true; - if (!future.cancel(true)) { - logger.warn("LogDispatcherThread Future for {} is not stopped", peer); - } try { - future.get(30, TimeUnit.SECONDS); - } catch (InterruptedException | ExecutionException | TimeoutException e) { + if (!threadSemaphore.tryAcquire(30, TimeUnit.SECONDS)) { + logger.error("{}: Dispatcher for {} didn't stop after 30s.", impl.getThisNode(), peer); + } + } catch (InterruptedException e) { Thread.currentThread().interrupt(); - logger.warn("LogDispatcherThread Future for {} is not stopped", peer, e); - } catch (CancellationException ignored) { - // ignore because it is expected } long requestSize = 0; for (IndexedConsensusRequest indexedConsensusRequest : pendingEntries) { @@ -351,7 +340,7 @@ public void run() { logger.info("{}: Dispatcher for {} starts", impl.getThisNode(), peer); try { Batch batch; - while (!Thread.interrupted()) { + while (!Thread.interrupted() && !stopped) { long startTime = System.nanoTime(); while ((batch = getBatch()).isEmpty()) { // we may block here if there is no requests in the queue @@ -366,12 +355,12 @@ public void run() { } } // Immediately check for interrupts after poll and sleep - if (Thread.interrupted()) { + if (Thread.interrupted() || stopped) { throw new InterruptedException("Interrupted after polling and sleeping"); } } // Immediately check for interrupts after a time-consuming getBatch() operation - if (Thread.interrupted()) { + if (Thread.interrupted() || stopped) { throw new InterruptedException("Interrupted after getting a batch"); } logDispatcherThreadMetrics.recordConstructBatchTime(System.nanoTime() - startTime); @@ -388,6 +377,7 @@ public void run() { } catch (Exception e) { logger.error("Unexpected error in logDispatcher for peer {}", peer, e); } + threadSemaphore.release(); logger.info("{}: Dispatcher for {} exits", impl.getThisNode(), peer); } diff --git 
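The LogDispatcher hunk above replaces Future-based cancellation with a stop flag plus a Semaphore handshake: the dispatcher thread releases one permit when its run loop exits, and stop() waits up to 30 seconds for that permit before logging an error. A self-contained sketch of the same handshake follows; the class and field names are made up, and the release is placed in a finally block here for robustness.

import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import java.util.concurrent.Semaphore;
import java.util.concurrent.TimeUnit;

final class StoppableWorker implements Runnable {
  private final Semaphore exited = new Semaphore(0); // no permits until the loop ends
  private volatile boolean stopped = false;

  @Override
  public void run() {
    try {
      while (!Thread.interrupted() && !stopped) {
        // ... dispatch one batch here ...
      }
    } finally {
      exited.release(); // signal that the worker has left its loop
    }
  }

  void stop() throws InterruptedException {
    stopped = true;
    if (!exited.tryAcquire(30, TimeUnit.SECONDS)) {
      System.err.println("worker did not stop within 30s");
    }
  }

  public static void main(String[] args) throws InterruptedException {
    ExecutorService pool = Executors.newSingleThreadExecutor();
    StoppableWorker worker = new StoppableWorker();
    pool.submit(worker);
    worker.stop();   // returns as soon as the worker releases its permit
    pool.shutdown();
  }
}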
a/iotdb-core/consensus/src/test/java/org/apache/iotdb/consensus/iot/ReplicateTest.java b/iotdb-core/consensus/src/test/java/org/apache/iotdb/consensus/iot/ReplicateTest.java index 610692a73e1a..8072ab100660 100644 --- a/iotdb-core/consensus/src/test/java/org/apache/iotdb/consensus/iot/ReplicateTest.java +++ b/iotdb-core/consensus/src/test/java/org/apache/iotdb/consensus/iot/ReplicateTest.java @@ -33,21 +33,17 @@ import org.apache.iotdb.consensus.iot.util.TestStateMachine; import org.apache.ratis.util.FileUtils; -import org.apache.tsfile.utils.PublicBAOS; import org.junit.After; import org.junit.Assert; +import org.junit.Assume; import org.junit.Before; import org.junit.Test; import org.slf4j.Logger; import org.slf4j.LoggerFactory; -import java.io.DataOutputStream; import java.io.File; import java.io.IOException; import java.net.ServerSocket; -import java.nio.file.Files; -import java.nio.file.Path; -import java.nio.file.Paths; import java.util.ArrayList; import java.util.Arrays; import java.util.List; @@ -102,60 +98,39 @@ public void tearDown() throws Exception { } } - public void changeConfiguration(int i) { - try (PublicBAOS publicBAOS = new PublicBAOS(); - DataOutputStream outputStream = new DataOutputStream(publicBAOS)) { - outputStream.writeInt(this.peers.size()); - for (Peer peer : this.peers) { - peer.serialize(outputStream); - } - File storageDir = new File(IoTConsensus.buildPeerDir(peersStorage.get(i), gid)); - Path tmpConfigurationPath = - Paths.get(new File(storageDir, CONFIGURATION_TMP_FILE_NAME).getAbsolutePath()); - Path configurationPath = - Paths.get(new File(storageDir, CONFIGURATION_FILE_NAME).getAbsolutePath()); - if (!Files.exists(configurationPath) && !Files.exists(tmpConfigurationPath)) { - return; - } - if (!Files.exists(tmpConfigurationPath)) { - Files.createDirectories(tmpConfigurationPath.getParent()); - Files.createFile(tmpConfigurationPath); - } - Files.write(tmpConfigurationPath, publicBAOS.getBuf()); - if (Files.exists(configurationPath)) { - Files.delete(configurationPath); + private void initServer() throws IOException { + Assume.assumeTrue(checkPortAvailable()); + try { + for (int i = 0; i < peers.size(); i++) { + int finalI = i; + servers.add( + (IoTConsensus) + ConsensusFactory.getConsensusImpl( + ConsensusFactory.IOT_CONSENSUS, + ConsensusConfig.newBuilder() + .setThisNodeId(peers.get(i).getNodeId()) + .setThisNode(peers.get(i).getEndpoint()) + .setStorageDir(peersStorage.get(i).getAbsolutePath()) + .setConsensusGroupType(TConsensusGroupType.DataRegion) + .build(), + groupId -> stateMachines.get(finalI)) + .orElseThrow( + () -> + new IllegalArgumentException( + String.format( + ConsensusFactory.CONSTRUCT_FAILED_MSG, + ConsensusFactory.IOT_CONSENSUS)))); + servers.get(i).start(); } - Files.move(tmpConfigurationPath, configurationPath); } catch (IOException e) { - logger.error("Unexpected error occurs when persisting configuration", e); - } - } - - private void initServer() throws IOException { - for (int i = 0; i < peers.size(); i++) { - findPortAvailable(i); - } - for (int i = 0; i < peers.size(); i++) { - int finalI = i; - changeConfiguration(i); - servers.add( - (IoTConsensus) - ConsensusFactory.getConsensusImpl( - ConsensusFactory.IOT_CONSENSUS, - ConsensusConfig.newBuilder() - .setThisNodeId(peers.get(i).getNodeId()) - .setThisNode(peers.get(i).getEndpoint()) - .setStorageDir(peersStorage.get(i).getAbsolutePath()) - .setConsensusGroupType(TConsensusGroupType.DataRegion) - .build(), - groupId -> stateMachines.get(finalI)) - .orElseThrow( - () -> - new 
IllegalArgumentException( - String.format( - ConsensusFactory.CONSTRUCT_FAILED_MSG, - ConsensusFactory.IOT_CONSENSUS)))); - servers.get(i).start(); + if (e.getCause() instanceof StartupException) { + // just succeed when can not bind socket + logger.info("Can not start IoTConsensus because", e); + Assume.assumeTrue(false); + } else { + logger.error("Failed because", e); + Assert.fail("Failed because " + e.getMessage()); + } } } @@ -363,23 +338,15 @@ public void parsingAndConstructIDTest() throws Exception { } } - private void findPortAvailable(int i) { - long start = System.currentTimeMillis(); - while (System.currentTimeMillis() - start < timeout) { - try (ServerSocket ignored = new ServerSocket(this.peers.get(i).getEndpoint().port)) { - // success - return; + private boolean checkPortAvailable() { + for (Peer peer : this.peers) { + try (ServerSocket ignored = new ServerSocket(peer.getEndpoint().port)) { + logger.info("check port {} success for node {}", peer.getEndpoint().port, peer.getNodeId()); } catch (IOException e) { - // Port is already in use, wait and retry - this.peers.set(i, new Peer(gid, i + 1, new TEndPoint("127.0.0.1", this.basePort))); - logger.info("try port {} for node {}.", this.basePort++, i + 1); - try { - Thread.sleep(50); // Wait for 1 second before retrying - } catch (InterruptedException ex) { - // Handle the interruption if needed - } + logger.error("check port {} failed for node {}", peer.getEndpoint().port, peer.getNodeId()); + return false; } } - Assert.fail(String.format("can not find port for node %d after 300s", i + 1)); + return true; } } diff --git a/iotdb-core/datanode/pom.xml b/iotdb-core/datanode/pom.xml index 61a76f891c7c..eb072bb5cbbb 100644 --- a/iotdb-core/datanode/pom.xml +++ b/iotdb-core/datanode/pom.xml @@ -24,7 +24,7 @@ org.apache.iotdb iotdb-core - 1.3.2-SNAPSHOT + 1.3.3-SNAPSHOT iotdb-server IoTDB: Core: Data-Node (Server) @@ -37,12 +37,12 @@ org.apache.iotdb service-rpc - 1.3.2-SNAPSHOT + 1.3.3-SNAPSHOT org.apache.iotdb iotdb-consensus - 1.3.2-SNAPSHOT + 1.3.3-SNAPSHOT org.apache.tsfile @@ -57,77 +57,77 @@ org.apache.iotdb external-api - 1.3.2-SNAPSHOT + 1.3.3-SNAPSHOT org.apache.iotdb openapi - 1.3.2-SNAPSHOT + 1.3.3-SNAPSHOT org.apache.iotdb node-commons - 1.3.2-SNAPSHOT + 1.3.3-SNAPSHOT org.apache.iotdb isession - 1.3.2-SNAPSHOT + 1.3.3-SNAPSHOT org.apache.iotdb iotdb-antlr - 1.3.2-SNAPSHOT + 1.3.3-SNAPSHOT org.apache.iotdb iotdb-relational-grammar - 1.3.2-SNAPSHOT + 1.3.3-SNAPSHOT org.apache.iotdb iotdb-thrift-commons - 1.3.2-SNAPSHOT + 1.3.3-SNAPSHOT org.apache.iotdb iotdb-thrift-consensus - 1.3.2-SNAPSHOT + 1.3.3-SNAPSHOT org.apache.iotdb udf-api - 1.3.2-SNAPSHOT + 1.3.3-SNAPSHOT org.apache.iotdb trigger-api - 1.3.2-SNAPSHOT + 1.3.3-SNAPSHOT org.apache.iotdb metrics-interface - 1.3.2-SNAPSHOT + 1.3.3-SNAPSHOT org.apache.iotdb iotdb-thrift - 1.3.2-SNAPSHOT + 1.3.3-SNAPSHOT org.apache.iotdb iotdb-thrift-confignode - 1.3.2-SNAPSHOT + 1.3.3-SNAPSHOT org.apache.iotdb pipe-api - 1.3.2-SNAPSHOT + 1.3.3-SNAPSHOT org.apache.iotdb iotdb-session - 1.3.2-SNAPSHOT + 1.3.3-SNAPSHOT org.apache.commons @@ -305,7 +305,7 @@ org.apache.iotdb metrics-core - 1.3.2-SNAPSHOT + 1.3.3-SNAPSHOT org.mockito diff --git a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/conf/IoTDBDescriptor.java b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/conf/IoTDBDescriptor.java index bbcbc0ed7947..fd1068b12577 100644 --- a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/conf/IoTDBDescriptor.java +++ 
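The ReplicateTest hunk above stops retrying with shifting port numbers and instead probes the fixed ports once, letting JUnit's Assume skip the test when any port is already bound or when the server cannot start for that reason. A small standalone illustration of the pattern, with arbitrary port numbers:

import java.io.IOException;
import java.net.ServerSocket;

import org.junit.Assume;
import org.junit.Test;

public class PortAvailabilityExample {

  private static boolean portsAvailable(int... ports) {
    for (int port : ports) {
      try (ServerSocket ignored = new ServerSocket(port)) {
        // bind succeeded, so the port is free at this moment
      } catch (IOException e) {
        return false; // something else owns the port; the test cannot run
      }
    }
    return true;
  }

  @Test
  public void replicationLikeTest() {
    Assume.assumeTrue(portsAvailable(9003, 9004, 9005)); // skip instead of fail
    // ... start the consensus servers on these ports and run assertions ...
  }
}

This trades the earlier port-hunting loop for a quiet skip on busy CI machines, so environmental contention no longer shows up as a test failure.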
b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/conf/IoTDBDescriptor.java @@ -1824,6 +1824,9 @@ public void loadHotModifiedProps(Properties properties) throws QueryProcessExcep // update Consensus config reloadConsensusProps(properties); + + // update retry config + commonDescriptor.loadRetryProperties(properties); } catch (Exception e) { throw new QueryProcessException(String.format("Fail to reload configuration because %s", e)); } diff --git a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/connector/protocol/thrift/async/handler/PipeTransferTabletBatchEventHandler.java b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/connector/protocol/thrift/async/handler/PipeTransferTabletBatchEventHandler.java index 7407b1b9554e..038da9781538 100644 --- a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/connector/protocol/thrift/async/handler/PipeTransferTabletBatchEventHandler.java +++ b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/connector/protocol/thrift/async/handler/PipeTransferTabletBatchEventHandler.java @@ -121,18 +121,20 @@ public void onComplete(final TPipeTransferResp response) { @Override public void onError(final Exception exception) { - LOGGER.warn( - "Failed to transfer TabletInsertionEvent batch {} (request commit ids={}).", - events.stream() - .map( - event -> - event instanceof EnrichedEvent - ? ((EnrichedEvent) event).coreReportMessage() - : event.toString()) - .collect(Collectors.toList()), - requestCommitIds, - exception); - - connector.addFailureEventsToRetryQueue(events); + try { + LOGGER.warn( + "Failed to transfer TabletInsertionEvent batch {} (request commit ids={}).", + events.stream() + .map( + event -> + event instanceof EnrichedEvent + ? ((EnrichedEvent) event).coreReportMessage() + : event.toString()) + .collect(Collectors.toList()), + requestCommitIds, + exception); + } finally { + connector.addFailureEventsToRetryQueue(events); + } } } diff --git a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/connector/protocol/thrift/async/handler/PipeTransferTabletInsertionEventHandler.java b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/connector/protocol/thrift/async/handler/PipeTransferTabletInsertionEventHandler.java index 94fa6d5ac1a3..2e54a8c7c0be 100644 --- a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/connector/protocol/thrift/async/handler/PipeTransferTabletInsertionEventHandler.java +++ b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/connector/protocol/thrift/async/handler/PipeTransferTabletInsertionEventHandler.java @@ -97,15 +97,17 @@ public void onComplete(TPipeTransferResp response) { @Override public void onError(Exception exception) { - LOGGER.warn( - "Failed to transfer TabletInsertionEvent {} (committer key={}, commit id={}).", - event instanceof EnrichedEvent - ? ((EnrichedEvent) event).coreReportMessage() - : event.toString(), - event instanceof EnrichedEvent ? ((EnrichedEvent) event).getCommitterKey() : null, - event instanceof EnrichedEvent ? ((EnrichedEvent) event).getCommitId() : null, - exception); - - connector.addFailureEventToRetryQueue(event); + try { + LOGGER.warn( + "Failed to transfer TabletInsertionEvent {} (committer key={}, commit id={}).", + event instanceof EnrichedEvent + ? ((EnrichedEvent) event).coreReportMessage() + : event.toString(), + event instanceof EnrichedEvent ? ((EnrichedEvent) event).getCommitterKey() : null, + event instanceof EnrichedEvent ? 
((EnrichedEvent) event).getCommitId() : null, + exception); + } finally { + connector.addFailureEventToRetryQueue(event); + } } } diff --git a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/connector/protocol/thrift/async/handler/PipeTransferTsFileInsertionEventHandler.java b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/connector/protocol/thrift/async/handler/PipeTransferTsFileInsertionEventHandler.java index e7e374443c64..a97d87ebdac7 100644 --- a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/connector/protocol/thrift/async/handler/PipeTransferTsFileInsertionEventHandler.java +++ b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/connector/protocol/thrift/async/handler/PipeTransferTsFileInsertionEventHandler.java @@ -241,18 +241,22 @@ public void onComplete(final TPipeTransferResp response) { @Override public void onError(final Exception exception) { - LOGGER.warn( - "Failed to transfer TsFileInsertionEvent {} (committer key {}, commit id {}).", - tsFile, - event.getCommitterKey(), - event.getCommitId(), - exception); + try { + LOGGER.warn( + "Failed to transfer TsFileInsertionEvent {} (committer key {}, commit id {}).", + tsFile, + event.getCommitterKey(), + event.getCommitId(), + exception); + } catch (final Exception e) { + LOGGER.warn("Failed to log error when failed to transfer file.", e); + } try { if (Objects.nonNull(clientManager)) { clientManager.adjustTimeoutIfNecessary(exception); } - } catch (Exception e) { + } catch (final Exception e) { LOGGER.warn("Failed to adjust timeout when failed to transfer file.", e); } diff --git a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/event/common/heartbeat/PipeHeartbeatEvent.java b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/event/common/heartbeat/PipeHeartbeatEvent.java index c3d0798c7e9b..9738f86a5dd4 100644 --- a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/event/common/heartbeat/PipeHeartbeatEvent.java +++ b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/event/common/heartbeat/PipeHeartbeatEvent.java @@ -25,8 +25,6 @@ import org.apache.iotdb.commons.pipe.pattern.PipePattern; import org.apache.iotdb.commons.pipe.task.connection.UnboundedBlockingPendingQueue; import org.apache.iotdb.commons.pipe.task.meta.PipeTaskMeta; -import org.apache.iotdb.db.pipe.extractor.dataregion.realtime.PipeRealtimeDataRegionExtractor; -import org.apache.iotdb.db.pipe.extractor.dataregion.realtime.PipeRealtimeDataRegionHybridExtractor; import org.apache.iotdb.db.pipe.metric.PipeHeartbeatEventMetrics; import org.apache.iotdb.db.utils.DateTimeUtils; import org.apache.iotdb.pipe.api.event.Event; @@ -41,7 +39,6 @@ public class PipeHeartbeatEvent extends EnrichedEvent { private final String dataRegionId; private String pipeName; - private PipeRealtimeDataRegionExtractor extractor = null; private long timePublished; private long timeAssigned; @@ -62,18 +59,18 @@ public class PipeHeartbeatEvent extends EnrichedEvent { private final boolean shouldPrintMessage; - public PipeHeartbeatEvent(String dataRegionId, boolean shouldPrintMessage) { + public PipeHeartbeatEvent(final String dataRegionId, final boolean shouldPrintMessage) { super(null, null, null, Long.MIN_VALUE, Long.MAX_VALUE); this.dataRegionId = dataRegionId; this.shouldPrintMessage = shouldPrintMessage; } public PipeHeartbeatEvent( - String pipeName, - PipeTaskMeta pipeTaskMeta, - String dataRegionId, - long timePublished, - boolean shouldPrintMessage) { + final String pipeName, + final PipeTaskMeta 
pipeTaskMeta, + final String dataRegionId, + final long timePublished, + final boolean shouldPrintMessage) { super(pipeName, pipeTaskMeta, null, Long.MIN_VALUE, Long.MAX_VALUE); this.dataRegionId = dataRegionId; this.timePublished = timePublished; @@ -81,12 +78,12 @@ public PipeHeartbeatEvent( } @Override - public boolean internallyIncreaseResourceReferenceCount(String holderMessage) { + public boolean internallyIncreaseResourceReferenceCount(final String holderMessage) { return true; } @Override - public boolean internallyDecreaseResourceReferenceCount(String holderMessage) { + public boolean internallyDecreaseResourceReferenceCount(final String holderMessage) { // PipeName == null indicates that the event is the raw event at disruptor, // not the event copied and passed to the extractor if (shouldPrintMessage && pipeName != null && LOGGER.isDebugEnabled()) { @@ -102,11 +99,11 @@ public ProgressIndex getProgressIndex() { @Override public EnrichedEvent shallowCopySelfAndBindPipeTaskMetaForProgressReport( - String pipeName, - PipeTaskMeta pipeTaskMeta, - PipePattern pattern, - long startTime, - long endTime) { + final String pipeName, + final PipeTaskMeta pipeTaskMeta, + final PipePattern pattern, + final long startTime, + final long endTime) { // Should record PipeTaskMeta, for sometimes HeartbeatEvents should report exceptions. // Here we ignore parameters `pattern`, `startTime`, and `endTime`. return new PipeHeartbeatEvent( @@ -131,7 +128,7 @@ public boolean isShouldPrintMessage() { /////////////////////////////// Delay Reporting /////////////////////////////// - public void bindPipeName(String pipeName) { + public void bindPipeName(final String pipeName) { if (shouldPrintMessage) { this.pipeName = pipeName; } @@ -175,13 +172,13 @@ public void onTransferred() { /////////////////////////////// Queue size Reporting /////////////////////////////// - public void recordDisruptorSize(RingBuffer ringBuffer) { + public void recordDisruptorSize(final RingBuffer ringBuffer) { if (shouldPrintMessage) { disruptorSize = ringBuffer.getBufferSize() - (int) ringBuffer.remainingCapacity(); } } - public void recordExtractorQueueSize(UnboundedBlockingPendingQueue pendingQueue) { + public void recordExtractorQueueSize(final UnboundedBlockingPendingQueue pendingQueue) { if (shouldPrintMessage) { extractorQueueTabletSize = pendingQueue.getTabletInsertionEventCount(); extractorQueueTsFileSize = pendingQueue.getTsFileInsertionEventCount(); @@ -189,23 +186,12 @@ public void recordExtractorQueueSize(UnboundedBlockingPendingQueue pendin } } - public void recordConnectorQueueSize(UnboundedBlockingPendingQueue pendingQueue) { + public void recordConnectorQueueSize(final UnboundedBlockingPendingQueue pendingQueue) { if (shouldPrintMessage) { connectorQueueTabletSize = pendingQueue.getTabletInsertionEventCount(); connectorQueueTsFileSize = pendingQueue.getTsFileInsertionEventCount(); connectorQueueSize = pendingQueue.size(); } - - if (extractor instanceof PipeRealtimeDataRegionHybridExtractor) { - ((PipeRealtimeDataRegionHybridExtractor) extractor) - .informConnectorInputPendingQueueTsFileSize(pendingQueue.getTsFileInsertionEventCount()); - } - } - - /////////////////////////////// For Hybrid extractor /////////////////////////////// - - public void bindExtractor(PipeRealtimeDataRegionExtractor extractor) { - this.extractor = extractor; } /////////////////////////////// For Commit Ordering /////////////////////////////// diff --git 
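The onError hunks in the batch and tablet handlers above share one goal: even if composing the warning message throws, the failed event must still reach the retry queue, so the enqueue is moved into a finally block (the TsFile handler instead wraps its logging in its own try/catch). A stripped-down sketch, using a plain queue and Object events as stand-ins for the connector types:

import java.util.Queue;
import java.util.concurrent.ConcurrentLinkedQueue;

final class RetryOnErrorHandler {
  private final Queue<Object> retryQueue = new ConcurrentLinkedQueue<>();

  void onError(Object event, Exception exception) {
    try {
      // formatting may itself throw, e.g. a toString() that touches released resources
      System.err.println("transfer failed for " + event + ": " + exception.getMessage());
    } finally {
      retryQueue.add(event); // always runs, even when the logging above fails
    }
  }

  int pendingRetries() {
    return retryQueue.size();
  }
}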
a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/extractor/dataregion/historical/PipeHistoricalDataRegionTsFileExtractor.java b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/extractor/dataregion/historical/PipeHistoricalDataRegionTsFileExtractor.java index 1c150dd54093..dc86e82e361d 100644 --- a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/extractor/dataregion/historical/PipeHistoricalDataRegionTsFileExtractor.java +++ b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/extractor/dataregion/historical/PipeHistoricalDataRegionTsFileExtractor.java @@ -45,6 +45,7 @@ import org.apache.iotdb.pipe.api.event.Event; import org.apache.iotdb.pipe.api.exception.PipeParameterNotValidException; +import org.apache.tsfile.utils.Pair; import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -101,6 +102,7 @@ public class PipeHistoricalDataRegionTsFileExtractor implements PipeHistoricalDa private boolean sloppyTimeRange; // true to disable time range filter after extraction + private Pair listeningOptionPair; private boolean shouldExtractInsertion; private boolean shouldTransferModFile; // Whether to transfer mods @@ -113,6 +115,14 @@ public class PipeHistoricalDataRegionTsFileExtractor implements PipeHistoricalDa public void validate(final PipeParameterValidator validator) { final PipeParameters parameters = validator.getParameters(); + try { + listeningOptionPair = + DataRegionListeningFilter.parseInsertionDeletionListeningOptionPair(parameters); + } catch (final Exception e) { + // compatible with the current validation framework + throw new PipeParameterNotValidException(e.getMessage()); + } + if (parameters.hasAnyAttributes( SOURCE_START_TIME_KEY, EXTRACTOR_START_TIME_KEY, @@ -134,17 +144,21 @@ public void validate(final PipeParameterValidator validator) { if (historicalDataExtractionStartTime > historicalDataExtractionEndTime) { throw new PipeParameterNotValidException( String.format( - "%s or %s should be less than or equal to %s or %s.", + "%s (%s) [%s] should be less than or equal to %s (%s) [%s].", SOURCE_START_TIME_KEY, EXTRACTOR_START_TIME_KEY, + historicalDataExtractionStartTime, SOURCE_END_TIME_KEY, - EXTRACTOR_END_TIME_KEY)); + EXTRACTOR_END_TIME_KEY, + historicalDataExtractionEndTime)); } - return; } catch (final Exception e) { // compatible with the current validation framework throw new PipeParameterNotValidException(e.getMessage()); } + + // return here + return; } // Historical data extraction is enabled in the following cases: @@ -179,29 +193,14 @@ public void validate(final PipeParameterValidator validator) { if (historicalDataExtractionStartTime > historicalDataExtractionEndTime) { throw new PipeParameterNotValidException( String.format( - "%s (%s) should be less than or equal to %s (%s).", + "%s (%s) [%s] should be less than or equal to %s (%s) [%s].", EXTRACTOR_HISTORY_START_TIME_KEY, SOURCE_HISTORY_START_TIME_KEY, + historicalDataExtractionStartTime, EXTRACTOR_HISTORY_END_TIME_KEY, - SOURCE_HISTORY_END_TIME_KEY)); + SOURCE_HISTORY_END_TIME_KEY, + historicalDataExtractionEndTime)); } - - shouldTransferModFile = - parameters.getBooleanOrDefault( - Arrays.asList(SOURCE_MODS_ENABLE_KEY, EXTRACTOR_MODS_ENABLE_KEY), - EXTRACTOR_MODS_ENABLE_DEFAULT_VALUE - || // Should extract deletion - DataRegionListeningFilter.parseInsertionDeletionListeningOptionPair(parameters) - .getRight()); - - shouldTerminatePipeOnAllHistoricalEventsConsumed = - parameters - .getStringOrDefault( - Arrays.asList( - PipeExtractorConstant.EXTRACTOR_MODE_KEY, - 
PipeExtractorConstant.SOURCE_MODE_KEY), - PipeExtractorConstant.EXTRACTOR_MODE_DEFAULT_VALUE) - .equalsIgnoreCase(PipeExtractorConstant.EXTRACTOR_MODE_QUERY_VALUE); } catch (final Exception e) { // Compatible with the current validation framework throw new PipeParameterNotValidException(e.getMessage()); @@ -212,8 +211,7 @@ public void validate(final PipeParameterValidator validator) { public void customize( final PipeParameters parameters, final PipeExtractorRuntimeConfiguration configuration) throws IllegalPathException { - shouldExtractInsertion = - DataRegionListeningFilter.parseInsertionDeletionListeningOptionPair(parameters).getLeft(); + shouldExtractInsertion = listeningOptionPair.getLeft(); // Do nothing if only extract deletion if (!shouldExtractInsertion) { return; @@ -295,15 +293,33 @@ public void customize( .collect(Collectors.toSet()) .contains("time"); + shouldTransferModFile = + parameters.getBooleanOrDefault( + Arrays.asList(SOURCE_MODS_ENABLE_KEY, EXTRACTOR_MODS_ENABLE_KEY), + EXTRACTOR_MODS_ENABLE_DEFAULT_VALUE + || // Should extract deletion + listeningOptionPair.getRight()); + + shouldTerminatePipeOnAllHistoricalEventsConsumed = + parameters + .getStringOrDefault( + Arrays.asList( + PipeExtractorConstant.EXTRACTOR_MODE_KEY, + PipeExtractorConstant.SOURCE_MODE_KEY), + PipeExtractorConstant.EXTRACTOR_MODE_DEFAULT_VALUE) + .equalsIgnoreCase(PipeExtractorConstant.EXTRACTOR_MODE_QUERY_VALUE); + LOGGER.info( - "Pipe {}@{}: historical data extraction time range, start time {}({}), end time {}({}), sloppy time range {}", + "Pipe {}@{}: historical data extraction time range, start time {}({}), end time {}({}), sloppy time range {}, should transfer mod file {}, should terminate pipe on all historical events consumed {}", pipeName, dataRegionId, DateTimeUtils.convertLongToDate(historicalDataExtractionStartTime), historicalDataExtractionStartTime, DateTimeUtils.convertLongToDate(historicalDataExtractionEndTime), historicalDataExtractionEndTime, - sloppyTimeRange); + sloppyTimeRange, + shouldTransferModFile, + shouldTerminatePipeOnAllHistoricalEventsConsumed); } private void flushDataRegionAllTsFiles() { diff --git a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/extractor/dataregion/realtime/PipeRealtimeDataRegionExtractor.java b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/extractor/dataregion/realtime/PipeRealtimeDataRegionExtractor.java index d809bd72a818..c4bec4246e48 100644 --- a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/extractor/dataregion/realtime/PipeRealtimeDataRegionExtractor.java +++ b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/extractor/dataregion/realtime/PipeRealtimeDataRegionExtractor.java @@ -133,11 +133,13 @@ public void validate(final PipeParameterValidator validator) throws Exception { if (realtimeDataExtractionStartTime > realtimeDataExtractionEndTime) { throw new PipeParameterNotValidException( String.format( - "%s or %s should be less than or equal to %s or %s.", + "%s (%s) [%s] should be less than or equal to %s (%s) [%s].", SOURCE_START_TIME_KEY, EXTRACTOR_START_TIME_KEY, + realtimeDataExtractionStartTime, SOURCE_END_TIME_KEY, - EXTRACTOR_END_TIME_KEY)); + EXTRACTOR_END_TIME_KEY, + realtimeDataExtractionEndTime)); } } catch (final Exception e) { // compatible with the current validation framework @@ -296,13 +298,10 @@ public final void extract(final PipeRealtimeEvent event) { protected abstract void doExtract(final PipeRealtimeEvent event); protected void extractHeartbeat(final PipeRealtimeEvent 
event) { - // Bind extractor so that the heartbeat event can later inform the extractor of queue size - ((PipeHeartbeatEvent) event.getEvent()).bindExtractor(this); - // Record the pending queue size before trying to put heartbeatEvent into queue ((PipeHeartbeatEvent) event.getEvent()).recordExtractorQueueSize(pendingQueue); - Event lastEvent = pendingQueue.peekLast(); + final Event lastEvent = pendingQueue.peekLast(); if (lastEvent instanceof PipeRealtimeEvent && ((PipeRealtimeEvent) lastEvent).getEvent() instanceof PipeHeartbeatEvent && (((PipeHeartbeatEvent) ((PipeRealtimeEvent) lastEvent).getEvent()).isShouldPrintMessage() diff --git a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/extractor/dataregion/realtime/PipeRealtimeDataRegionHybridExtractor.java b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/extractor/dataregion/realtime/PipeRealtimeDataRegionHybridExtractor.java index 381b8d738585..a0f075298a59 100644 --- a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/extractor/dataregion/realtime/PipeRealtimeDataRegionHybridExtractor.java +++ b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/extractor/dataregion/realtime/PipeRealtimeDataRegionHybridExtractor.java @@ -40,15 +40,12 @@ import org.slf4j.LoggerFactory; import java.util.Objects; -import java.util.concurrent.atomic.AtomicInteger; public class PipeRealtimeDataRegionHybridExtractor extends PipeRealtimeDataRegionExtractor { private static final Logger LOGGER = LoggerFactory.getLogger(PipeRealtimeDataRegionHybridExtractor.class); - private final AtomicInteger connectorInputPendingQueueTsFileSize = new AtomicInteger(0); - @Override protected void doExtract(final PipeRealtimeEvent event) { final Event eventToExtract = event.getEvent(); @@ -238,7 +235,7 @@ private boolean isHistoricalTsFileEventCountExceededLimit() { } private boolean isRealtimeTsFileEventCountExceededLimit() { - return pendingQueue.getTsFileInsertionEventCount() + connectorInputPendingQueueTsFileSize.get() + return pendingQueue.getTsFileInsertionEventCount() >= PipeConfig.getInstance().getPipeMaxAllowedPendingTsFileEpochPerDataRegion(); } @@ -247,10 +244,6 @@ private boolean mayTsFileLinkedCountReachDangerousThreshold() { >= PipeConfig.getInstance().getPipeMaxAllowedLinkedTsFileCount(); } - public void informConnectorInputPendingQueueTsFileSize(final int queueSize) { - connectorInputPendingQueueTsFileSize.set(queueSize); - } - @Override public Event supply() { PipeRealtimeEvent realtimeEvent = (PipeRealtimeEvent) pendingQueue.directPoll(); diff --git a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/metric/PipeDataNodeRemainingEventAndTimeOperator.java b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/metric/PipeDataNodeRemainingEventAndTimeOperator.java index a4b2fd3e7b79..f7a08295f408 100644 --- a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/metric/PipeDataNodeRemainingEventAndTimeOperator.java +++ b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/metric/PipeDataNodeRemainingEventAndTimeOperator.java @@ -23,7 +23,6 @@ import org.apache.iotdb.db.pipe.extractor.dataregion.IoTDBDataRegionExtractor; import org.apache.iotdb.db.pipe.extractor.schemaregion.IoTDBSchemaRegionExtractor; import org.apache.iotdb.db.pipe.task.subtask.connector.PipeConnectorSubtask; -import org.apache.iotdb.db.pipe.task.subtask.processor.PipeProcessorSubtask; import com.codahale.metrics.Clock; import com.codahale.metrics.ExponentialMovingAverages; @@ -41,8 +40,6 @@ class 
PipeDataNodeRemainingEventAndTimeOperator { private final ConcurrentMap dataRegionExtractors = new ConcurrentHashMap<>(); - private final ConcurrentMap dataRegionProcessors = - new ConcurrentHashMap<>(); private final ConcurrentMap dataRegionConnectors = new ConcurrentHashMap<>(); private final ConcurrentMap diff --git a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/task/subtask/connector/PipeConnectorSubtask.java b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/task/subtask/connector/PipeConnectorSubtask.java index 22e87c7e5c8e..40ac29eaa819 100644 --- a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/task/subtask/connector/PipeConnectorSubtask.java +++ b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/pipe/task/subtask/connector/PipeConnectorSubtask.java @@ -144,7 +144,7 @@ protected boolean executeOnce() { taskID, lastEvent instanceof EnrichedEvent ? ((EnrichedEvent) lastEvent).coreReportMessage() - : lastEvent.toString(), + : lastEvent, ErrorHandlingUtils.getRootCause(e).getMessage()), e); } else { diff --git a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/queryengine/execution/operator/process/ActiveRegionScanMergeOperator.java b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/queryengine/execution/operator/process/ActiveRegionScanMergeOperator.java index 8efeeecce424..1749b61b760d 100644 --- a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/queryengine/execution/operator/process/ActiveRegionScanMergeOperator.java +++ b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/queryengine/execution/operator/process/ActiveRegionScanMergeOperator.java @@ -117,26 +117,37 @@ public TsBlock next() throws Exception { } } - TimeColumnBuilder timeColumnBuilder = tsBlockBuilder.getTimeColumnBuilder(); - ColumnBuilder[] valueColumnBuilders = tsBlockBuilder.getValueColumnBuilders(); - int curTsBlockRowIndex; - for (int i = 0; i < inputOperatorsCount; i++) { - if (inputTsBlocks[i] == null) { - continue; + if (!needMergeBeforeCount) { + for (int i = 0; i < inputOperatorsCount; i++) { + if (inputTsBlocks[i] == null) { + continue; + } + for (int row = 0; row < maxRowCanBuild; row++) { + long childCount = inputTsBlocks[i].getValueColumns()[0].getLong(inputIndex[i] + row); + count += childCount; + inputIndex[i] += maxRowCanBuild; + } } - curTsBlockRowIndex = inputIndex[i]; - for (int row = 0; row < maxRowCanBuild; row++) { - String id = - inputTsBlocks[i].getValueColumns()[0].getBinary(curTsBlockRowIndex + row).toString(); - if (!outputCount || needMergeBeforeCount) { + } else { + TimeColumnBuilder timeColumnBuilder = tsBlockBuilder.getTimeColumnBuilder(); + ColumnBuilder[] valueColumnBuilders = tsBlockBuilder.getValueColumnBuilders(); + int curTsBlockRowIndex; + for (int i = 0; i < inputOperatorsCount; i++) { + if (inputTsBlocks[i] == null) { + continue; + } + curTsBlockRowIndex = inputIndex[i]; + for (int row = 0; row < maxRowCanBuild; row++) { + String id = + inputTsBlocks[i].getValueColumns()[0].getBinary(curTsBlockRowIndex + row).toString(); if (deduplicatedSet.contains(id)) { continue; } deduplicatedSet.add(id); + buildOneRow(i, curTsBlockRowIndex + row, timeColumnBuilder, valueColumnBuilders); } - buildOneRow(i, curTsBlockRowIndex + row, timeColumnBuilder, valueColumnBuilders); + inputIndex[i] += maxRowCanBuild; } - inputIndex[i] += maxRowCanBuild; } return outputCount ? 
returnResultIfNoMoreData() : tsBlockBuilder.build(); } diff --git a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/queryengine/execution/operator/source/AbstractRegionScanDataSourceOperator.java b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/queryengine/execution/operator/source/AbstractRegionScanDataSourceOperator.java index b9cd4d442d7d..0f20259d5dca 100644 --- a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/queryengine/execution/operator/source/AbstractRegionScanDataSourceOperator.java +++ b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/queryengine/execution/operator/source/AbstractRegionScanDataSourceOperator.java @@ -37,6 +37,9 @@ public abstract class AbstractRegionScanDataSourceOperator extends AbstractSourc protected boolean finished = false; + protected boolean outputCount; + protected long count = 0; + protected AbstractRegionScanForActiveDataUtil regionScanUtil; protected TsBlockBuilder resultTsBlockBuilder; @@ -97,16 +100,28 @@ public boolean hasNext() throws Exception { } while (System.nanoTime() - start < maxRuntime && !resultTsBlockBuilder.isFull()); finished = - resultTsBlockBuilder.isEmpty() + (resultTsBlockBuilder.isEmpty()) && ((!regionScanUtil.hasMoreData() && regionScanUtil.isCurrentTsFileFinished()) || isAllDataChecked()); - return !finished; + boolean hasCachedCountValue = buildCountResultIfNeed(); + return !finished || hasCachedCountValue; } catch (IOException e) { throw new IOException("Error occurs when scanning active time series.", e); } } + private boolean buildCountResultIfNeed() { + if (!outputCount || !finished || count == -1) { + return false; + } + resultTsBlockBuilder.getTimeColumnBuilder().writeLong(-1); + resultTsBlockBuilder.getValueColumnBuilders()[0].writeLong(count); + resultTsBlockBuilder.declarePosition(); + count = -1; + return true; + } + @Override public void close() throws Exception { // do nothing diff --git a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/queryengine/execution/operator/source/ActiveDeviceRegionScanOperator.java b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/queryengine/execution/operator/source/ActiveDeviceRegionScanOperator.java index 9868afb0ca02..1f7478a00705 100644 --- a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/queryengine/execution/operator/source/ActiveDeviceRegionScanOperator.java +++ b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/queryengine/execution/operator/source/ActiveDeviceRegionScanOperator.java @@ -51,7 +51,9 @@ public ActiveDeviceRegionScanOperator( OperatorContext operatorContext, PlanNodeId sourceId, Map deviceToAlignedMap, - Filter timeFilter) { + Filter timeFilter, + boolean outputCount) { + this.outputCount = outputCount; this.sourceId = sourceId; this.operatorContext = operatorContext; this.deviceToAlignedMap = deviceToAlignedMap; @@ -70,26 +72,36 @@ protected boolean isAllDataChecked() { @Override protected void updateActiveData() { - TimeColumnBuilder timeColumnBuilder = resultTsBlockBuilder.getTimeColumnBuilder(); - ColumnBuilder[] columnBuilders = resultTsBlockBuilder.getValueColumnBuilders(); - List activeDevices = ((RegionScanForActiveDeviceUtil) regionScanUtil).getActiveDevices(); - for (IDeviceID deviceID : activeDevices) { - timeColumnBuilder.writeLong(-1); - columnBuilders[0].writeBinary(new Binary(deviceID.toString(), TSFileConfig.STRING_CHARSET)); - columnBuilders[1].writeBinary( - new Binary( - String.valueOf(deviceToAlignedMap.get(deviceID)), TSFileConfig.STRING_CHARSET)); - columnBuilders[2].appendNull(); - 
columnBuilders[3].appendNull(); - resultTsBlockBuilder.declarePosition(); - deviceToAlignedMap.remove(deviceID); + + if (this.outputCount) { + count += activeDevices.size(); + activeDevices.forEach(deviceToAlignedMap.keySet()::remove); + } else { + TimeColumnBuilder timeColumnBuilder = resultTsBlockBuilder.getTimeColumnBuilder(); + ColumnBuilder[] columnBuilders = resultTsBlockBuilder.getValueColumnBuilders(); + for (IDeviceID deviceID : activeDevices) { + timeColumnBuilder.writeLong(-1); + columnBuilders[0].writeBinary(new Binary(deviceID.toString(), TSFileConfig.STRING_CHARSET)); + columnBuilders[1].writeBinary( + new Binary( + String.valueOf(deviceToAlignedMap.get(deviceID)), TSFileConfig.STRING_CHARSET)); + columnBuilders[2].appendNull(); + columnBuilders[3].appendNull(); + resultTsBlockBuilder.declarePosition(); + deviceToAlignedMap.remove(deviceID); + } } } @Override protected List getResultDataTypes() { + if (outputCount) { + return ColumnHeaderConstant.countDevicesColumnHeaders.stream() + .map(ColumnHeader::getColumnType) + .collect(Collectors.toList()); + } return ColumnHeaderConstant.showDevicesColumnHeaders.stream() .map(ColumnHeader::getColumnType) .collect(Collectors.toList()); diff --git a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/queryengine/execution/operator/source/ActiveTimeSeriesRegionScanOperator.java b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/queryengine/execution/operator/source/ActiveTimeSeriesRegionScanOperator.java index 6df57b1140cb..c1f4cc951d00 100644 --- a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/queryengine/execution/operator/source/ActiveTimeSeriesRegionScanOperator.java +++ b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/queryengine/execution/operator/source/ActiveTimeSeriesRegionScanOperator.java @@ -53,7 +53,9 @@ public ActiveTimeSeriesRegionScanOperator( OperatorContext operatorContext, PlanNodeId sourceId, Map> timeSeriesToSchemasInfo, - Filter timeFilter) { + Filter timeFilter, + boolean isOutputCount) { + this.outputCount = isOutputCount; this.operatorContext = operatorContext; this.sourceId = sourceId; this.timeSeriesToSchemasInfo = timeSeriesToSchemasInfo; @@ -94,6 +96,16 @@ protected void updateActiveData() { Map> activeTimeSeries = ((RegionScanForActiveTimeSeriesUtil) regionScanUtil).getActiveTimeSeries(); + + if (outputCount) { + for (Map.Entry> entry : activeTimeSeries.entrySet()) { + List timeSeriesList = entry.getValue(); + count += timeSeriesList.size(); + removeTimeseriesListFromDevice(entry.getKey(), timeSeriesList); + } + return; + } + for (Map.Entry> entry : activeTimeSeries.entrySet()) { IDeviceID deviceID = entry.getKey(); String deviceStr = deviceID.toString(); @@ -116,11 +128,18 @@ protected void updateActiveData() { checkAndAppend(schemaInfo.getDeadbandParameters(), columnBuilders[9]); // DeadbandParameters columnBuilders[10].writeBinary(VIEW_TYPE); // ViewType resultTsBlockBuilder.declarePosition(); - timeSeriesInfo.remove(timeSeries); - } - if (timeSeriesInfo.isEmpty()) { - timeSeriesToSchemasInfo.remove(deviceID); } + removeTimeseriesListFromDevice(deviceID, timeSeriesList); + } + } + + private void removeTimeseriesListFromDevice(IDeviceID deviceID, List timeSeriesList) { + Map timeSeriesInfo = timeSeriesToSchemasInfo.get(deviceID); + for (String timeSeries : timeSeriesList) { + timeSeriesInfo.remove(timeSeries); + } + if (timeSeriesInfo.isEmpty()) { + timeSeriesToSchemasInfo.remove(deviceID); } } @@ -130,6 +149,11 @@ private byte[] contactDeviceAndMeasurement(String deviceStr, String 
measurementI @Override protected List getResultDataTypes() { + if (outputCount) { + return ColumnHeaderConstant.countTimeSeriesColumnHeaders.stream() + .map(ColumnHeader::getColumnType) + .collect(Collectors.toList()); + } return ColumnHeaderConstant.showTimeSeriesColumnHeaders.stream() .map(ColumnHeader::getColumnType) .collect(Collectors.toList()); diff --git a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/queryengine/plan/analyze/AnalyzeVisitor.java b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/queryengine/plan/analyze/AnalyzeVisitor.java index 57ef4f9fd034..cf49f83c7ce3 100644 --- a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/queryengine/plan/analyze/AnalyzeVisitor.java +++ b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/queryengine/plan/analyze/AnalyzeVisitor.java @@ -2945,6 +2945,7 @@ public Analysis visitShowTimeSeries( analyzeTimeseriesRegionScan( showTimeSeriesStatement.getTimeCondition(), patternTree, analysis, context); if (!hasSchema) { + analysis.setRespDatasetHeader(DatasetHeaderFactory.getShowTimeSeriesHeader()); return analysis; } } catch (IllegalPathException e) { @@ -3024,7 +3025,7 @@ private void removeLogicViewMeasurement(ISchemaTree schemaTree) { schemaTree.removeLogicalView(); } - private boolean analyzeDeviceRegionScan( + private void analyzeDeviceRegionScan( WhereCondition timeCondition, PathPatternTree patternTree, Analysis analysis, @@ -3036,7 +3037,7 @@ private boolean analyzeDeviceRegionScan( ISchemaTree schemaTree = schemaFetcher.fetchSchemaInDeviceLevel(patternTree, context); if (schemaTree.isEmpty()) { analysis.setFinishQueryAfterAnalyze(true); - return false; + return; } // fetch Data partition @@ -3054,7 +3055,6 @@ private boolean analyzeDeviceRegionScan( schemaTree, context); analysis.setDataPartitionInfo(dataPartition); - return true; } @Override @@ -3068,12 +3068,8 @@ public Analysis visitShowDevices( showDevicesStatement.getPathPattern().concatNode(IoTDBConstant.ONE_LEVEL_PATH_WILDCARD)); if (showDevicesStatement.hasTimeCondition()) { - boolean hasSchema = - analyzeDeviceRegionScan( - showDevicesStatement.getTimeCondition(), patternTree, analysis, context); - if (!hasSchema) { - return analysis; - } + analyzeDeviceRegionScan( + showDevicesStatement.getTimeCondition(), patternTree, analysis, context); } else { SchemaPartition schemaPartitionInfo = partitionFetcher.getSchemaPartition(patternTree); analysis.setSchemaPartitionInfo(schemaPartitionInfo); @@ -3134,12 +3130,8 @@ public Analysis visitCountDevices( patternTree.appendPathPattern( countDevicesStatement.getPathPattern().concatNode(IoTDBConstant.ONE_LEVEL_PATH_WILDCARD)); if (countDevicesStatement.hasTimeCondition()) { - boolean hasSchema = - analyzeDeviceRegionScan( - countDevicesStatement.getTimeCondition(), patternTree, analysis, context); - if (!hasSchema) { - return analysis; - } + analyzeDeviceRegionScan( + countDevicesStatement.getTimeCondition(), patternTree, analysis, context); } else { SchemaPartition schemaPartitionInfo = partitionFetcher.getSchemaPartition(patternTree); analysis.setSchemaPartitionInfo(schemaPartitionInfo); @@ -3164,6 +3156,7 @@ public Analysis visitCountTimeSeries( analyzeTimeseriesRegionScan( countTimeSeriesStatement.getTimeCondition(), patternTree, analysis, context); if (!hasSchema) { + analysis.setRespDatasetHeader(DatasetHeaderFactory.getCountTimeSeriesHeader()); return analysis; } } catch (IllegalPathException e) { diff --git 
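The region-scan operator hunks above add an outputCount mode: when the statement is COUNT DEVICES or COUNT TIMESERIES with a time condition, the operator only accumulates a counter and emits a single summary row at the end, instead of materializing one row per active device or series. The sketch below shows just that branching; the StringBuilder stands in for the TsBlockBuilder and plain strings stand in for device IDs.

import java.util.List;

final class ActiveEntityScanSketch {
  private final boolean outputCount;
  private long count = 0;
  private final StringBuilder rows = new StringBuilder(); // stand-in for TsBlockBuilder

  ActiveEntityScanSketch(boolean outputCount) {
    this.outputCount = outputCount;
  }

  void onActiveBatch(List<String> activeEntities) {
    if (outputCount) {
      count += activeEntities.size(); // cheap: just tally the batch
    } else {
      for (String id : activeEntities) {
        rows.append(id).append('\n'); // expensive: one output row per entity
      }
    }
  }

  String finish() {
    return outputCount ? "count=" + count : rows.toString();
  }
}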
a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/queryengine/plan/execution/config/executor/ClusterConfigTaskExecutor.java b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/queryengine/plan/execution/config/executor/ClusterConfigTaskExecutor.java index a6d6680a1c60..641ec2db26d6 100644 --- a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/queryengine/plan/execution/config/executor/ClusterConfigTaskExecutor.java +++ b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/queryengine/plan/execution/config/executor/ClusterConfigTaskExecutor.java @@ -51,6 +51,7 @@ import org.apache.iotdb.commons.trigger.service.TriggerExecutableManager; import org.apache.iotdb.commons.udf.service.UDFClassLoader; import org.apache.iotdb.commons.udf.service.UDFExecutableManager; +import org.apache.iotdb.commons.utils.CommonDateTimeUtils; import org.apache.iotdb.commons.utils.TimePartitionUtils; import org.apache.iotdb.confignode.rpc.thrift.TAlterLogicalViewReq; import org.apache.iotdb.confignode.rpc.thrift.TAlterPipeReq; @@ -1933,7 +1934,12 @@ public SettableFuture createTopic(CreateTopicStatement createT // Validate topic config final TopicMeta temporaryTopicMeta = - new TopicMeta(topicName, System.currentTimeMillis(), topicAttributes); + new TopicMeta( + topicName, + CommonDateTimeUtils.convertMilliTimeWithPrecision( + System.currentTimeMillis(), + CommonDescriptor.getInstance().getConfig().getTimestampPrecision()), + topicAttributes); try { PipeAgent.plugin().validateExtractor(temporaryTopicMeta.generateExtractorAttributes()); PipeAgent.plugin().validateProcessor(temporaryTopicMeta.generateProcessorAttributes()); diff --git a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/queryengine/plan/optimization/LimitOffsetPushDown.java b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/queryengine/plan/optimization/LimitOffsetPushDown.java index 36cc13254c7e..df73a23dd18a 100644 --- a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/queryengine/plan/optimization/LimitOffsetPushDown.java +++ b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/queryengine/plan/optimization/LimitOffsetPushDown.java @@ -335,7 +335,8 @@ public static void pushDownLimitOffsetToTimeParameter(QueryStatement queryStatem if (queryStatement.getResultTimeOrder() == Ordering.ASC) { startTime = startTime + offsetSize * step; } else { - startTime = startTime + (size - offsetSize - limitSize) * step; + long startTimeInterval = size - offsetSize - limitSize; + startTime = startTime + (startTimeInterval < 0 ? 
0 : startTimeInterval) * step; } endTime = limitSize == 0 diff --git a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/queryengine/plan/parser/ASTVisitor.java b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/queryengine/plan/parser/ASTVisitor.java index f2d37f06d75a..12ddcf2636c7 100644 --- a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/queryengine/plan/parser/ASTVisitor.java +++ b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/queryengine/plan/parser/ASTVisitor.java @@ -1981,7 +1981,10 @@ private void parseLoadFileAttributeClause( } else if (ctx.SGLEVEL() != null) { loadTsFileStatement.setDatabaseLevel(Integer.parseInt(ctx.INTEGER_LITERAL().getText())); } else if (ctx.VERIFY() != null) { - loadTsFileStatement.setVerifySchema(Boolean.parseBoolean(ctx.boolean_literal().getText())); + if (!Boolean.parseBoolean(ctx.boolean_literal().getText())) { + throw new SemanticException("Load option VERIFY can only be set to true."); + } + loadTsFileStatement.setVerifySchema(true); } else { throw new SemanticException( String.format( @@ -3554,6 +3557,9 @@ private TSDataType parseDataTypeAttribute(IoTDBSqlParser.AttributeClausesContext String dataTypeString = ctx.dataType.getText().toUpperCase(); try { dataType = TSDataType.valueOf(dataTypeString); + if (TSDataType.UNKNOWN.equals(dataType) || TSDataType.VECTOR.equals(dataType)) { + throw new SemanticException(String.format("Unsupported datatype: %s", dataTypeString)); + } } catch (Exception e) { throw new SemanticException(String.format("Unsupported datatype: %s", dataTypeString)); } diff --git a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/queryengine/plan/planner/OperatorTreeGenerator.java b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/queryengine/plan/planner/OperatorTreeGenerator.java index 09d591ddde0e..692125063bd1 100644 --- a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/queryengine/plan/planner/OperatorTreeGenerator.java +++ b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/queryengine/plan/planner/OperatorTreeGenerator.java @@ -2550,10 +2550,10 @@ public Operator visitIdentitySink(IdentitySinkNode node, LocalExecutionPlanConte localInstanceId.toThrift(), node.getPlanNodeId().getId(), context.getInstanceContext()); + List children = dealWithConsumeChildrenOneByOneNode(node, context); sinkHandle.setMaxBytesCanReserve(context.getMaxBytesOneHandleCanReserve()); context.getDriverContext().setSink(sinkHandle); - List children = dealWithConsumeChildrenOneByOneNode(node, context); return new IdentitySinkOperator(operatorContext, children, downStreamChannelIndex, sinkHandle); } @@ -3557,7 +3557,7 @@ public Operator visitDeviceRegionScan( } ActiveDeviceRegionScanOperator regionScanOperator = new ActiveDeviceRegionScanOperator( - operatorContext, node.getPlanNodeId(), deviceIDToAligned, filter); + operatorContext, node.getPlanNodeId(), deviceIDToAligned, filter, node.isOutputCount()); DataDriverContext dataDriverContext = (DataDriverContext) context.getDriverContext(); dataDriverContext.addSourceOperator(regionScanOperator); @@ -3590,7 +3590,11 @@ public Operator visitTimeSeriesRegionScan( } ActiveTimeSeriesRegionScanOperator regionScanOperator = new ActiveTimeSeriesRegionScanOperator( - operatorContext, node.getPlanNodeId(), timeseriesToSchemaInfo, filter); + operatorContext, + node.getPlanNodeId(), + timeseriesToSchemaInfo, + filter, + node.isOutputCount()); dataDriverContext.addSourceOperator(regionScanOperator); 
dataDriverContext.setQueryDataSourceType(QueryDataSourceType.TIME_SERIES_REGION_SCAN); diff --git a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/queryengine/plan/planner/distribution/SourceRewriter.java b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/queryengine/plan/planner/distribution/SourceRewriter.java index cf08323808e2..d15b774831f3 100644 --- a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/queryengine/plan/planner/distribution/SourceRewriter.java +++ b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/queryengine/plan/planner/distribution/SourceRewriter.java @@ -774,6 +774,7 @@ private List splitRegionScanNodeByRegion( RegionScanNode regionScanNode = (RegionScanNode) node.clone(); regionScanNode.setPlanNodeId(context.queryContext.getQueryId().genPlanNodeId()); regionScanNode.setRegionReplicaSet(dataRegion); + regionScanNode.setOutputCount(node.isOutputCount()); regionScanNode.clearPath(); return regionScanNode; }) diff --git a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/queryengine/plan/scheduler/AsyncPlanNodeSender.java b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/queryengine/plan/scheduler/AsyncPlanNodeSender.java index c004356a5f4e..230eb27941ee 100644 --- a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/queryengine/plan/scheduler/AsyncPlanNodeSender.java +++ b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/queryengine/plan/scheduler/AsyncPlanNodeSender.java @@ -44,7 +44,7 @@ public class AsyncPlanNodeSender { - private static final Logger logger = LoggerFactory.getLogger(AsyncPlanNodeSender.class); + private static final Logger LOGGER = LoggerFactory.getLogger(AsyncPlanNodeSender.class); private final IClientManager asyncInternalServiceClientManager; private final List instances; @@ -116,14 +116,14 @@ public List getFailureStatusList() { status = entry.getValue().getStatus(); if (!entry.getValue().accepted) { if (status == null) { - logger.warn( + LOGGER.warn( "dispatch write failed. message: {}, node {}", entry.getValue().message, instances.get(entry.getKey()).getHostDataNode().getInternalEndPoint()); failureStatusList.add( RpcUtils.getStatus(TSStatusCode.WRITE_PROCESS_ERROR, entry.getValue().getMessage())); } else { - logger.warn( + LOGGER.warn( "dispatch write failed. 
status: {}, code: {}, message: {}, node {}", entry.getValue().status, TSStatusCode.representOf(status.code), diff --git a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/queryengine/plan/scheduler/FragmentInstanceDispatcherImpl.java b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/queryengine/plan/scheduler/FragmentInstanceDispatcherImpl.java index d401a6196f90..7fe941a6f838 100644 --- a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/queryengine/plan/scheduler/FragmentInstanceDispatcherImpl.java +++ b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/queryengine/plan/scheduler/FragmentInstanceDispatcherImpl.java @@ -25,6 +25,8 @@ import org.apache.iotdb.commons.client.async.AsyncDataNodeInternalServiceClient; import org.apache.iotdb.commons.client.exception.ClientManagerException; import org.apache.iotdb.commons.client.sync.SyncDataNodeInternalServiceClient; +import org.apache.iotdb.commons.conf.CommonConfig; +import org.apache.iotdb.commons.conf.CommonDescriptor; import org.apache.iotdb.commons.consensus.ConsensusGroupId; import org.apache.iotdb.commons.service.metric.PerformanceOverviewMetrics; import org.apache.iotdb.consensus.exception.RatisReadUnavailableException; @@ -66,8 +68,11 @@ public class FragmentInstanceDispatcherImpl implements IFragInstanceDispatcher { - private static final Logger logger = + private static final Logger LOGGER = LoggerFactory.getLogger(FragmentInstanceDispatcherImpl.class); + + private static final CommonConfig COMMON_CONFIG = CommonDescriptor.getInstance().getConfig(); + private final ExecutorService executor; private final ExecutorService writeOperationExecutor; private final QueryType type; @@ -126,7 +131,7 @@ private Future dispatchRead(List i } catch (FragmentInstanceDispatchException e) { return immediateFuture(new FragInstanceDispatchResult(e.getFailureStatus())); } catch (Throwable t) { - logger.warn(DISPATCH_FAILED, t); + LOGGER.warn(DISPATCH_FAILED, t); return immediateFuture( new FragInstanceDispatchResult( RpcUtils.getStatus( @@ -165,7 +170,7 @@ private Future dispatchWriteSync(List dispatchWriteAsync(List dispatchWriteAsync(List 0 + ? COMMON_CONFIG.getRemoteWriteMaxRetryDurationInMs() * 1_000_000L + : 0; + if (maxRetryDurationInNs > 0 && asyncPlanNodeSender.needRetry()) { // retry failed remote FIs - int retry = 0; - final int maxRetryTimes = 10; - long waitMillis = getRetrySleepTime(retry); + int retryCount = 0; + long waitMillis = getRetrySleepTime(retryCount); + long retryStartTime = System.nanoTime(); while (asyncPlanNodeSender.needRetry()) { - retry++; + retryCount++; asyncPlanNodeSender.retry(); - if (!(asyncPlanNodeSender.needRetry() && retry < maxRetryTimes)) { + // if !(still need retry and current time + next sleep time < maxRetryDurationInNs) + if (!(asyncPlanNodeSender.needRetry() + && (System.nanoTime() - retryStartTime + waitMillis * 1_000_000L) + < maxRetryDurationInNs)) { break; } // still need to retry, sleep some time before make another retry. 
Thread.sleep(waitMillis); - waitMillis = getRetrySleepTime(retry); + PERFORMANCE_OVERVIEW_METRICS.recordRemoteRetrySleepCost(waitMillis * 1_000_000L); + waitMillis = getRetrySleepTime(retryCount); } } } catch (InterruptedException e) { Thread.currentThread().interrupt(); - logger.error("Interrupted when dispatching write async", e); + LOGGER.error("Interrupted when dispatching write async", e); return immediateFuture( new FragInstanceDispatchResult( RpcUtils.getStatus( @@ -308,7 +320,7 @@ private void dispatchRemoteHelper(FragmentInstance instance, TEndPoint endPoint) TSendFragmentInstanceResp sendFragmentInstanceResp = client.sendFragmentInstance(sendFragmentInstanceReq); if (!sendFragmentInstanceResp.accepted) { - logger.warn(sendFragmentInstanceResp.message); + LOGGER.warn(sendFragmentInstanceResp.message); if (sendFragmentInstanceResp.isSetNeedRetry() && sendFragmentInstanceResp.isNeedRetry()) { throw new RatisReadUnavailableException(sendFragmentInstanceResp.message); @@ -330,7 +342,7 @@ private void dispatchRemoteHelper(FragmentInstance instance, TEndPoint endPoint) TSendSinglePlanNodeResp sendPlanNodeResp = client.sendBatchPlanNode(sendPlanNodeReq).getResponses().get(0); if (!sendPlanNodeResp.accepted) { - logger.warn( + LOGGER.warn( "dispatch write failed. status: {}, code: {}, message: {}, node {}", sendPlanNodeResp.status, TSStatusCode.representOf(sendPlanNodeResp.status.code), @@ -366,7 +378,7 @@ private void dispatchRemote(FragmentInstance instance, TEndPoint endPoint) try { dispatchRemoteHelper(instance, endPoint); } catch (ClientManagerException | TException | RatisReadUnavailableException e) { - logger.warn( + LOGGER.warn( "can't execute request on node {}, error msg is {}, and we try to reconnect this node.", endPoint, ExceptionUtils.getRootCause(e).toString()); @@ -374,7 +386,7 @@ private void dispatchRemote(FragmentInstance instance, TEndPoint endPoint) try { dispatchRemoteHelper(instance, endPoint); } catch (ClientManagerException | TException | RatisReadUnavailableException e1) { - logger.warn( + LOGGER.warn( "can't execute request on node {} in second try, error msg is {}.", endPoint, ExceptionUtils.getRootCause(e1).toString()); @@ -398,7 +410,7 @@ private void dispatchLocally(FragmentInstance instance) throws FragmentInstanceD ConsensusGroupId.Factory.createFromTConsensusGroupId( instance.getRegionReplicaSet().getRegionId()); } catch (Throwable t) { - logger.warn("Deserialize ConsensusGroupId failed. ", t); + LOGGER.warn("Deserialize ConsensusGroupId failed. ", t); throw new FragmentInstanceDispatchException( RpcUtils.getStatus( TSStatusCode.EXECUTE_STATEMENT_ERROR, @@ -414,7 +426,7 @@ private void dispatchLocally(FragmentInstance instance) throws FragmentInstanceD ? readExecutor.execute(instance) : readExecutor.execute(groupId, instance); if (!readResult.isAccepted()) { - logger.warn(readResult.getMessage()); + LOGGER.warn(readResult.getMessage()); throw new FragmentInstanceDispatchException( RpcUtils.getStatus(TSStatusCode.EXECUTE_STATEMENT_ERROR, readResult.getMessage())); } @@ -426,7 +438,7 @@ private void dispatchLocally(FragmentInstance instance) throws FragmentInstanceD if (!writeResult.isAccepted()) { // DO NOT LOG READ_ONLY ERROR if (writeResult.getStatus().getCode() != TSStatusCode.SYSTEM_READ_ONLY.getStatusCode()) { - logger.warn( + LOGGER.warn( "write locally failed. 
TSStatus: {}, message: {}", writeResult.getStatus(), writeResult.getMessage()); diff --git a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/storageengine/dataregion/compaction/execute/recover/CompactionRecoverManager.java b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/storageengine/dataregion/compaction/execute/recover/CompactionRecoverManager.java index 0fc17ed92401..91840e1546d8 100644 --- a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/storageengine/dataregion/compaction/execute/recover/CompactionRecoverManager.java +++ b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/storageengine/dataregion/compaction/execute/recover/CompactionRecoverManager.java @@ -86,7 +86,7 @@ public void recoverCompaction() { } for (File timePartitionDir : timePartitionDirs) { if (!timePartitionDir.isDirectory() - || !Pattern.compile("\\d*").matcher(timePartitionDir.getName()).matches()) { + || !Pattern.compile("-?\\d+").matcher(timePartitionDir.getName()).matches()) { continue; } logger.info( diff --git a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/storageengine/dataregion/tsfile/TsFileResource.java b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/storageengine/dataregion/tsfile/TsFileResource.java index 3b7fc6d3d273..dc02fbcda54d 100644 --- a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/storageengine/dataregion/tsfile/TsFileResource.java +++ b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/storageengine/dataregion/tsfile/TsFileResource.java @@ -390,12 +390,30 @@ public long getTsFileSize() { } public long getStartTime(IDeviceID deviceId) { - return deviceId == null ? getFileStartTime() : timeIndex.getStartTime(deviceId); + try { + return deviceId == null ? getFileStartTime() : timeIndex.getStartTime(deviceId); + } catch (Exception e) { + LOGGER.error( + "meet error when getStartTime of {} in file {}", deviceId, file.getAbsolutePath(), e); + if (LOGGER.isDebugEnabled()) { + LOGGER.debug("TimeIndex = {}", timeIndex); + } + throw e; + } } /** open file's end time is Long.MIN_VALUE */ public long getEndTime(IDeviceID deviceId) { - return deviceId == null ? getFileEndTime() : timeIndex.getEndTime(deviceId); + try { + return deviceId == null ? 
getFileEndTime() : timeIndex.getEndTime(deviceId); + } catch (Exception e) { + LOGGER.error( + "meet error when getEndTime of {} in file {}", deviceId, file.getAbsolutePath(), e); + if (LOGGER.isDebugEnabled()) { + LOGGER.debug("TimeIndex = {}", timeIndex); + } + throw e; + } } public long getOrderTime(IDeviceID deviceId, boolean ascending) { diff --git a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/storageengine/dataregion/tsfile/timeindex/ArrayDeviceTimeIndex.java b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/storageengine/dataregion/tsfile/timeindex/ArrayDeviceTimeIndex.java index 86a610063d7f..d220e9f9e702 100644 --- a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/storageengine/dataregion/tsfile/timeindex/ArrayDeviceTimeIndex.java +++ b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/storageengine/dataregion/tsfile/timeindex/ArrayDeviceTimeIndex.java @@ -442,4 +442,20 @@ public Pair getPossibleStartTimeAndEndTime( public byte getTimeIndexType() { return ARRAY_DEVICE_TIME_INDEX_TYPE; } + + @Override + public String toString() { + StringBuilder builder = new StringBuilder(); + builder.append(" DeviceIndexMapSize = ").append(deviceToIndex.size()); + builder.append(" startTimeLength = ").append(startTimes.length); + builder.append(" endTimeLength = ").append(endTimes.length); + builder.append(" DeviceIndexMap = ["); + deviceToIndex.forEach( + (key, value) -> + builder.append(" device = ").append(key).append(", index = ").append(value)); + builder.append("]"); + builder.append(" StartTimes = ").append(Arrays.toString(startTimes)); + builder.append(" EndTimes = ").append(Arrays.toString(endTimes)); + return builder.toString(); + } } diff --git a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/storageengine/dataregion/tsfile/timeindex/FileTimeIndex.java b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/storageengine/dataregion/tsfile/timeindex/FileTimeIndex.java index 74df26bc2752..c9820d809f1e 100644 --- a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/storageengine/dataregion/tsfile/timeindex/FileTimeIndex.java +++ b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/storageengine/dataregion/tsfile/timeindex/FileTimeIndex.java @@ -251,4 +251,9 @@ public Pair getPossibleStartTimeAndEndTime( public byte getTimeIndexType() { return FILE_TIME_INDEX_TYPE; } + + @Override + public String toString() { + return " StartTime = " + startTime + " EndTime = " + endTime; + } } diff --git a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/subscription/receiver/SubscriptionReceiverV1.java b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/subscription/receiver/SubscriptionReceiverV1.java index 16d157e18479..8339c3d3c881 100644 --- a/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/subscription/receiver/SubscriptionReceiverV1.java +++ b/iotdb-core/datanode/src/main/java/org/apache/iotdb/db/subscription/receiver/SubscriptionReceiverV1.java @@ -399,18 +399,21 @@ private TPipeSubscribeResp handlePipeSubscribePoll(final PipeSubscribePollReq re private List handlePipeSubscribePollInternal( final ConsumerConfig consumerConfig, final PollPayload messagePayload) { + final Set subscribedTopicNames = + SubscriptionAgent.consumer() + .getTopicsSubscribedByConsumer( + consumerConfig.getConsumerGroupId(), consumerConfig.getConsumerId()); final Set topicNames; if (messagePayload.getTopicNames().isEmpty()) { // poll all subscribed topics - topicNames = - SubscriptionAgent.consumer() - .getTopicsSubscribedByConsumer( - 
consumerConfig.getConsumerGroupId(), consumerConfig.getConsumerId()); + topicNames = subscribedTopicNames; } else { topicNames = messagePayload.getTopicNames().stream() .map(ASTVisitor::parseIdentifier) .collect(Collectors.toSet()); + // filter unsubscribed topics + topicNames.removeIf((topicName) -> !subscribedTopicNames.contains(topicName)); } return SubscriptionAgent.broker().poll(consumerConfig, topicNames); diff --git a/iotdb-core/datanode/src/test/java/org/apache/iotdb/db/queryengine/plan/optimization/LimitOffsetPushDownTest.java b/iotdb-core/datanode/src/test/java/org/apache/iotdb/db/queryengine/plan/optimization/LimitOffsetPushDownTest.java index f550fde25fb0..d46469944842 100644 --- a/iotdb-core/datanode/src/test/java/org/apache/iotdb/db/queryengine/plan/optimization/LimitOffsetPushDownTest.java +++ b/iotdb-core/datanode/src/test/java/org/apache/iotdb/db/queryengine/plan/optimization/LimitOffsetPushDownTest.java @@ -322,6 +322,34 @@ public void testGroupByTimePushDown11() { checkGroupByTimePushDown(sql, 154, 354, 0, 0); } + @Test + public void testGroupByTimePushDown12() { + String sql = + "select avg(s1),sum(s2) from root.** group by ([4, 899), 200ms) order by time desc limit 3"; + checkGroupByTimePushDown(sql, 404, 899, 0, 0); + } + + @Test + public void testGroupByTimePushDown13() { + String sql = + "select avg(s1),sum(s2) from root.** group by ([4, 899), 200ms) order by time desc limit 5"; + checkGroupByTimePushDown(sql, 4, 899, 0, 0); + } + + @Test + public void testGroupByTimePushDown14() { + String sql = + "select avg(s1),sum(s2) from root.** group by ([4, 899), 200ms) order by time desc offset 2 limit 5"; + checkGroupByTimePushDown(sql, 4, 899, 0, 0); + } + + @Test + public void testGroupByTimePushDown15() { + String sql = + "select avg(s1),sum(s2) from root.** group by ([4, 899), 200ms) order by time desc limit 6"; + checkGroupByTimePushDown(sql, 4, 899, 0, 0); + } + private void checkGroupByTimePushDown( String sql, long startTime, long endTime, long rowLimit, long rowOffset) { QueryStatement queryStatement = diff --git a/iotdb-core/metrics/core/pom.xml b/iotdb-core/metrics/core/pom.xml index ee603204b5b8..69a1f16b4665 100644 --- a/iotdb-core/metrics/core/pom.xml +++ b/iotdb-core/metrics/core/pom.xml @@ -24,7 +24,7 @@ org.apache.iotdb iotdb-metrics - 1.3.2-SNAPSHOT + 1.3.3-SNAPSHOT metrics-core IoTDB: Core: Metrics: API Impl @@ -32,7 +32,7 @@ org.apache.iotdb metrics-interface - 1.3.2-SNAPSHOT + 1.3.3-SNAPSHOT io.micrometer diff --git a/iotdb-core/metrics/interface/pom.xml b/iotdb-core/metrics/interface/pom.xml index 119839227663..bd7c2cccfb5c 100644 --- a/iotdb-core/metrics/interface/pom.xml +++ b/iotdb-core/metrics/interface/pom.xml @@ -24,7 +24,7 @@ org.apache.iotdb iotdb-metrics - 1.3.2-SNAPSHOT + 1.3.3-SNAPSHOT metrics-interface IoTDB: Core: Metrics: Metrics API @@ -33,17 +33,17 @@ org.apache.iotdb iotdb-session - 1.3.2-SNAPSHOT + 1.3.3-SNAPSHOT org.apache.iotdb isession - 1.3.2-SNAPSHOT + 1.3.3-SNAPSHOT org.apache.iotdb service-rpc - 1.3.2-SNAPSHOT + 1.3.3-SNAPSHOT org.apache.tsfile diff --git a/iotdb-core/metrics/pom.xml b/iotdb-core/metrics/pom.xml index 97a0d7c5456a..1c2bac7c31ec 100644 --- a/iotdb-core/metrics/pom.xml +++ b/iotdb-core/metrics/pom.xml @@ -24,7 +24,7 @@ org.apache.iotdb iotdb-core - 1.3.2-SNAPSHOT + 1.3.3-SNAPSHOT iotdb-metrics pom diff --git a/iotdb-core/node-commons/pom.xml b/iotdb-core/node-commons/pom.xml index 37371509d0cf..421f791facec 100644 --- a/iotdb-core/node-commons/pom.xml +++ b/iotdb-core/node-commons/pom.xml @@ -24,7 +24,7 @@ 
org.apache.iotdb iotdb-core - 1.3.2-SNAPSHOT + 1.3.3-SNAPSHOT node-commons IoTDB: Core: Node Commons @@ -38,7 +38,7 @@ org.apache.iotdb service-rpc - 1.3.2-SNAPSHOT + 1.3.3-SNAPSHOT org.apache.tsfile @@ -48,37 +48,37 @@ org.apache.iotdb udf-api - 1.3.2-SNAPSHOT + 1.3.3-SNAPSHOT org.apache.iotdb trigger-api - 1.3.2-SNAPSHOT + 1.3.3-SNAPSHOT org.apache.iotdb pipe-api - 1.3.2-SNAPSHOT + 1.3.3-SNAPSHOT org.apache.iotdb iotdb-thrift-confignode - 1.3.2-SNAPSHOT + 1.3.3-SNAPSHOT org.apache.iotdb iotdb-thrift - 1.3.2-SNAPSHOT + 1.3.3-SNAPSHOT org.apache.iotdb iotdb-thrift-commons - 1.3.2-SNAPSHOT + 1.3.3-SNAPSHOT org.apache.iotdb iotdb-thrift-consensus - 1.3.2-SNAPSHOT + 1.3.3-SNAPSHOT org.apache.tsfile @@ -88,12 +88,12 @@ org.apache.iotdb metrics-interface - 1.3.2-SNAPSHOT + 1.3.3-SNAPSHOT org.apache.iotdb metrics-core - 1.3.2-SNAPSHOT + 1.3.3-SNAPSHOT org.slf4j @@ -155,6 +155,10 @@ org.apache.commons commons-jexl3 + + com.github.luben + zstd-jni + org.mockito mockito-core
diff --git a/iotdb-core/node-commons/src/assembly/resources/conf/iotdb-system.properties b/iotdb-core/node-commons/src/assembly/resources/conf/iotdb-system.properties
index b92142ebe631..958765e523f0 100644
--- a/iotdb-core/node-commons/src/assembly/resources/conf/iotdb-system.properties
+++ b/iotdb-core/node-commons/src/assembly/resources/conf/iotdb-system.properties
@@ -1849,3 +1849,21 @@ data_replication_factor=1
# Default value is -1, which means no limit.
# Datatype: int
# load_write_throughput_bytes_per_second=-1
+
+####################
+### Dispatch Retry Configuration
+####################
+
+# The maximum retry duration for remotely dispatched write requests, in milliseconds.
+# It only takes effect for remote dispatching of write requests, not for local dispatching or for queries.
+# Set it to 0 or a negative number to disable retrying of remotely dispatched write requests.
+# We sleep between retries, doubling the sleep time from 100ms (200ms, 400ms, 800ms, and so on) until it reaches 20,000ms, after which it no longer increases.
+# effectiveMode: hot_reload
+# Datatype: long
+# write_request_remote_dispatch_max_retry_duration_in_ms=60000
+
+# Whether to retry on unknown errors.
+# Current unknown errors includes EXECUTE_STATEMENT_ERROR(301) and INTERNAL_SERVER_ERROR(305) +# effectiveMode: hot_reload +# Datatype: boolean +# enable_retry_for_unknown_error=false diff --git a/iotdb-core/node-commons/src/main/java/org/apache/iotdb/commons/conf/CommonConfig.java b/iotdb-core/node-commons/src/main/java/org/apache/iotdb/commons/conf/CommonConfig.java index b78f7dcb6fbf..1de7f33df3a8 100644 --- a/iotdb-core/node-commons/src/main/java/org/apache/iotdb/commons/conf/CommonConfig.java +++ b/iotdb-core/node-commons/src/main/java/org/apache/iotdb/commons/conf/CommonConfig.java @@ -280,6 +280,10 @@ public class CommonConfig { private final Set enabledKillPoints = KillPoint.parseKillPoints(System.getProperty(IoTDBConstant.INTEGRATION_TEST_KILL_POINTS)); + private volatile boolean retryForUnknownErrors = false; + + private volatile long remoteWriteMaxRetryDurationInMs = 60000; + CommonConfig() { // Empty constructor } @@ -1210,4 +1214,20 @@ public boolean isIntegrationTest() { public Set getEnabledKillPoints() { return enabledKillPoints; } + + public boolean isRetryForUnknownErrors() { + return retryForUnknownErrors; + } + + public void setRetryForUnknownErrors(boolean retryForUnknownErrors) { + this.retryForUnknownErrors = retryForUnknownErrors; + } + + public long getRemoteWriteMaxRetryDurationInMs() { + return remoteWriteMaxRetryDurationInMs; + } + + public void setRemoteWriteMaxRetryDurationInMs(long remoteWriteMaxRetryDurationInMs) { + this.remoteWriteMaxRetryDurationInMs = remoteWriteMaxRetryDurationInMs; + } } diff --git a/iotdb-core/node-commons/src/main/java/org/apache/iotdb/commons/conf/CommonDescriptor.java b/iotdb-core/node-commons/src/main/java/org/apache/iotdb/commons/conf/CommonDescriptor.java index 5979a6f56e5e..3d42596e1fee 100644 --- a/iotdb-core/node-commons/src/main/java/org/apache/iotdb/commons/conf/CommonDescriptor.java +++ b/iotdb-core/node-commons/src/main/java/org/apache/iotdb/commons/conf/CommonDescriptor.java @@ -246,6 +246,8 @@ public void loadCommonProps(Properties properties) { properties.getProperty( "cluster_device_limit_threshold", String.valueOf(config.getDeviceLimitThreshold())))); + + loadRetryProperties(properties); } private void loadPipeProps(Properties properties) { @@ -614,6 +616,20 @@ private void loadSubscriptionProps(Properties properties) { String.valueOf(config.getSubscriptionReadFileBufferSize())))); } + public void loadRetryProperties(Properties properties) { + config.setRemoteWriteMaxRetryDurationInMs( + Long.parseLong( + properties.getProperty( + "write_request_remote_dispatch_max_retry_duration_in_ms", + String.valueOf(config.getRemoteWriteMaxRetryDurationInMs())))); + + config.setRetryForUnknownErrors( + Boolean.parseBoolean( + properties.getProperty( + "enable_retry_for_unknown_error", + String.valueOf(config.isRetryForUnknownErrors())))); + } + public void loadGlobalConfig(TGlobalConfig globalConfig) { config.setTimestampPrecision(globalConfig.timestampPrecision); config.setTimePartitionInterval( diff --git a/iotdb-core/node-commons/src/main/java/org/apache/iotdb/commons/pipe/config/constant/PipeConnectorConstant.java b/iotdb-core/node-commons/src/main/java/org/apache/iotdb/commons/pipe/config/constant/PipeConnectorConstant.java index 3d0bd4cab746..8d6879ae8c23 100644 --- a/iotdb-core/node-commons/src/main/java/org/apache/iotdb/commons/pipe/config/constant/PipeConnectorConstant.java +++ b/iotdb-core/node-commons/src/main/java/org/apache/iotdb/commons/pipe/config/constant/PipeConnectorConstant.java @@ -22,6 +22,8 @@ import 
org.apache.iotdb.commons.conf.CommonDescriptor; import org.apache.iotdb.commons.pipe.config.PipeConfig; +import com.github.luben.zstd.Zstd; + import java.io.File; import java.util.Arrays; import java.util.Collections; @@ -177,6 +179,14 @@ public class PipeConnectorConstant { CONNECTOR_COMPRESSOR_ZSTD, CONNECTOR_COMPRESSOR_LZMA2))); + public static final String CONNECTOR_COMPRESSOR_ZSTD_LEVEL_KEY = + "connector.compressor.zstd.level"; + public static final String SINK_COMPRESSOR_ZSTD_LEVEL_KEY = "sink.compressor.zstd.level"; + public static final int CONNECTOR_COMPRESSOR_ZSTD_LEVEL_DEFAULT_VALUE = + Zstd.defaultCompressionLevel(); + public static final int CONNECTOR_COMPRESSOR_ZSTD_LEVEL_MIN_VALUE = Zstd.minCompressionLevel(); + public static final int CONNECTOR_COMPRESSOR_ZSTD_LEVEL_MAX_VALUE = Zstd.maxCompressionLevel(); + public static final String CONNECTOR_RATE_LIMIT_KEY = "connector.rate-limit-bytes-per-second"; public static final String SINK_RATE_LIMIT_KEY = "sink.rate-limit-bytes-per-second"; public static final double CONNECTOR_RATE_LIMIT_DEFAULT_VALUE = -1; diff --git a/iotdb-core/node-commons/src/main/java/org/apache/iotdb/commons/pipe/connector/compressor/PipeCompressorConfig.java b/iotdb-core/node-commons/src/main/java/org/apache/iotdb/commons/pipe/connector/compressor/PipeCompressorConfig.java new file mode 100644 index 000000000000..c13028c4d31f --- /dev/null +++ b/iotdb-core/node-commons/src/main/java/org/apache/iotdb/commons/pipe/connector/compressor/PipeCompressorConfig.java @@ -0,0 +1,39 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ + +package org.apache.iotdb.commons.pipe.connector.compressor; + +public class PipeCompressorConfig { + + private final String name; + private final int zstdCompressionLevel; + + public PipeCompressorConfig(String name, int zstdCompressionLevel) { + this.name = name; + this.zstdCompressionLevel = zstdCompressionLevel; + } + + public String getName() { + return name; + } + + public int getZstdCompressionLevel() { + return zstdCompressionLevel; + } +} diff --git a/iotdb-core/node-commons/src/main/java/org/apache/iotdb/commons/pipe/connector/compressor/PipeCompressorFactory.java b/iotdb-core/node-commons/src/main/java/org/apache/iotdb/commons/pipe/connector/compressor/PipeCompressorFactory.java index 14ae972fc4f0..bcee9b85aebe 100644 --- a/iotdb-core/node-commons/src/main/java/org/apache/iotdb/commons/pipe/connector/compressor/PipeCompressorFactory.java +++ b/iotdb-core/node-commons/src/main/java/org/apache/iotdb/commons/pipe/connector/compressor/PipeCompressorFactory.java @@ -19,50 +19,86 @@ package org.apache.iotdb.commons.pipe.connector.compressor; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + import java.util.Collections; import java.util.HashMap; import java.util.Map; +import java.util.concurrent.ConcurrentHashMap; import static org.apache.iotdb.commons.pipe.config.constant.PipeConnectorConstant.CONNECTOR_COMPRESSOR_GZIP; import static org.apache.iotdb.commons.pipe.config.constant.PipeConnectorConstant.CONNECTOR_COMPRESSOR_LZ4; import static org.apache.iotdb.commons.pipe.config.constant.PipeConnectorConstant.CONNECTOR_COMPRESSOR_LZMA2; import static org.apache.iotdb.commons.pipe.config.constant.PipeConnectorConstant.CONNECTOR_COMPRESSOR_SNAPPY; import static org.apache.iotdb.commons.pipe.config.constant.PipeConnectorConstant.CONNECTOR_COMPRESSOR_ZSTD; +import static org.apache.iotdb.commons.pipe.config.constant.PipeConnectorConstant.CONNECTOR_COMPRESSOR_ZSTD_LEVEL_DEFAULT_VALUE; public class PipeCompressorFactory { - private static Map COMPRESSOR_NAME_TO_INSTANCE = new HashMap<>(); + private static final Logger LOGGER = LoggerFactory.getLogger(PipeCompressorFactory.class); + + private static final Map COMPRESSOR_NAME_TO_INSTANCE = + new ConcurrentHashMap<>(); static { COMPRESSOR_NAME_TO_INSTANCE.put(CONNECTOR_COMPRESSOR_SNAPPY, new PipeSnappyCompressor()); COMPRESSOR_NAME_TO_INSTANCE.put(CONNECTOR_COMPRESSOR_GZIP, new PipeGZIPCompressor()); COMPRESSOR_NAME_TO_INSTANCE.put(CONNECTOR_COMPRESSOR_LZ4, new PipeLZ4Compressor()); - COMPRESSOR_NAME_TO_INSTANCE.put(CONNECTOR_COMPRESSOR_ZSTD, new PipeZSTDCompressor()); + COMPRESSOR_NAME_TO_INSTANCE.put( + CONNECTOR_COMPRESSOR_ZSTD, + new PipeZSTDCompressor(CONNECTOR_COMPRESSOR_ZSTD_LEVEL_DEFAULT_VALUE)); COMPRESSOR_NAME_TO_INSTANCE.put(CONNECTOR_COMPRESSOR_LZMA2, new PipeLZMA2Compressor()); - COMPRESSOR_NAME_TO_INSTANCE = Collections.unmodifiableMap(COMPRESSOR_NAME_TO_INSTANCE); } - public static PipeCompressor getCompressor(String name) { - final PipeCompressor compressor = COMPRESSOR_NAME_TO_INSTANCE.get(name); - if (compressor == null) { - throw new UnsupportedOperationException("PipeCompressor not found for name: " + name); + public static PipeCompressor getCompressor(PipeCompressorConfig config) { + if (config == null) { + throw new IllegalArgumentException("PipeCompressorConfig is null"); } - return compressor; + if (config.getName() == null) { + throw new IllegalArgumentException("PipeCompressorConfig.getName() is null"); + } + + final String compressorName = config.getName(); + + // For ZSTD compressor, we need to 
consider the compression level + if (compressorName.equals(CONNECTOR_COMPRESSOR_ZSTD)) { + final int zstdCompressionLevel = config.getZstdCompressionLevel(); + return COMPRESSOR_NAME_TO_INSTANCE.computeIfAbsent( + CONNECTOR_COMPRESSOR_ZSTD + "_" + zstdCompressionLevel, + key -> { + LOGGER.info("Create new PipeZSTDCompressor with level: {}", zstdCompressionLevel); + return new PipeZSTDCompressor(zstdCompressionLevel); + }); + } + + // For other compressors, we can directly get the instance by name + final PipeCompressor compressor = COMPRESSOR_NAME_TO_INSTANCE.get(compressorName); + if (compressor != null) { + return compressor; + } + + throw new UnsupportedOperationException("PipeCompressor not found for name: " + compressorName); } private static Map COMPRESSOR_INDEX_TO_INSTANCE = new HashMap<>(); static { COMPRESSOR_INDEX_TO_INSTANCE.put( - PipeCompressor.PipeCompressionType.SNAPPY.getIndex(), new PipeSnappyCompressor()); + PipeCompressor.PipeCompressionType.SNAPPY.getIndex(), + COMPRESSOR_NAME_TO_INSTANCE.get(CONNECTOR_COMPRESSOR_SNAPPY)); COMPRESSOR_INDEX_TO_INSTANCE.put( - PipeCompressor.PipeCompressionType.GZIP.getIndex(), new PipeGZIPCompressor()); + PipeCompressor.PipeCompressionType.GZIP.getIndex(), + COMPRESSOR_NAME_TO_INSTANCE.get(CONNECTOR_COMPRESSOR_GZIP)); COMPRESSOR_INDEX_TO_INSTANCE.put( - PipeCompressor.PipeCompressionType.LZ4.getIndex(), new PipeLZ4Compressor()); + PipeCompressor.PipeCompressionType.LZ4.getIndex(), + COMPRESSOR_NAME_TO_INSTANCE.get(CONNECTOR_COMPRESSOR_LZ4)); COMPRESSOR_INDEX_TO_INSTANCE.put( - PipeCompressor.PipeCompressionType.ZSTD.getIndex(), new PipeZSTDCompressor()); + PipeCompressor.PipeCompressionType.ZSTD.getIndex(), + COMPRESSOR_NAME_TO_INSTANCE.get(CONNECTOR_COMPRESSOR_ZSTD)); COMPRESSOR_INDEX_TO_INSTANCE.put( - PipeCompressor.PipeCompressionType.LZMA2.getIndex(), new PipeLZMA2Compressor()); + PipeCompressor.PipeCompressionType.LZMA2.getIndex(), + COMPRESSOR_NAME_TO_INSTANCE.get(CONNECTOR_COMPRESSOR_LZMA2)); COMPRESSOR_INDEX_TO_INSTANCE = Collections.unmodifiableMap(COMPRESSOR_INDEX_TO_INSTANCE); } diff --git a/iotdb-core/node-commons/src/main/java/org/apache/iotdb/commons/pipe/connector/compressor/PipeZSTDCompressor.java b/iotdb-core/node-commons/src/main/java/org/apache/iotdb/commons/pipe/connector/compressor/PipeZSTDCompressor.java index 72782353d689..50e2e1f845cf 100644 --- a/iotdb-core/node-commons/src/main/java/org/apache/iotdb/commons/pipe/connector/compressor/PipeZSTDCompressor.java +++ b/iotdb-core/node-commons/src/main/java/org/apache/iotdb/commons/pipe/connector/compressor/PipeZSTDCompressor.java @@ -19,29 +19,26 @@ package org.apache.iotdb.commons.pipe.connector.compressor; -import org.apache.tsfile.compress.ICompressor; -import org.apache.tsfile.compress.IUnCompressor; -import org.apache.tsfile.file.metadata.enums.CompressionType; +import com.github.luben.zstd.Zstd; import java.io.IOException; public class PipeZSTDCompressor extends PipeCompressor { - private static final ICompressor COMPRESSOR = ICompressor.getCompressor(CompressionType.ZSTD); - private static final IUnCompressor DECOMPRESSOR = - IUnCompressor.getUnCompressor(CompressionType.ZSTD); + private final int compressionLevel; - public PipeZSTDCompressor() { + public PipeZSTDCompressor(int compressionLevel) { super(PipeCompressionType.ZSTD); + this.compressionLevel = compressionLevel; } @Override public byte[] compress(byte[] data) throws IOException { - return COMPRESSOR.compress(data); + return Zstd.compress(data, compressionLevel); } @Override - public byte[] 
decompress(byte[] byteArray) throws IOException { - return DECOMPRESSOR.uncompress(byteArray); + public byte[] decompress(byte[] byteArray) { + return Zstd.decompress(byteArray, (int) Zstd.decompressedSize(byteArray, 0, byteArray.length)); } } diff --git a/iotdb-core/node-commons/src/main/java/org/apache/iotdb/commons/pipe/connector/protocol/IoTDBConnector.java b/iotdb-core/node-commons/src/main/java/org/apache/iotdb/commons/pipe/connector/protocol/IoTDBConnector.java index f65daf86d4ff..ad8e328e45e0 100644 --- a/iotdb-core/node-commons/src/main/java/org/apache/iotdb/commons/pipe/connector/protocol/IoTDBConnector.java +++ b/iotdb-core/node-commons/src/main/java/org/apache/iotdb/commons/pipe/connector/protocol/IoTDBConnector.java @@ -22,6 +22,7 @@ import org.apache.iotdb.common.rpc.thrift.TEndPoint; import org.apache.iotdb.commons.pipe.connector.PipeReceiverStatusHandler; import org.apache.iotdb.commons.pipe.connector.compressor.PipeCompressor; +import org.apache.iotdb.commons.pipe.connector.compressor.PipeCompressorConfig; import org.apache.iotdb.commons.pipe.connector.compressor.PipeCompressorFactory; import org.apache.iotdb.commons.pipe.connector.limiter.GlobalRateLimiter; import org.apache.iotdb.commons.pipe.connector.limiter.PipeEndPointRateLimiter; @@ -50,6 +51,10 @@ import static org.apache.iotdb.commons.pipe.config.constant.PipeConnectorConstant.CONNECTOR_COMPRESSOR_DEFAULT_VALUE; import static org.apache.iotdb.commons.pipe.config.constant.PipeConnectorConstant.CONNECTOR_COMPRESSOR_KEY; import static org.apache.iotdb.commons.pipe.config.constant.PipeConnectorConstant.CONNECTOR_COMPRESSOR_SET; +import static org.apache.iotdb.commons.pipe.config.constant.PipeConnectorConstant.CONNECTOR_COMPRESSOR_ZSTD_LEVEL_DEFAULT_VALUE; +import static org.apache.iotdb.commons.pipe.config.constant.PipeConnectorConstant.CONNECTOR_COMPRESSOR_ZSTD_LEVEL_KEY; +import static org.apache.iotdb.commons.pipe.config.constant.PipeConnectorConstant.CONNECTOR_COMPRESSOR_ZSTD_LEVEL_MAX_VALUE; +import static org.apache.iotdb.commons.pipe.config.constant.PipeConnectorConstant.CONNECTOR_COMPRESSOR_ZSTD_LEVEL_MIN_VALUE; import static org.apache.iotdb.commons.pipe.config.constant.PipeConnectorConstant.CONNECTOR_EXCEPTION_CONFLICT_RECORD_IGNORED_DATA_DEFAULT_VALUE; import static org.apache.iotdb.commons.pipe.config.constant.PipeConnectorConstant.CONNECTOR_EXCEPTION_CONFLICT_RECORD_IGNORED_DATA_KEY; import static org.apache.iotdb.commons.pipe.config.constant.PipeConnectorConstant.CONNECTOR_EXCEPTION_CONFLICT_RESOLVE_STRATEGY_DEFAULT_VALUE; @@ -72,6 +77,7 @@ import static org.apache.iotdb.commons.pipe.config.constant.PipeConnectorConstant.CONNECTOR_RATE_LIMIT_DEFAULT_VALUE; import static org.apache.iotdb.commons.pipe.config.constant.PipeConnectorConstant.CONNECTOR_RATE_LIMIT_KEY; import static org.apache.iotdb.commons.pipe.config.constant.PipeConnectorConstant.SINK_COMPRESSOR_KEY; +import static org.apache.iotdb.commons.pipe.config.constant.PipeConnectorConstant.SINK_COMPRESSOR_ZSTD_LEVEL_KEY; import static org.apache.iotdb.commons.pipe.config.constant.PipeConnectorConstant.SINK_EXCEPTION_CONFLICT_RECORD_IGNORED_DATA_KEY; import static org.apache.iotdb.commons.pipe.config.constant.PipeConnectorConstant.SINK_EXCEPTION_CONFLICT_RESOLVE_STRATEGY_KEY; import static org.apache.iotdb.commons.pipe.config.constant.PipeConnectorConstant.SINK_EXCEPTION_CONFLICT_RETRY_MAX_TIME_SECONDS_KEY; @@ -151,6 +157,21 @@ public void validate(PipeParameterValidator validator) throws Exception { CONNECTOR_LOAD_BALANCE_STRATEGY_SET, 
loadBalanceStrategy), loadBalanceStrategy); + final int zstdCompressionLevel = + parameters.getIntOrDefault( + Arrays.asList(CONNECTOR_COMPRESSOR_ZSTD_LEVEL_KEY, SINK_COMPRESSOR_ZSTD_LEVEL_KEY), + CONNECTOR_COMPRESSOR_ZSTD_LEVEL_DEFAULT_VALUE); + validator.validate( + arg -> + (int) arg >= CONNECTOR_COMPRESSOR_ZSTD_LEVEL_MIN_VALUE + && (int) arg <= CONNECTOR_COMPRESSOR_ZSTD_LEVEL_MAX_VALUE, + String.format( + "Zstd compression level should be in the range [%d, %d], but got %d.", + CONNECTOR_COMPRESSOR_ZSTD_LEVEL_MIN_VALUE, + CONNECTOR_COMPRESSOR_ZSTD_LEVEL_MAX_VALUE, + zstdCompressionLevel), + zstdCompressionLevel); + final String compressionTypes = parameters .getStringOrDefault( @@ -170,7 +191,9 @@ public void validate(PipeParameterValidator validator) throws Exception { "Compressor should be one of %s, but got %s.", CONNECTOR_COMPRESSOR_SET, trimmedCompressionType), trimmedCompressionType); - compressors.add(PipeCompressorFactory.getCompressor(trimmedCompressionType)); + compressors.add( + PipeCompressorFactory.getCompressor( + new PipeCompressorConfig(trimmedCompressionType, zstdCompressionLevel))); } } validator.validate( diff --git a/iotdb-core/node-commons/src/main/java/org/apache/iotdb/commons/service/metric/PerformanceOverviewMetrics.java b/iotdb-core/node-commons/src/main/java/org/apache/iotdb/commons/service/metric/PerformanceOverviewMetrics.java index 1efbe6263354..14e395878348 100644 --- a/iotdb-core/node-commons/src/main/java/org/apache/iotdb/commons/service/metric/PerformanceOverviewMetrics.java +++ b/iotdb-core/node-commons/src/main/java/org/apache/iotdb/commons/service/metric/PerformanceOverviewMetrics.java @@ -111,6 +111,7 @@ public void recordScheduleCost(long costTimeInNanos) { Metric.PERFORMANCE_OVERVIEW_SCHEDULE_DETAIL.toString(); private static final String LOCAL_SCHEDULE = "local_scheduler"; private static final String REMOTE_SCHEDULE = "remote_scheduler"; + private static final String REMOTE_RETRY_SLEEP = "remote_retry"; static { metricInfoMap.put( @@ -127,10 +128,18 @@ public void recordScheduleCost(long costTimeInNanos) { PERFORMANCE_OVERVIEW_SCHEDULE_DETAIL, Tag.STAGE.toString(), REMOTE_SCHEDULE)); + metricInfoMap.put( + REMOTE_RETRY_SLEEP, + new MetricInfo( + MetricType.TIMER, + PERFORMANCE_OVERVIEW_SCHEDULE_DETAIL, + Tag.STAGE.toString(), + REMOTE_RETRY_SLEEP)); } private Timer localScheduleTimer = DoNothingMetricManager.DO_NOTHING_TIMER; private Timer remoteScheduleTimer = DoNothingMetricManager.DO_NOTHING_TIMER; + private Timer remoteRetrySleepTimer = DoNothingMetricManager.DO_NOTHING_TIMER; /** Record the time cost of local schedule. */ public void recordScheduleLocalCost(long costTimeInNanos) { @@ -142,6 +151,11 @@ public void recordScheduleRemoteCost(long costTimeInNanos) { remoteScheduleTimer.updateNanos(costTimeInNanos); } + /** Record the time cost of remote schedule. 
*/ + public void recordRemoteRetrySleepCost(long costTimeInNanos) { + remoteRetrySleepTimer.updateNanos(costTimeInNanos); + } + // endregion // region local schedule @@ -327,6 +341,13 @@ public void bindTo(AbstractMetricService metricService) { MetricLevel.CORE, Tag.STAGE.toString(), REMOTE_SCHEDULE); + remoteRetrySleepTimer = + metricService.getOrCreateTimer( + PERFORMANCE_OVERVIEW_SCHEDULE_DETAIL, + MetricLevel.CORE, + Tag.STAGE.toString(), + REMOTE_RETRY_SLEEP); + // bind local schedule metrics schemaValidateTimer = metricService.getOrCreateTimer( diff --git a/iotdb-core/node-commons/src/main/java/org/apache/iotdb/commons/subscription/meta/topic/TopicMeta.java b/iotdb-core/node-commons/src/main/java/org/apache/iotdb/commons/subscription/meta/topic/TopicMeta.java index 33f5065cbae6..a0e80db04e06 100644 --- a/iotdb-core/node-commons/src/main/java/org/apache/iotdb/commons/subscription/meta/topic/TopicMeta.java +++ b/iotdb-core/node-commons/src/main/java/org/apache/iotdb/commons/subscription/meta/topic/TopicMeta.java @@ -39,7 +39,7 @@ public class TopicMeta { private String topicName; - private long creationTime; + private long creationTime; // raw timestamp based on system timestamp precision private TopicConfig config; private Set subscribedConsumerGroupIds; diff --git a/iotdb-core/node-commons/src/main/java/org/apache/iotdb/commons/utils/StatusUtils.java b/iotdb-core/node-commons/src/main/java/org/apache/iotdb/commons/utils/StatusUtils.java index be32bf5a4597..97811bc76a94 100644 --- a/iotdb-core/node-commons/src/main/java/org/apache/iotdb/commons/utils/StatusUtils.java +++ b/iotdb-core/node-commons/src/main/java/org/apache/iotdb/commons/utils/StatusUtils.java @@ -40,11 +40,16 @@ private StatusUtils() {} private static final Set NEED_RETRY = new HashSet<>(); + private static final Set UNKNOWN_ERRORS = new HashSet<>(); + private static final CommonConfig COMMON_CONFIG = CommonDescriptor.getInstance().getConfig(); static { - NEED_RETRY.add(TSStatusCode.EXECUTE_STATEMENT_ERROR.getStatusCode()); - NEED_RETRY.add(TSStatusCode.INTERNAL_SERVER_ERROR.getStatusCode()); + // UNKNOWN ERRORS + UNKNOWN_ERRORS.add(TSStatusCode.EXECUTE_STATEMENT_ERROR.getStatusCode()); + UNKNOWN_ERRORS.add(TSStatusCode.INTERNAL_SERVER_ERROR.getStatusCode()); + + // KNOWN ERRORS NEED_RETRY.add(TSStatusCode.DISPATCH_ERROR.getStatusCode()); NEED_RETRY.add(TSStatusCode.SYSTEM_READ_ONLY.getStatusCode()); NEED_RETRY.add(TSStatusCode.STORAGE_ENGINE_NOT_READY.getStatusCode()); @@ -213,14 +218,23 @@ public static boolean needRetryHelper(TSStatus status) { int code = status.getCode(); if (code == TSStatusCode.MULTIPLE_ERROR.getStatusCode()) { for (TSStatus subStatus : status.subStatus) { + // any sub codes for MULTIPLE_ERROR don't need to retry, we won't retry for the whole + // request if (subStatus == null - || (subStatus.getCode() != OK.code && !NEED_RETRY.contains(subStatus.getCode()))) { + || (subStatus.getCode() != OK.code + && !needRetryHelperForSingleStatus(subStatus.getCode()))) { return false; } } return true; } else { - return NEED_RETRY.contains(code); + return needRetryHelperForSingleStatus(code); } } + + // without MULTIPLE_ERROR(302) + private static boolean needRetryHelperForSingleStatus(int statusCode) { + return NEED_RETRY.contains(statusCode) + || (COMMON_CONFIG.isRetryForUnknownErrors() && UNKNOWN_ERRORS.contains(statusCode)); + } } diff --git a/iotdb-core/pom.xml b/iotdb-core/pom.xml index df6c18ca7250..c7382b3eb230 100644 --- a/iotdb-core/pom.xml +++ b/iotdb-core/pom.xml @@ -24,7 +24,7 @@ org.apache.iotdb 
iotdb-parent - 1.3.2-SNAPSHOT + 1.3.3-SNAPSHOT iotdb-core pom diff --git a/iotdb-core/relational-grammar/pom.xml b/iotdb-core/relational-grammar/pom.xml index 58e3351b13ca..95d4dcff43ab 100644 --- a/iotdb-core/relational-grammar/pom.xml +++ b/iotdb-core/relational-grammar/pom.xml @@ -24,7 +24,7 @@ org.apache.iotdb iotdb-core - 1.3.2-SNAPSHOT + 1.3.3-SNAPSHOT iotdb-relational-grammar IoTDB: Core: Relational-Antlr-Parser diff --git a/iotdb-protocol/openapi/pom.xml b/iotdb-protocol/openapi/pom.xml index 62790ffc4ff8..316cd2fb1d5e 100644 --- a/iotdb-protocol/openapi/pom.xml +++ b/iotdb-protocol/openapi/pom.xml @@ -24,7 +24,7 @@ org.apache.iotdb iotdb-protocol - 1.3.2-SNAPSHOT + 1.3.3-SNAPSHOT openapi IoTDB: Protocol: OpenAPI diff --git a/iotdb-protocol/pom.xml b/iotdb-protocol/pom.xml index acd4f61f66eb..286a2a26e805 100644 --- a/iotdb-protocol/pom.xml +++ b/iotdb-protocol/pom.xml @@ -24,7 +24,7 @@ org.apache.iotdb iotdb-parent - 1.3.2-SNAPSHOT + 1.3.3-SNAPSHOT iotdb-protocol pom diff --git a/iotdb-protocol/thrift-commons/pom.xml b/iotdb-protocol/thrift-commons/pom.xml index 931c16c71b3d..bb962d4248f3 100644 --- a/iotdb-protocol/thrift-commons/pom.xml +++ b/iotdb-protocol/thrift-commons/pom.xml @@ -24,7 +24,7 @@ org.apache.iotdb iotdb-protocol - 1.3.2-SNAPSHOT + 1.3.3-SNAPSHOT iotdb-thrift-commons IoTDB: Protocol: Thrift Commons diff --git a/iotdb-protocol/thrift-confignode/pom.xml b/iotdb-protocol/thrift-confignode/pom.xml index cd0592f02770..9d38000f4598 100644 --- a/iotdb-protocol/thrift-confignode/pom.xml +++ b/iotdb-protocol/thrift-confignode/pom.xml @@ -24,7 +24,7 @@ org.apache.iotdb iotdb-protocol - 1.3.2-SNAPSHOT + 1.3.3-SNAPSHOT iotdb-thrift-confignode IoTDB: Protocol: Thrift Config Node @@ -41,7 +41,7 @@ org.apache.iotdb iotdb-thrift-commons - 1.3.2-SNAPSHOT + 1.3.3-SNAPSHOT diff --git a/iotdb-protocol/thrift-consensus/pom.xml b/iotdb-protocol/thrift-consensus/pom.xml index 42226c9df6c2..e43f970dbd3e 100644 --- a/iotdb-protocol/thrift-consensus/pom.xml +++ b/iotdb-protocol/thrift-consensus/pom.xml @@ -24,7 +24,7 @@ org.apache.iotdb iotdb-protocol - 1.3.2-SNAPSHOT + 1.3.3-SNAPSHOT iotdb-thrift-consensus IoTDB: Protocol: Thrift Consensus @@ -33,7 +33,7 @@ org.apache.iotdb iotdb-thrift-commons - 1.3.2-SNAPSHOT + 1.3.3-SNAPSHOT org.slf4j diff --git a/iotdb-protocol/thrift-datanode/pom.xml b/iotdb-protocol/thrift-datanode/pom.xml index c7d625d3365c..7b8d75d1dc2a 100644 --- a/iotdb-protocol/thrift-datanode/pom.xml +++ b/iotdb-protocol/thrift-datanode/pom.xml @@ -24,7 +24,7 @@ org.apache.iotdb iotdb-protocol - 1.3.2-SNAPSHOT + 1.3.3-SNAPSHOT iotdb-thrift IoTDB: Protocol: Thrift Data Node @@ -33,7 +33,7 @@ org.apache.iotdb iotdb-thrift-commons - 1.3.2-SNAPSHOT + 1.3.3-SNAPSHOT org.apache.thrift diff --git a/library-udf/pom.xml b/library-udf/pom.xml index b2974a8aca75..2aca5427ad79 100644 --- a/library-udf/pom.xml +++ b/library-udf/pom.xml @@ -24,7 +24,7 @@ org.apache.iotdb iotdb-parent - 1.3.2-SNAPSHOT + 1.3.3-SNAPSHOT library-udf IoTDB: UDF @@ -41,7 +41,7 @@ org.apache.iotdb udf-api - 1.3.2-SNAPSHOT + 1.3.3-SNAPSHOT org.slf4j diff --git a/pom.xml b/pom.xml index ca4082062d74..d16c7b73db65 100644 --- a/pom.xml +++ b/pom.xml @@ -28,7 +28,7 @@ org.apache.iotdb iotdb-parent - 1.3.2-SNAPSHOT + 1.3.3-SNAPSHOT pom Apache IoTDB Project Parent POM This is the top level project that builds, packages the tsfile, iotdb engine, jdbc, and integration libs.
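
Note on the createTopic change in ClusterConfigTaskExecutor above: the topic's creationTime is now converted from wall-clock milliseconds into the cluster's configured timestamp precision before it is stored, matching the comment added to TopicMeta. The one-call sketch below uses only symbols visible in this patch; the exact scaling (for example ms to us when timestamp_precision=us) is an assumption about convertMilliTimeWithPrecision, not something stated in the diff.

// Illustrative sketch (not part of the patch): converting wall-clock millis into the
// cluster's timestamp precision before storing it as a topic creation time.
import org.apache.iotdb.commons.conf.CommonDescriptor;
import org.apache.iotdb.commons.utils.CommonDateTimeUtils;

public class TopicCreationTimeSketch {
  public static void main(String[] args) {
    long creationTime =
        CommonDateTimeUtils.convertMilliTimeWithPrecision(
            System.currentTimeMillis(),
            CommonDescriptor.getInstance().getConfig().getTimestampPrecision());
    System.out.println(creationTime); // e.g. microseconds when timestamp_precision=us (assumed scaling)
  }
}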
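
Note on the LimitOffsetPushDown change above: for GROUP BY TIME queries ordered by time DESC, the pushed-down start time is startTime + (size - offsetSize - limitSize) * step, and the new guard clamps that interval count at zero so a LIMIT (plus OFFSET) covering all time windows can no longer push the start time before the original window. The sketch below replays the arithmetic for the new test cases over group by ([4, 899), 200ms), i.e. five windows; it assumes size is the number of time windows and is illustrative only, not part of the patch.

// Illustrative sketch (not part of the patch): the DESC start-time clamp.
public class DescStartTimeClampSketch {
  static long pushedStartTime(long startTime, long size, long offsetSize, long limitSize, long step) {
    long startTimeInterval = size - offsetSize - limitSize;
    // The fix: never move the start time before the original window boundary.
    return startTime + (startTimeInterval < 0 ? 0 : startTimeInterval) * step;
  }

  public static void main(String[] args) {
    long size = 5; // windows: [4,204), [204,404), [404,604), [604,804), [804,899)
    System.out.println(pushedStartTime(4, size, 0, 3, 200)); // 404, as in testGroupByTimePushDown12
    System.out.println(pushedStartTime(4, size, 0, 5, 200)); // 4,   as in testGroupByTimePushDown13
    System.out.println(pushedStartTime(4, size, 2, 5, 200)); // 4,   interval would be -2 without the clamp
    System.out.println(pushedStartTime(4, size, 0, 6, 200)); // 4,   as in testGroupByTimePushDown15
  }
}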
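
Note on the FragmentInstanceDispatcherImpl change above: remote write dispatch now retries until a configurable wall-clock budget (write_request_remote_dispatch_max_retry_duration_in_ms) is used up, instead of stopping after a fixed count of 10 attempts, and each sleep is recorded in the new remote_retry timer. Below is a minimal standalone sketch of that duration-bounded loop; needRetry and retryOnce stand in for AsyncPlanNodeSender, and retrySleepMillis is a stand-in for the dispatcher's private helper, following the documented backoff (doubling from 100ms, capped at 20,000ms).

// Illustrative sketch (not part of the patch): a duration-bounded retry loop in the shape
// of dispatchWriteAsync's new logic.
import java.util.function.BooleanSupplier;

public class DurationBoundedRetrySketch {

  static long retrySleepMillis(int retryCount) {
    // assumed backoff schedule: 100ms doubling per retry, never exceeding 20,000ms
    return Math.min(20_000L, 100L * (1L << Math.min(retryCount, 8)));
  }

  static void retryWithinBudget(long maxRetryDurationInMs, BooleanSupplier needRetry, Runnable retryOnce)
      throws InterruptedException {
    long maxRetryDurationInNs = maxRetryDurationInMs > 0 ? maxRetryDurationInMs * 1_000_000L : 0;
    if (maxRetryDurationInNs <= 0 || !needRetry.getAsBoolean()) {
      return; // retrying disabled, or nothing failed
    }
    int retryCount = 0;
    long waitMillis = retrySleepMillis(retryCount);
    long retryStartTime = System.nanoTime();
    while (needRetry.getAsBoolean()) {
      retryCount++;
      retryOnce.run();
      // stop when nothing is left to retry, or when the next sleep would exceed the budget
      if (!(needRetry.getAsBoolean()
          && (System.nanoTime() - retryStartTime + waitMillis * 1_000_000L) < maxRetryDurationInNs)) {
        break;
      }
      Thread.sleep(waitMillis);
      waitMillis = retrySleepMillis(retryCount);
    }
  }

  public static void main(String[] args) throws InterruptedException {
    int[] remainingFailures = {3}; // pretend three dispatch rounds still report failures
    retryWithinBudget(
        60_000, // same default as write_request_remote_dispatch_max_retry_duration_in_ms
        () -> remainingFailures[0] > 0,
        () -> remainingFailures[0]--);
    System.out.println("failures left: " + remainingFailures[0]); // 0
  }
}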
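
Note on the CompactionRecoverManager change above: the time-partition directory filter was tightened from \d* to -?\d+, so directories for negative time partitions are now picked up during recovery and a matching name must contain at least one digit. A self-contained check of the two patterns:

// Illustrative sketch (not part of the patch): why the time-partition directory pattern changed.
import java.util.regex.Pattern;

public class TimePartitionPatternSketch {
  public static void main(String[] args) {
    Pattern oldPattern = Pattern.compile("\\d*");
    Pattern newPattern = Pattern.compile("-?\\d+");

    // A negative time partition directory such as "-1" was skipped by the old pattern.
    System.out.println(oldPattern.matcher("-1").matches()); // false
    System.out.println(newPattern.matcher("-1").matches()); // true

    // The old pattern also accepted an empty name; the new one requires at least one digit.
    System.out.println(oldPattern.matcher("").matches()); // true
    System.out.println(newPattern.matcher("").matches()); // false
  }
}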
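
Note on the new retry settings above (CommonConfig, CommonDescriptor and StatusUtils): write_request_remote_dispatch_max_retry_duration_in_ms bounds the remote dispatch retry budget, and enable_retry_for_unknown_error controls whether the "unknown" codes EXECUTE_STATEMENT_ERROR(301) and INTERNAL_SERVER_ERROR(305) are treated as retryable. The sketch below feeds the two properties through the public loadRetryProperties hook and checks the effect on StatusUtils.needRetryHelper; only methods visible in this patch are used, and the import locations of RpcUtils, TSStatusCode and TSStatus are assumed from their usual packages.

// Illustrative sketch (not part of the patch): exercising the new retry configuration.
import java.util.Properties;

import org.apache.iotdb.common.rpc.thrift.TSStatus;
import org.apache.iotdb.commons.conf.CommonDescriptor;
import org.apache.iotdb.commons.utils.StatusUtils;
import org.apache.iotdb.rpc.RpcUtils;
import org.apache.iotdb.rpc.TSStatusCode;

public class RetryConfigSketch {
  public static void main(String[] args) {
    Properties properties = new Properties();
    properties.setProperty("write_request_remote_dispatch_max_retry_duration_in_ms", "30000");
    properties.setProperty("enable_retry_for_unknown_error", "true");

    // loadRetryProperties is public and marked hot_reload, so the settings can be re-applied at runtime.
    CommonDescriptor.getInstance().loadRetryProperties(properties);

    TSStatus unknownError =
        RpcUtils.getStatus(TSStatusCode.INTERNAL_SERVER_ERROR, "simulated unknown error");
    // true only because enable_retry_for_unknown_error was set above; false by default.
    System.out.println(StatusUtils.needRetryHelper(unknownError));
  }
}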
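
Note on the pipe compressor changes above: the ZSTD compressor now takes an explicit level, configured per connector via connector.compressor.zstd.level / sink.compressor.zstd.level and validated against Zstd.minCompressionLevel() and Zstd.maxCompressionLevel(), and PipeCompressorFactory caches one instance per (zstd, level) pair. A hedged usage sketch follows, built only from types and constants introduced or touched in this patch; level 3 is an arbitrary choice inside the validated range.

// Illustrative sketch (not part of the patch): obtaining a level-aware ZSTD pipe compressor.
import static org.apache.iotdb.commons.pipe.config.constant.PipeConnectorConstant.CONNECTOR_COMPRESSOR_ZSTD;

import java.io.IOException;
import java.nio.charset.StandardCharsets;

import org.apache.iotdb.commons.pipe.connector.compressor.PipeCompressor;
import org.apache.iotdb.commons.pipe.connector.compressor.PipeCompressorConfig;
import org.apache.iotdb.commons.pipe.connector.compressor.PipeCompressorFactory;

public class ZstdLevelCompressorSketch {
  public static void main(String[] args) throws IOException {
    // The factory caches one compressor per zstd level, so repeated calls reuse the same instance.
    PipeCompressor compressor =
        PipeCompressorFactory.getCompressor(new PipeCompressorConfig(CONNECTOR_COMPRESSOR_ZSTD, 3));

    byte[] raw = "pipe payload".getBytes(StandardCharsets.UTF_8);
    byte[] compressed = compressor.compress(raw);
    byte[] roundTripped = compressor.decompress(compressed);
    System.out.println(new String(roundTripped, StandardCharsets.UTF_8));
  }
}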