diff --git a/bin/hadoop b/bin/hadoop
index 4b4e6e32..651ec0ce 100755
--- a/bin/hadoop
+++ b/bin/hadoop
@@ -68,9 +68,11 @@ if [ $# = 0 ]; then
echo " raidshell [options] run RAID-shell utility"
echo " fs run a generic filesystem user client"
echo " balancer run a cluster balancing utility"
+ echo " avatarbalancer run a avatar cluster balancing utility"
echo " jmxget get JMX exported values from NameNode or DataNode."
echo " oiv apply the offline fsimage viewer to an fsimage"
echo " oev apply the offline edits viewer to an edits file"
+ echo " oid apply the offline fsimage decompressor to an fsimage"
echo " Use -help to see options"
echo " jobtracker run the MapReduce job Tracker node"
echo " pipes run a Pipes job"
@@ -122,6 +124,7 @@ fi
# CLASSPATH initially contains $HADOOP_CONF_DIR
JMX_OPTS=""
CLASSPATH="${HADOOP_CONF_DIR}"
+CLASSPATH=${CLASSPATH}:$HADOOP_CLASSPATH
CLASSPATH=${CLASSPATH}:$JAVA_HOME/lib/tools.jar
# for developers, add Hadoop classes to CLASSPATH
@@ -174,12 +177,6 @@ for f in $HADOOP_HOME/build/hadoop-*-tools.jar; do
TOOL_PATH=${TOOL_PATH}:$f;
done
-# add user-specified CLASSPATH before corona so that a newer
-# corona jar can be specified to override the deployed one
-if [ "$HADOOP_CLASSPATH" != "" ]; then
- CLASSPATH=${CLASSPATH}:${HADOOP_CLASSPATH}
-fi
-
# CORONA_PATH for corona daemons
if [ -d "$HADOOP_HOME/build/contrib/corona/classes" ]; then
CORONA_PATH=${CORONA_PATH}:$HADOOP_HOME/build/contrib/corona/classes
@@ -197,6 +194,15 @@ for f in $HADOOP_HOME/contrib/corona/lib/*.jar; do
CORONA_LIB_PATH=${CORONA_LIB_PATH}:$f;
done
+# NOTIFIER_PATH for the namespace notifier server daemon
+if [ -d "$HADOOP_HOME/build/contrib/namespace-notifier/classes" ]; then
+ NOTIFIER_PATH=${NOTIFIER_PATH}:$HADOOP_HOME/build/contrib/namespace-notifier/classes
+fi
+
+for f in $HADOOP_HOME/contrib/namespace-notifier/*.jar; do
+ NOTIFIER_PATH=${NOTIFIER_PATH}:$f;
+done
+
# default log directory & file
if [ "$HADOOP_LOG_DIR" = "" ]; then
HADOOP_LOG_DIR="$HADOOP_HOME/logs"
@@ -227,9 +233,13 @@ if [ "$COMMAND" = "namenode" ] ; then
HADOOP_OPTS="$HADOOP_OPTS $HADOOP_GC_LOG_OPTS $HADOOP_NAMENODE_OPTS"
elif [ "$COMMAND" = "avatarshell" ] ; then
CLASS='org.apache.hadoop.hdfs.AvatarShell'
+ HADOOP_LOGFILE='avatarshell.log'
+ HADOOP_ROOT_LOGGER=INFO,DRFA
HADOOP_OPTS="$HADOOP_OPTS $HADOOP_GC_LOG_OPTS $HADOOP_CLIENT_OPTS"
elif [ "$COMMAND" = "avatarzk" ] ; then
CLASS='org.apache.hadoop.hdfs.AvatarZKShell'
+ HADOOP_LOGFILE='avatarzkshell.log'
+ HADOOP_ROOT_LOGGER=INFO,DRFA
HADOOP_OPTS="$HADOOP_OPTS $HADOOP_GC_LOG_OPTS $HADOOP_CLIENT_OPTS"
elif [ "$COMMAND" = "avatarnode" ] ; then
CLASS='org.apache.hadoop.hdfs.server.namenode.AvatarNode'
@@ -238,6 +248,31 @@ elif [ "$COMMAND" = "avatarnode" ] ; then
elif [ "$COMMAND" = "secondarynamenode" ] ; then
CLASS='org.apache.hadoop.hdfs.server.namenode.SecondaryNameNode'
HADOOP_OPTS="$HADOOP_OPTS $HADOOP_GC_LOG_OPTS $HADOOP_SECONDARYNAMENODE_OPTS"
+elif [ "$COMMAND" = "raidnode" ] ; then
+ CLASS='org.apache.hadoop.raid.RaidNode'
+ JMX_OPTS=$HADOOP_JMX_RAIDNODE_OPTS
+ HADOOP_OPTS="$HADOOP_OPTS $HADOOP_GC_LOG_OPTS"
+ CLASSPATH=${CORONA_LIB_PATH}:${CLASSPATH}
+elif [ "$COMMAND" = "notifier" ] ; then
+ CLASS='org.apache.hadoop.hdfs.notifier.server.ServerCore'
+ if [ "$NOTIFIER_PATH" != "" ]; then
+ CLASSPATH=${CLASSPATH}:${NOTIFIER_PATH}
+ fi
+ JMX_OPTS="$JMX_OPTS $NOTIFIER_JMX_OPTS"
+elif [ "$COMMAND" = "fsshellservice" ] ; then
+ CLASS='org.apache.hadoop.hdfs.fsshellservice.FsShellServiceImpl'
+ if [ -d "$HADOOP_HOME/build/contrib/corona/lib" ]; then
+ for f in $HADOOP_HOME/build/contrib/corona/lib/*.jar; do
+ CLASSPATH=${CLASSPATH}:$f;
+ done
+ fi
+ if [ -d "$HADOOP_HOME/build/contrib/fsshellservice/" ]; then
+ CLASSPATH=${CLASSPATH}:$HADOOP_HOME/build/contrib/fsshellservice/classes
+ fi
+ for f in $HADOOP_HOME/contrib/fsshellservice/*.jar; do
+ CLASSPATH=${CLASSPATH}:$f;
+ done
+ CLASSPATH=${CORONA_LIB_PATH}:${CLASSPATH}
elif [ "$COMMAND" = "avatardatanode" ] ; then
CLASS='org.apache.hadoop.hdfs.server.datanode.AvatarDataNode'
JMX_OPTS=$HADOOP_JMX_DATANODE_OPTS
@@ -280,12 +315,19 @@ elif [ "$COMMAND" = "balancer" ] ; then
CLASS=org.apache.hadoop.hdfs.server.balancer.Balancer
HADOOP_OPTS="$HADOOP_OPTS $HADOOP_BALANCER_OPTS"
CMDLINE_OPTS="$CMDLINE_OPTS $BALANCER_CMDLINE_OPTS"
+elif [ "$COMMAND" = "avatarbalancer" ] ; then
+ CLASS=org.apache.hadoop.hdfs.server.balancer.AvatarBalancer
+ HADOOP_OPTS="$HADOOP_OPTS $HADOOP_BALANCER_OPTS"
+ CMDLINE_OPTS="$CMDLINE_OPTS $BALANCER_CMDLINE_OPTS"
elif [ "$COMMAND" = "oiv" ] ; then
CLASS=org.apache.hadoop.hdfs.tools.offlineImageViewer.OfflineImageViewer
HADOOP_OPTS="$HADOOP_OPTS $HADOOP_CLIENT_OPTS"
elif [ "$COMMAND" = "oev" ] ; then
CLASS=org.apache.hadoop.hdfs.tools.offlineEditsViewer.OfflineEditsViewer
HADOOP_OPTS="$HADOOP_OPTS $HADOOP_CLIENT_OPTS"
+elif [ "$COMMAND" = "oid" ] ; then
+ CLASS=org.apache.hadoop.hdfs.tools.offlineImageViewer.OfflineImageDecompressor
+ HADOOP_OPTS="$HADOOP_OPTS $HADOOP_CLIENT_OPTS"
elif [ "$COMMAND" = "jmxget" ] ; then
CLASS=org.apache.hadoop.hdfs.tools.JMXGet
HADOOP_OPTS="$HADOOP_OPTS $HADOOP_CLIENT_OPTS"
@@ -301,11 +343,14 @@ elif [ "$COMMAND" = "coronaclustermanager" ] ; then
JMX_OPTS=$HADOOP_JMX_CORONACLUSTERMANAGER_OPTS
HADOOP_OPTS="$HADOOP_OPTS $HADOOP_GC_LOG_OPTS $HADOOP_CORONACLUSTERMANAGER_OPTS"
# Corona lib path should be first to ensure that it uses the right thrift JAR
- CLASSPATH=${CORONA_LIB_PATH}:${CLASSPATH}
+ CLASSPATH=${CORONA_LIB_PATH}:${CLUSTER_MANAGER_LIB_PATH}:${CLASSPATH}
elif [ "$COMMAND" = "coronatasktracker" ] ; then
CLASS=org.apache.hadoop.mapred.CoronaTaskTracker
JMX_OPTS=$HADOOP_JMX_TASKTRACKER_OPTS
HADOOP_OPTS="$HADOOP_OPTS $HADOOP_GC_LOG_OPTS $HADOOP_TASKTRACKER_OPTS"
+ # For corona task trackers, the tasks should not get the thrift library.
+ MAPREDUCE_TASK_SYSTEM_CLASSPATH=${CLASSPATH}
+ export MAPREDUCE_TASK_SYSTEM_CLASSPATH
# See coronaclustermanager comment
CLASSPATH=${CORONA_LIB_PATH}:${CLASSPATH}
elif [ "$COMMAND" = "coronaproxyjobtracker" ] ; then
diff --git a/bin/start-corona.sh b/bin/start-corona.sh
index b7098690..c4132a83 100755
--- a/bin/start-corona.sh
+++ b/bin/start-corona.sh
@@ -24,7 +24,8 @@ bin=`cd "$bin"; pwd`
. "$bin"/hadoop-config.sh
# start corona daemons
-# start clustermanager first to minimize connection errors at startup
-"$bin"/hadoop-daemon.sh --config $HADOOP_CONF_DIR start coronaclustermanager
+# run start-proxyjt.sh first so that the clustermanager can be started correctly
"$bin"/start-proxyjt.sh --config $HADOOP_CONF_DIR
+sleep 1
+"$bin"/hadoop-daemon.sh --config $HADOOP_CONF_DIR start coronaclustermanager
"$bin"/hadoop-daemons.sh --config $HADOOP_CONF_DIR start coronatasktracker
diff --git a/bin/start-fsshellservice.sh b/bin/start-fsshellservice.sh
new file mode 100644
index 00000000..1c1d1ce7
--- /dev/null
+++ b/bin/start-fsshellservice.sh
@@ -0,0 +1,31 @@
+#!/usr/bin/env bash
+
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements. See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+usage="Usage: start-fsshellservice.sh"
+
+params=$#
+bin=`dirname "$0"`
+bin=`cd "$bin"; pwd`
+
+. "$bin"/hadoop-config.sh
+
+# get arguments
+if [ $# -ge 1 ]; then
+ echo $usage
+fi
+
+"$bin"/hadoop-daemon.sh --config $HADOOP_CONF_DIR start fsshellservice
diff --git a/bin/start-namespace-notifier.sh b/bin/start-namespace-notifier.sh
new file mode 100644
index 00000000..946bf4d2
--- /dev/null
+++ b/bin/start-namespace-notifier.sh
@@ -0,0 +1,34 @@
+#!/usr/bin/env bash
+
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements. See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+usage="Usage: start-namespace-notifier.sh"
+
+params=$#
+bin=`dirname "$0"`
+bin=`cd "$bin"; pwd`
+
+. "$bin"/hadoop-config.sh
+. "$bin"/../conf/hadoop-env.sh
+
+# get arguments
+if [ $# -ge 1 ]; then
+ echo $usage
+fi
+
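+# NOTIFIER_JMX_PORT is expected to be set in conf/hadoop-env.sh (sourced above)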
+export NOTIFIER_JMX_OPTS=" -Dcom.sun.management.jmxremote.port=$NOTIFIER_JMX_PORT -Dcom.sun.management.jmxremote.authenticate=false -Dcom.sun.management.jmxremote.ssl=false"
+
+"$bin"/hadoop-daemon.sh --config $HADOOP_CONF_DIR start notifier
diff --git a/bin/start-raidnode.sh b/bin/start-raidnode.sh
index 72f5cc16..b67fc44d 100755
--- a/bin/start-raidnode.sh
+++ b/bin/start-raidnode.sh
@@ -37,4 +37,4 @@ if [ -f "${HADOOP_CONF_DIR}/hadoop-env.sh" ]; then
fi
export HADOOP_DAEMON_OPTS=$HADOOP_RAIDNODE_OPTS
-"$bin"/hadoop-daemon.sh --config $HADOOP_CONF_DIR start org.apache.hadoop.raid.RaidNode
+"$bin"/hadoop-daemon.sh --config $HADOOP_CONF_DIR start raidnode
diff --git a/bin/stop-fsshellservice.sh b/bin/stop-fsshellservice.sh
new file mode 100644
index 00000000..271639f3
--- /dev/null
+++ b/bin/stop-fsshellservice.sh
@@ -0,0 +1,31 @@
+#!/usr/bin/env bash
+
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements. See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+usage="Usage: stop-fsshellservice.sh"
+
+params=$#
+bin=`dirname "$0"`
+bin=`cd "$bin"; pwd`
+
+. "$bin"/hadoop-config.sh
+
+# get arguments
+if [ $# -ge 1 ]; then
+ echo $usage
+fi
+
+"$bin"/hadoop-daemon.sh --config $HADOOP_CONF_DIR stop fsshellservice
diff --git a/bin/stop-namespace-notifier.sh b/bin/stop-namespace-notifier.sh
new file mode 100644
index 00000000..f29734f1
--- /dev/null
+++ b/bin/stop-namespace-notifier.sh
@@ -0,0 +1,31 @@
+#!/usr/bin/env bash
+
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements. See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+usage="Usage: stop-namespace-notifier.sh"
+
+params=$#
+bin=`dirname "$0"`
+bin=`cd "$bin"; pwd`
+
+. "$bin"/hadoop-config.sh
+
+# get arguments
+if [ $# -ge 1 ]; then
+ echo $usage
+fi
+
+"$bin"/hadoop-daemon.sh --config $HADOOP_CONF_DIR stop notifier
diff --git a/build.xml b/build.xml
index 20457e73..6f84bd0b 100644
--- a/build.xml
+++ b/build.xml
[build.xml hunks unrecoverable: the XML element content of this diff was
stripped during extraction, leaving only hunk headers and bare +/- markers.
The one surviving fragment sets the native library path:
  value="${build.native}/lib:${lib.dir}/native/${build.platform}:${snappy.lib}"/>]
diff --git a/conf/hadoop-env.sh b/conf/hadoop-env.sh
index ecf3eea8..2d8bb4fe 100644
--- a/conf/hadoop-env.sh
+++ b/conf/hadoop-env.sh
@@ -1,68 +1,74 @@
-# Set Hadoop-specific environment variables here.
+# This if statement ensures that this file will be sourced only once
+if [[ "$IS_HADOOP_ENV_ALREADY_SOURCED" != "true" ]]; then
+ export IS_HADOOP_ENV_ALREADY_SOURCED="true";
-# The only required environment variable is JAVA_HOME. All others are
-# optional. When running a distributed configuration it is best to
-# set JAVA_HOME in this file, so that it is correctly defined on
-# remote nodes.
+ # Set Hadoop-specific environment variables here.
-# The java implementation to use. Required.
-# export JAVA_HOME=/usr/lib/j2sdk1.5-sun
+ # The only required environment variable is JAVA_HOME. All others are
+ # optional. When running a distributed configuration it is best to
+ # set JAVA_HOME in this file, so that it is correctly defined on
+ # remote nodes.
-# Extra Java CLASSPATH elements. Optional.
-# export HADOOP_CLASSPATH=
+ # The java implementation to use. Required.
+ # export JAVA_HOME=/usr/lib/j2sdk1.5-sun
-# The maximum amount of heap to use, in MB. Default is 1000.
-# export HADOOP_HEAPSIZE=2000
+ # Extra Java CLASSPATH elements. Optional.
+ # export HADOOP_CLASSPATH=
-# Extra Java runtime options. Empty by default.
-# export HADOOP_OPTS=-server
+ # The maximum amount of heap to use, in MB. Default is 1000.
+ # export HADOOP_HEAPSIZE=2000
-# Command specific options appended to HADOOP_OPTS when specified
-export HADOOP_NAMENODE_OPTS="-Dcom.sun.management.jmxremote $HADOOP_NAMENODE_OPTS"
-export HADOOP_SECONDARYNAMENODE_OPTS="-Dcom.sun.management.jmxremote $HADOOP_SECONDARYNAMENODE_OPTS"
-export HADOOP_DATANODE_OPTS="-Dcom.sun.management.jmxremote $HADOOP_DATANODE_OPTS"
-export HADOOP_BALANCER_OPTS="-Dcom.sun.management.jmxremote $HADOOP_BALANCER_OPTS"
-export HADOOP_JOBTRACKER_OPTS="-Dcom.sun.management.jmxremote $HADOOP_JOBTRACKER_OPTS"
-export HADOOP_RAIDNODE_OPTS="-Dcom.sun.management.jmxremote $HADOOP_RAIDNODE_OPTS"
-export HADOOP_NAMENODE_OPTS="-Dcom.sun.management.jmxremote.port=8998 -Dcom.sun.management.jmxremote.authenticate=false -Dcom.sun.management.jmxremote.ssl=false"
+ # Extra Java runtime options. Empty by default.
+ # export HADOOP_OPTS=-server
-# The only user who can start hadoop daemons.
-# If this is not set, any user can start hadoop daemons.
-export HADOOP_USERNAME="hadoop"
+ # Command specific options appended to HADOOP_OPTS when specified
+ export HADOOP_NAMENODE_OPTS="-Dcom.sun.management.jmxremote $HADOOP_NAMENODE_OPTS"
+ export HADOOP_SECONDARYNAMENODE_OPTS="-Dcom.sun.management.jmxremote $HADOOP_SECONDARYNAMENODE_OPTS"
+ export HADOOP_DATANODE_OPTS="-Dcom.sun.management.jmxremote $HADOOP_DATANODE_OPTS"
+ export HADOOP_BALANCER_OPTS="-Dcom.sun.management.jmxremote $HADOOP_BALANCER_OPTS"
+ export HADOOP_JOBTRACKER_OPTS="-Dcom.sun.management.jmxremote $HADOOP_JOBTRACKER_OPTS"
+ export HADOOP_RAIDNODE_OPTS="-Dcom.sun.management.jmxremote $HADOOP_RAIDNODE_OPTS"
+ export HADOOP_NAMENODE_OPTS="-Dcom.sun.management.jmxremote.port=8998 -Dcom.sun.management.jmxremote.authenticate=false -Dcom.sun.management.jmxremote.ssl=false"
-# Java Runtime garbage collection options to pass to all Hadoop
-# servers (Namenode, Jobtracker, Datanode, Tasktracker). This must end
-# with a colon ; to which the dynamically generated gc log filename will
-# be appended to. The below defaults work for the Sun JVM, for example
-# in IBM GC, use '-Xverbosegclog:'.
-#export HADOOP_GC_LOG_OPTS="-XX:+PrintGCDateStamps -XX:+PrintGCDetails -Xloggc:"
+ # The only user who can start hadoop daemons.
+ # If this is not set, any user can start hadoop daemons.
+ export HADOOP_USERNAME="hadoop"
-# export HADOOP_TASKTRACKER_OPTS=
-# The following applies to multiple commands (fs, dfs, fsck, distcp etc)
-# export HADOOP_CLIENT_OPTS
+ # Java Runtime garbage collection options to pass to all Hadoop
+ # servers (Namenode, Jobtracker, Datanode, Tasktracker). This must end
+ # with a colon ':' to which the dynamically generated gc log filename
+ # will be appended. The defaults below work for the Sun JVM; for IBM GC,
+ # use '-Xverbosegclog:' instead.
+ #export HADOOP_GC_LOG_OPTS="-XX:+PrintGCDateStamps -XX:+PrintGCDetails -Xloggc:"
-# Extra ssh options. Empty by default.
-# export HADOOP_SSH_OPTS="-o ConnectTimeout=1 -o SendEnv=HADOOP_CONF_DIR"
+ # export HADOOP_TASKTRACKER_OPTS=
+ # The following applies to multiple commands (fs, dfs, fsck, distcp etc)
+ # export HADOOP_CLIENT_OPTS
-# Where log files are stored. $HADOOP_HOME/logs by default.
-# export HADOOP_LOG_DIR=${HADOOP_HOME}/logs
+ # Extra ssh options. Empty by default.
+ # export HADOOP_SSH_OPTS="-o ConnectTimeout=1 -o SendEnv=HADOOP_CONF_DIR"
-# File naming remote slave hosts. $HADOOP_HOME/conf/slaves by default.
-# export HADOOP_SLAVES=${HADOOP_HOME}/conf/slaves
+ # Where log files are stored. $HADOOP_HOME/logs by default.
+ # export HADOOP_LOG_DIR=${HADOOP_HOME}/logs
-# host:path where hadoop code should be rsync'd from. Unset by default.
-# export HADOOP_MASTER=master:/home/$USER/src/hadoop
+ # File naming remote slave hosts. $HADOOP_HOME/conf/slaves by default.
+ # export HADOOP_SLAVES=${HADOOP_HOME}/conf/slaves
-# Seconds to sleep between slave commands. Unset by default. This
-# can be useful in large clusters, where, e.g., slave rsyncs can
-# otherwise arrive faster than the master can service them.
-# export HADOOP_SLAVE_SLEEP=0.1
+ # host:path where hadoop code should be rsync'd from. Unset by default.
+ # export HADOOP_MASTER=master:/home/$USER/src/hadoop
-# The directory where pid files are stored. /tmp by default.
-# export HADOOP_PID_DIR=/var/hadoop/pids
+ # Seconds to sleep between slave commands. Unset by default. This
+ # can be useful in large clusters, where, e.g., slave rsyncs can
+ # otherwise arrive faster than the master can service them.
+ # export HADOOP_SLAVE_SLEEP=0.1
-# A string representing this instance of hadoop. $USER by default.
-# export HADOOP_IDENT_STRING=$USER
+ # The directory where pid files are stored. /tmp by default.
+ # export HADOOP_PID_DIR=/var/hadoop/pids
-# The scheduling priority for daemon processes. See 'man nice'.
-# export HADOOP_NICENESS=10
+ # A string representing this instance of hadoop. $USER by default.
+ # export HADOOP_IDENT_STRING=$USER
+
+ # The scheduling priority for daemon processes. See 'man nice'.
+ # export HADOOP_NICENESS=10
+
+fi
diff --git a/conf/hadoop-env.sh.template b/conf/hadoop-env.sh.template
index ecf3eea8..2d8bb4fe 100644
--- a/conf/hadoop-env.sh.template
+++ b/conf/hadoop-env.sh.template
@@ -1,68 +1,74 @@
-# Set Hadoop-specific environment variables here.
+# This if statement ensures that this file will be sourced only once
+if [[ "$IS_HADOOP_ENV_ALREADY_SOURCED" != "true" ]]; then
+ export IS_HADOOP_ENV_ALREADY_SOURCED="true";
-# The only required environment variable is JAVA_HOME. All others are
-# optional. When running a distributed configuration it is best to
-# set JAVA_HOME in this file, so that it is correctly defined on
-# remote nodes.
+ # Set Hadoop-specific environment variables here.
-# The java implementation to use. Required.
-# export JAVA_HOME=/usr/lib/j2sdk1.5-sun
+ # The only required environment variable is JAVA_HOME. All others are
+ # optional. When running a distributed configuration it is best to
+ # set JAVA_HOME in this file, so that it is correctly defined on
+ # remote nodes.
-# Extra Java CLASSPATH elements. Optional.
-# export HADOOP_CLASSPATH=
+ # The java implementation to use. Required.
+ # export JAVA_HOME=/usr/lib/j2sdk1.5-sun
-# The maximum amount of heap to use, in MB. Default is 1000.
-# export HADOOP_HEAPSIZE=2000
+ # Extra Java CLASSPATH elements. Optional.
+ # export HADOOP_CLASSPATH=
-# Extra Java runtime options. Empty by default.
-# export HADOOP_OPTS=-server
+ # The maximum amount of heap to use, in MB. Default is 1000.
+ # export HADOOP_HEAPSIZE=2000
-# Command specific options appended to HADOOP_OPTS when specified
-export HADOOP_NAMENODE_OPTS="-Dcom.sun.management.jmxremote $HADOOP_NAMENODE_OPTS"
-export HADOOP_SECONDARYNAMENODE_OPTS="-Dcom.sun.management.jmxremote $HADOOP_SECONDARYNAMENODE_OPTS"
-export HADOOP_DATANODE_OPTS="-Dcom.sun.management.jmxremote $HADOOP_DATANODE_OPTS"
-export HADOOP_BALANCER_OPTS="-Dcom.sun.management.jmxremote $HADOOP_BALANCER_OPTS"
-export HADOOP_JOBTRACKER_OPTS="-Dcom.sun.management.jmxremote $HADOOP_JOBTRACKER_OPTS"
-export HADOOP_RAIDNODE_OPTS="-Dcom.sun.management.jmxremote $HADOOP_RAIDNODE_OPTS"
-export HADOOP_NAMENODE_OPTS="-Dcom.sun.management.jmxremote.port=8998 -Dcom.sun.management.jmxremote.authenticate=false -Dcom.sun.management.jmxremote.ssl=false"
+ # Extra Java runtime options. Empty by default.
+ # export HADOOP_OPTS=-server
-# The only user who can start hadoop daemons.
-# If this is not set, any user can start hadoop daemons.
-export HADOOP_USERNAME="hadoop"
+ # Command specific options appended to HADOOP_OPTS when specified
+ export HADOOP_NAMENODE_OPTS="-Dcom.sun.management.jmxremote $HADOOP_NAMENODE_OPTS"
+ export HADOOP_SECONDARYNAMENODE_OPTS="-Dcom.sun.management.jmxremote $HADOOP_SECONDARYNAMENODE_OPTS"
+ export HADOOP_DATANODE_OPTS="-Dcom.sun.management.jmxremote $HADOOP_DATANODE_OPTS"
+ export HADOOP_BALANCER_OPTS="-Dcom.sun.management.jmxremote $HADOOP_BALANCER_OPTS"
+ export HADOOP_JOBTRACKER_OPTS="-Dcom.sun.management.jmxremote $HADOOP_JOBTRACKER_OPTS"
+ export HADOOP_RAIDNODE_OPTS="-Dcom.sun.management.jmxremote $HADOOP_RAIDNODE_OPTS"
+ export HADOOP_NAMENODE_OPTS="-Dcom.sun.management.jmxremote.port=8998 -Dcom.sun.management.jmxremote.authenticate=false -Dcom.sun.management.jmxremote.ssl=false"
-# Java Runtime garbage collection options to pass to all Hadoop
-# servers (Namenode, Jobtracker, Datanode, Tasktracker). This must end
-# with a colon ; to which the dynamically generated gc log filename will
-# be appended to. The below defaults work for the Sun JVM, for example
-# in IBM GC, use '-Xverbosegclog:'.
-#export HADOOP_GC_LOG_OPTS="-XX:+PrintGCDateStamps -XX:+PrintGCDetails -Xloggc:"
+ # The only user who can start hadoop daemons.
+ # If this is not set, any user can start hadoop daemons.
+ export HADOOP_USERNAME="hadoop"
-# export HADOOP_TASKTRACKER_OPTS=
-# The following applies to multiple commands (fs, dfs, fsck, distcp etc)
-# export HADOOP_CLIENT_OPTS
+ # Java Runtime garbage collection options to pass to all Hadoop
+ # servers (Namenode, Jobtracker, Datanode, Tasktracker). This must end
+ # with a colon ':' to which the dynamically generated gc log filename
+ # will be appended. The defaults below work for the Sun JVM; for IBM GC,
+ # use '-Xverbosegclog:' instead.
+ #export HADOOP_GC_LOG_OPTS="-XX:+PrintGCDateStamps -XX:+PrintGCDetails -Xloggc:"
-# Extra ssh options. Empty by default.
-# export HADOOP_SSH_OPTS="-o ConnectTimeout=1 -o SendEnv=HADOOP_CONF_DIR"
+ # export HADOOP_TASKTRACKER_OPTS=
+ # The following applies to multiple commands (fs, dfs, fsck, distcp etc)
+ # export HADOOP_CLIENT_OPTS
-# Where log files are stored. $HADOOP_HOME/logs by default.
-# export HADOOP_LOG_DIR=${HADOOP_HOME}/logs
+ # Extra ssh options. Empty by default.
+ # export HADOOP_SSH_OPTS="-o ConnectTimeout=1 -o SendEnv=HADOOP_CONF_DIR"
-# File naming remote slave hosts. $HADOOP_HOME/conf/slaves by default.
-# export HADOOP_SLAVES=${HADOOP_HOME}/conf/slaves
+ # Where log files are stored. $HADOOP_HOME/logs by default.
+ # export HADOOP_LOG_DIR=${HADOOP_HOME}/logs
-# host:path where hadoop code should be rsync'd from. Unset by default.
-# export HADOOP_MASTER=master:/home/$USER/src/hadoop
+ # File naming remote slave hosts. $HADOOP_HOME/conf/slaves by default.
+ # export HADOOP_SLAVES=${HADOOP_HOME}/conf/slaves
-# Seconds to sleep between slave commands. Unset by default. This
-# can be useful in large clusters, where, e.g., slave rsyncs can
-# otherwise arrive faster than the master can service them.
-# export HADOOP_SLAVE_SLEEP=0.1
+ # host:path where hadoop code should be rsync'd from. Unset by default.
+ # export HADOOP_MASTER=master:/home/$USER/src/hadoop
-# The directory where pid files are stored. /tmp by default.
-# export HADOOP_PID_DIR=/var/hadoop/pids
+ # Seconds to sleep between slave commands. Unset by default. This
+ # can be useful in large clusters, where, e.g., slave rsyncs can
+ # otherwise arrive faster than the master can service them.
+ # export HADOOP_SLAVE_SLEEP=0.1
-# A string representing this instance of hadoop. $USER by default.
-# export HADOOP_IDENT_STRING=$USER
+ # The directory where pid files are stored. /tmp by default.
+ # export HADOOP_PID_DIR=/var/hadoop/pids
-# The scheduling priority for daemon processes. See 'man nice'.
-# export HADOOP_NICENESS=10
+ # A string representing this instance of hadoop. $USER by default.
+ # export HADOOP_IDENT_STRING=$USER
+
+ # The scheduling priority for daemon processes. See 'man nice'.
+ # export HADOOP_NICENESS=10
+
+fi
diff --git a/conf/log4j.properties b/conf/log4j.properties
index 402f3a71..7e04cbc1 100644
--- a/conf/log4j.properties
+++ b/conf/log4j.properties
@@ -34,7 +34,7 @@ log4j.appender.DRFA.DatePattern=.yyyy-MM-dd
log4j.appender.DRFA.layout=org.apache.log4j.PatternLayout
# Pattern format: Date LogLevel LoggerName LogMessage
-log4j.appender.DRFA.layout.ConversionPattern=%d{ISO8601} %p %c: %m%n
+log4j.appender.DRFA.layout.ConversionPattern=%d{ISO8601} %p %c{1}: %m%n
# Debugging Pattern format
#log4j.appender.DRFA.layout.ConversionPattern=%d{ISO8601} %-5p %c{2} (%F:%M(%L)) - %m%n
diff --git a/conf/log4j.properties.scribeappender b/conf/log4j.properties.scribeappender
new file mode 100644
index 00000000..958b0f22
--- /dev/null
+++ b/conf/log4j.properties.scribeappender
@@ -0,0 +1,135 @@
+# Define some default values that can be overridden by system properties
+hadoop.root.logger=INFO,console
+hadoop.log.dir=.
+hadoop.log.file=hadoop.log
+
+# Define the root logger to the system property "hadoop.root.logger".
+log4j.rootLogger=${hadoop.root.logger}, EventCounter
+
+# Logging Threshold
+log4j.threshhold=ALL
+
+#
+# Daily Rolling File Appender
+#
+
+log4j.appender.DRFA=org.apache.log4j.DailyRollingFileAppender
+log4j.appender.DRFA.File=${hadoop.log.dir}/${hadoop.log.file}
+
+# Rollover at midnight
+#log4j.appender.DRFA.DatePattern=.yyyy-MM-dd
+
+# Rollover at the top of every hour
+log4j.appender.DRFA.DatePattern=.yyyy-MM-dd-HH
+
+# 30-day backup
+#log4j.appender.DRFA.MaxBackupIndex=30
+log4j.appender.DRFA.layout=org.apache.log4j.PatternLayout
+
+# Pattern format: Date LogLevel LoggerName LogMessage
+log4j.appender.DRFA.layout.ConversionPattern=%d{ISO8601} %p %c{1}: %m%n
+# Debugging Pattern format
+#log4j.appender.DRFA.layout.ConversionPattern=%d{ISO8601} %-5p %c{2} (%F:%M(%L)) - %m%n
+
+#
+# logmonitor-understood format
+# This format is the one that logmonitor can understand. It is heavyweight, so
+# it should only be used for WARN and above.
+#
+
+log4j.appender.LM=org.apache.log4j.DailyRollingFileAppender
+log4j.appender.LM.threshold=WARN
+log4j.appender.LM.File=${hadoop.log.dir}/logmonitor-${hadoop.log.file}
+log4j.appender.LM.DatePattern=.yyyy-MM-dd-HH
+log4j.appender.LM.layout=org.apache.log4j.PatternLayout
+log4j.appender.LM.layout.ConversionPattern=[%c{3},%L] [%d{EEE MMM dd HH:mm:ss yyyy}] %p: %m%n
+
+#
+# console
+# Add "console" to rootlogger above if you want to use this
+#
+
+log4j.appender.console=org.apache.log4j.ConsoleAppender
+log4j.appender.console.target=System.err
+log4j.appender.console.layout=org.apache.log4j.PatternLayout
+log4j.appender.console.layout.ConversionPattern=%d{yy/MM/dd HH:mm:ss} %p %c{2}: %m%n
+
+#
+# TaskLog Appender
+#
+
+#Default values
+hadoop.tasklog.taskid=null
+hadoop.tasklog.noKeepSplits=4
+hadoop.tasklog.totalLogFileSize=100
+hadoop.tasklog.purgeLogSplits=true
+hadoop.tasklog.logsRetainHours=12
+
+log4j.appender.TLA=org.apache.hadoop.mapred.TaskLogAppender
+log4j.appender.TLA.taskId=${hadoop.tasklog.taskid}
+log4j.appender.TLA.totalLogFileSize=${hadoop.tasklog.totalLogFileSize}
+
+log4j.appender.TLA.layout=org.apache.log4j.PatternLayout
+log4j.appender.TLA.layout.ConversionPattern=%d{ISO8601} %p %c: %m%n
+
+#
+# Rolling File Appender
+#
+
+#log4j.appender.RFA=org.apache.log4j.RollingFileAppender
+#log4j.appender.RFA.File=${hadoop.log.dir}/${hadoop.log.file}
+
+# Logfile size and 30-day backups
+#log4j.appender.RFA.MaxFileSize=1MB
+#log4j.appender.RFA.MaxBackupIndex=30
+
+#log4j.appender.RFA.layout=org.apache.log4j.PatternLayout
+#log4j.appender.RFA.layout.ConversionPattern=%d{ISO8601} %-5p %c{2} - %m%n
+#log4j.appender.RFA.layout.ConversionPattern=%d{ISO8601} %-5p %c{2} (%F:%M(%L)) - %m%n
+
+#
+# FSNamesystem Audit logging
+# All audit events are logged at INFO level
+#
+log4j.logger.org.apache.hadoop.hdfs.server.FSNamesystem.audit=INFO
+
+# Custom Logging levels
+
+#log4j.logger.org.apache.hadoop.mapred.JobTracker=DEBUG
+#log4j.logger.org.apache.hadoop.mapred.TaskTracker=DEBUG
+#log4j.logger.org.apache.hadoop.fs.FSNamesystem=DEBUG
+
+# Jets3t library
+log4j.logger.org.jets3t.service.impl.rest.httpclient.RestS3Service=ERROR
+
+#
+# Event Counter Appender
+# Sends counts of logging messages at different severity levels to Hadoop Metrics.
+#
+log4j.appender.EventCounter=org.apache.hadoop.metrics.jvm.EventCounter
+
+# Special appender for RAID metrics.
+log4j.logger.RaidMetrics=INFO,SCRIBE_RAID_METRICS_APPENDER
+
+# RaidMetrics
+log4j.appender.SCRIBE_RAID_METRICS_APPENDER=com.facebook.logging.ScribeAppender
+log4j.appender.SCRIBE_RAID_METRICS_APPENDER.tag=${hadoop.tasklog.taskid}
+log4j.appender.SCRIBE_RAID_METRICS_APPENDER.application=raid
+log4j.appender.SCRIBE_RAID_METRICS_APPENDER.installation=${hadoop.installationid}
+log4j.appender.SCRIBE_RAID_METRICS_APPENDER.layout=org.apache.log4j.PatternLayout
+log4j.appender.SCRIBE_RAID_METRICS_APPENDER.layout.ConversionPattern=%d{yy/MM/dd HH:mm:ss} %p %c{2}: %m%n
+log4j.appender.SCRIBE_RAID_METRICS_APPENDER.Threshold=INFO
+
+#
+# This is a scribe appender. The data will be sent directly to scribe
+#
+#
+log4j.appender.scribe=com.facebook.logging.ScribeAppender
+log4j.appender.scribe.tag=${hadoop.tasklog.taskid}
+log4j.appender.scribe.application=${hadoop.application}
+log4j.appender.scribe.installation=${hadoop.installationid}
+log4j.appender.scribe.layout=org.apache.log4j.PatternLayout
+log4j.appender.scribe.layout.ConversionPattern=%d{yy/MM/dd HH:mm:ss} %p %c{2}: %m%n
+log4j.appender.scribe.threshold=WARN
+
+log4j.logger.com.facebook.smc.SmcUtil=INFO,console
diff --git a/copy-hdfs-jars-to-maven.sh b/copy-hdfs-jars-to-maven.sh
index 7ffdf8f1..45033901 100755
--- a/copy-hdfs-jars-to-maven.sh
+++ b/copy-hdfs-jars-to-maven.sh
@@ -5,59 +5,68 @@
# and test) built in titan/VENDOR/hadoop-0.20/.
#
+set -e -u -o pipefail
BASEDIR=`dirname $0`
cd ${BASEDIR}
-if [ ! -f build/hadoop-0.20.1-dev-core.jar ]; then
- if [ ! -f build/hadoop-0.20-core.jar ]; then
- echo "core jar not found. Running 'ant jar'..."
- ant jar | grep BUILD;
- fi
+VERSION=$( ant -q print-version | head -1 | awk '{print $2}' )
+if [ -z "$VERSION" ]; then
+ echo "Unable to determine Hadoop version" >&2
+ exit 1
fi
-if [ ! -f build/hadoop-0.20.1-dev-test.jar ]; then
- if [ ! -f build/hadoop-0.20-test.jar ]; then
- echo "test jar not found. Running 'ant jar-test'..."
- ant jar-test | grep BUILD;
- fi
+TARGETS=""
+
+CORE_JAR=build/hadoop-$VERSION-core.jar
+if [ ! -f $CORE_JAR ]; then
+ TARGETS="$TARGETS jar"
fi
+CORE_POM=build/ivy/maven/generated.pom
+if [ ! -f $CORE_POM ]; then
+ TARGETS="$TARGETS makepom"
+fi
-#
-# The names of core/test jar name depend
-# on whether they were generated using
-# build_all.sh script or just the vanilla
-# simple ant jar/jar-test
-#
-if [ -f build/hadoop-0.20.1-dev-core.jar ]; then
- CORE_JAR=build/hadoop-0.20.1-dev-core.jar
-else
- CORE_JAR=build/hadoop-0.20-core.jar
+TEST_JAR=build/hadoop-$VERSION-test.jar
+if [ ! -f $TEST_JAR ]; then
+ TARGETS="$TARGETS jar-test"
fi
-if [ -f build/hadoop-0.20.1-dev-test.jar ]; then
- TEST_JAR=build/hadoop-0.20.1-dev-test.jar
-else
- TEST_JAR=build/hadoop-0.20-test.jar
+if [ -n "$TARGETS" ]; then
+ ant $TARGETS
fi
+# Clear the optional flag on Hadoop dependencies so these dependencies can be
+# included transitively in other projects.
+CORE_POM_MODIFIED=$CORE_POM.new
+./edit_generated_pom.py >$CORE_POM_MODIFIED
+
echo "** Publishing hadoop* core & test jars "
echo "** to "
echo "** your local maven repo (~/.m2/repository). "
echo "** HBase builds will pick up the HDFS* jars from the local maven repo."
-mvn install:install-file \
- -DgeneratePom=true \
+# When running under Commander, use the setting.xml file that specifies
+# the localRepository for a central mvn repo that can be shared between
+# all of the build/test agents
+OPTS=""
+if [[ -n "${COMMANDER_WORKSPACE:-}" || "$USER" == "svcscm" ]]; then
+ OPTS="-s /scm/git/electric/hadoop_builds/settings.xml"
+fi
+
+mvn $OPTS install:install-file \
+ -DpomFile=$CORE_POM_MODIFIED \
-DgroupId=org.apache.hadoop \
-DartifactId=hadoop-core \
- -Dversion=0.20 \
+ -Dversion=$VERSION \
-Dpackaging=jar \
-Dfile=${CORE_JAR}
-mvn install:install-file \
+mvn $OPTS install:install-file \
-DgeneratePom=true \
-DgroupId=org.apache.hadoop \
-DartifactId=hadoop-test \
- -Dversion=0.20 \
+ -Dversion=$VERSION \
-Dpackaging=jar \
-Dfile=${TEST_JAR}
+
diff --git a/edit_generated_pom.py b/edit_generated_pom.py
new file mode 100644
index 00000000..c2f88d39
--- /dev/null
+++ b/edit_generated_pom.py
@@ -0,0 +1,60 @@
+#!/usr/bin/env python
+
+'''
+Reads the automatically generated Hadoop pom file, removes the "optional"
+flag from dependencies so that they could be included transitively into other
+projects such as HBase, and removes certain dependencies that are not required
+and could even break the code (e.g. an old version of xerces). Writes the
+modified project object model XML to standard output.
+'''
+
+import os
+import re
+import sys
+
+from xml.dom.minidom import parse
+
+NON_TRANSITIVE_DEPS = [
+ # Old version, breaks HBase
+ 'xerces',
+
+ # Not used in production
+ 'checkstyle',
+ 'jdiff',
+
+ # A release audit tool, probably not used in prod
+ 'rat-lib',
+]
+
+POM_FILE = 'build/ivy/maven/generated.pom'
+doc = parse(POM_FILE)
+deps = doc.getElementsByTagName('dependencies')[0]
+
+for dep in deps.getElementsByTagName('dependency'):
+ for c in dep.childNodes:
+ if (c.nodeName == 'artifactId' and
+ c.firstChild and
+ c.firstChild.nodeValue and
+ c.firstChild.nodeValue.strip() in NON_TRANSITIVE_DEPS):
+ deps.removeChild(dep)
+ break
+
+ for o in dep.getElementsByTagName('optional'):
+ dep.removeChild(o)
+
+out_lines = doc.toprettyxml(indent=' ' * 2)
+lines = []
+for l in out_lines.split('\n'):
+ l = l.rstrip()
+ if l:
+ lines.append(l)
+output = '\n'.join(lines)
+
+# Make sure <tag>value</tag> stays on the same line: group 1 is the opening
+# tag, group 2 the tag name, group 3 the element text, group 4 the closing tag.
+output = re.sub(
+    r'(<([a-zA-Z]+)>)'
+    r'\s*([^<>]+?)\s*'
+    r'(</\2>)', r'\1\3\4', output)
+
+print output
+
diff --git a/hdfs-autoconf/README.md b/hdfs-autoconf/README.md
new file mode 100644
index 00000000..e47bbfa2
--- /dev/null
+++ b/hdfs-autoconf/README.md
@@ -0,0 +1,213 @@
+What is this?
+=============
+
+This is an autoconfigurator and autolauncher for a local HDFS cluster.
+It is mainly meant for developer use, and it provides you with a bunch of
+scripts for setting everything up in a minute.. or maybe two.
+Enjoy!
+
+DISCLAIMER: The scripts are written and tested on GNU systems and rely
+on GNU tools. At least two of them (`sed` & `readlink`) are known
+to be incompatible with their BSD implementations.
+
+
+
+STARTING CLUSTER
+================
+
+1. Make sure you have a zookeeper quorum started somewhere and that file
+ `config-meta/avatar-shared.sed` has a `zookeeper-quorum` entry that points
+ to the quorum. If not, you can start a local zookeeper via
+ `zookeeper.sh start` command
+2. `./build.sh` - builds all sources needed to start HDFS cluster
+3. `./avatar-format` - formats cluster directories
+4. `./start-dev-cluster.sh --count 3` - starts a local cluster with 3 datanodes.
+
+[OPTIONAL] If you want to change any `core-site.xml` or `hdfs-site.xml`
+ properties, make the necessary changes in the `config-templates/core-site.xml.template` and
+ `config-templates/hdfs-site.xml.template` files. If you want to configure cluster
+ directories, please refer to FAQ questions "Where do namenodes store their data?" and
+ "Where do datanodes store their data?".
+
+
+
+F.A.Q
+=====
+
+Where do I find cluster log files?
+----------------------------------
+
+The logs directory is specified by the `$LOGS_DIRECTORY` variable, which defaults to
+`$HADOOP_VERSION/logs`.
+
+
+Where do namenodes store their data?
+------------------------------------
+
+1. The directory that is used as a local directory for the active namenode is
+ specified in the `./config-meta/avatar-zero.sed` file.
+2. Similar to the active namenode, the local directory for the standby is
+   specified in the `./config-meta/avatar-one.sed` file.
+3. The shared namenodes directory is specified in the
+ `./config-meta/avatar-shared.sed` file
+
+
+Where do datanodes store their data?
+------------------------------------
+
+Each datanode has a set of volumes, and the autotool maps volumes
+to distinct local directories. These directories are specified in the
+datanode configuration file, which is only one line long and has the
+following entry:
+
+```
+s:{{DataNode-volumes}}:<volume-dir>[,<volume-dir>...]:g
+```
+
+When the cluster is started with the `./start-dev-cluster.sh --count 5` command,
+each of the 5 datanodes is started with a configuration file produced from
+the `./config-meta/avatar-datanode.template` template. Consider the following
+template:
+
+```
+s:{{DataNode-volumes}}:/tmp/hadoop-datanode-XXX-vol0/,/tmp/hadoop-datanode-XXX-vol1/:g
+```
+
+This would mean that the first datanode has its two volumes mapped to the
+`/tmp/hadoop-datanode-1-vol0/` and `/tmp/hadoop-datanode-1-vol1/` directories, and the
+fourth one has `/tmp/hadoop-datanode-4-vol0/` and `/tmp/hadoop-datanode-4-vol1/`.
+That is because the "XXX" sequence in the `avatar-datanode.template` file is
+substituted with the sequential datanode number to give each datanode unique
+directories on the local machine.
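+
+A minimal sketch of that substitution (illustrative; the tool's exact
+invocation may differ):
+
+```
+# derive the sed config for datanode 4 from the template
+sed 's/XXX/4/g' config-meta/avatar-datanode.template \
+    > config-meta/avatar-datanode-4.sed
+```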
+
+
+What is the format of the files in the `config-meta` directory?
+-------------------------------------------------------
+
+These files are sed (Stream Editor) scripts. Though full sed syntax can be
+daunting, the autoconf tool uses only the `substitute` command.
+
+The substitution command basically looks like this:
+
+```
+s:cat:dog:g
+```
+
+This example substitutes 'dog' for every 'cat'. The letter 's' stands for the
+'substitute' command, and the trailing 'g' is a flag that makes sed substitute
+every occurrence of 'cat'; otherwise only the first occurrence of
+'cat' on each line would be replaced.
+
+Any character can be used as the command delimiter. That said, the following
+are fully equivalent to the previous example:
+```
+ s_cat_dog_g
+ s%cat%dog%g
+ s/cat/dog/g
+```
+
+This feature can be used to avoid escaping inside sed scripts. Consider
+the following examples:
+```
+ s:some-folder:/tmp/foo:g
+ s_URL_localhost:7777_g
+```
+
+
+How do I add a new datanode configuration file?
+---------------------------------------------
+
+1. Create a file whose name matches the 'avatar-datanode-*.sed' pattern
+(the naming pattern for datanode configuration files is specified by the
+`$DATANODE_CONFIG_FILES` variable in the `config.sh` file).
+
+2. Fill the file with the following content:
+```
+s:{{DataNode-volumes}}:<volume-dir>[,<volume-dir>...]:g
+```
+
+
+What is an example of a datanode config file with multiple volumes?
+-----------------------------------------------------------------
+
+A datanode config with two volumes, each residing in its own directory, looks
+like this:
+
+```
+s:{{DataNode-volumes}}:/tmp/mydatanode-volume-1/,/tmp/mydatanode-volume-2/:g
+```
+
+So the directories should be listed one after another, separated by a comma
+delimiter.
+NOTE: Make sure you do not put any spaces between them!
+
+
+What exactly does the autoconf tool do?
+-----------------------------------
+
+Whenever the autoconf tool starts an HDFS instance, it performs the following
+sequence of actions:
+
+1. Picks template files from the `config-templates` directory
+2. Runs the `sed` scripts from the `config-meta` directory over them
+3. Puts results of sed execution to the `hadoop-0.20/bin` directory (the path
+ to `hadoop-0.20` directory is specified via `$HADOOP_VERSION`)
+4. Launches the HDFS instance
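+
+A hand-run equivalent of steps 1-3 (illustrative; the tool writes its results
+into the `hadoop-0.20/bin` directory itself):
+
+```
+sed -f config-meta/avatar-zero.sed \
+    config-templates/avatar-site.xml.template > avatar-site.xml
+```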
+
+
+PRO stuff: multiple hadoop checkouts
+------------------------------------
+
+To switch between multiple hadoop checkouts, just edit the `./config.sh` file,
+setting the `$HADOOP_VERSION` variable to the path of the checkout you want.
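+
+Assuming `config.sh` keeps its default layout (the path below is illustrative):
+
+```
+export HADOOP_VERSION=/home/$USER/src/another-checkout/hadoop-0.20
+```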
+
+
+
+Files overview
+==============
+
+Client scripts
+--------------
+
+This is the list of scripts that are designed to be used by the user. For more
+information, you can refer to the source code of each script or just
+run it with the `--help` argument.
+
+* `./build.sh` - builds everything
+* `./avatar-format` - formats directories for avatar namenodes (both active and
+ standby)
+* `./avatar-zero-start` - starts active avatar
+* `./avatar-one-start` - starts standby avatar
+* `./avatar-datanode-start` - allows you to choose a config and start a datanode
+  instance configured according to it.
+* `./start-dev-cluster.sh` - starts all the nodes as daemons for the local cluster
+* `./stop-dev-cluster.sh` - stops an instantiated developer cluster (simply killing
+  all the processes with `avatar` in the name)
+* `./zookeeper.sh` - this script is used to start and stop a local zookeeper
+  instance. Zookeeper is absolutely necessary for the cluster functioning, and
+  it is started and stopped automatically with the cluster
+
+
+Other directory files
+---------------------
+
+* `./config-meta` - the directory that contains all the options for the local
+ cluster
+ - `./config-meta/avatar-shared.sed` - configuration of shared directories, used by
+ both the Active and Standby avatar nodes
+ - `./config-meta/avatar-zero.sed` - configuration of local directories for node zero
+ - `./config-meta/avatar-one.sed` - configuration of local directories for node one
+ - `./config-meta/avatar-datanode*.sed` - configuration files for datanodes, one file per
+ node.
+ - `./config-meta/avatar-datanode.template` - configuration file that is used
+ to automatically generate datanode configuration files. Read more about this
+ file in the FIXME
+* `./config-templates` - stores all the files over which the substitutions are run.
+* `./launchpad` - stores generated scripts; should not be used
+ unless you _really_ know what you are doing.
+* `./scripts` - here you can find the scripts that do the dirty work
+* `./README.md` - a markdown README in the best GitHub traditions.
+* `./config.sh` - this file exports a `$HADOOP_VERSION` variable as well as
+ a couple of other variables. You might refer to this file often if you have
+ multiple hadoop checkouts
+
diff --git a/hdfs-autoconf/avatar-datanode-start b/hdfs-autoconf/avatar-datanode-start
new file mode 100644
index 00000000..f2fe90aa
--- /dev/null
+++ b/hdfs-autoconf/avatar-datanode-start
@@ -0,0 +1,133 @@
+#!/bin/bash
+
+# Usage: bash avatar-datanode-start [--conf configFile] [--daemon]
+set -e
+
+usage="USAGE
+ bash $(basename $0) [--help] [--format] [--conf configFile] [--daemon]
+
+DESCRIPTION
+ Starts an avatar datanode locally with one of the configurations. If
+ the --conf option is not specified, the script brings up a menu listing
+ all the datanode configuration files it finds and lets the user make a
+ choice.
+
+OPTIONS
+ --help - shows this help message
+ --format - forces the datanode to format its directories before it starts. If
+ this option is not given, the datanode formats its directories only if
+ they do not exist
+ --conf - specifies which configuration to use for starting the datanode.
+ --daemon - starts the datanode as a daemon process. Logs will go to
+ the directory specified by \$LOGS_DIRECTORY variable
+"
+
+if [[ ${PWD##*/} != "hdfs-autoconf" ]]; then
+ echo "The script should be launched from ./hdfs-autoconf directory. Exiting.."
+ exit 1
+fi
+
+if (( $# >= 1 )); then
+ if [[ "$1" == "--help" ]]; then
+ echo "$usage"
+ exit 0
+ fi
+fi
+
+format="false"
+if (( $# >= 1 )); then
+ if [[ "$1" == "--format" ]]; then
+ format="true"
+ shift;
+ fi
+fi
+
+if (( $# >= 2 )); then
+ if [[ "$1" == "--conf" ]]; then
+ shift;
+ datanodeConfig=$1;
+ shift;
+ fi
+fi
+
+daemon=false;
+if (( $# >= 1 )); then
+ if [[ "$1" == "--daemon" ]]; then
+ daemon=true;
+ shift;
+ fi
+fi
+
+if (( $# > 0 )); then
+ echo "$usage"
+ exit 1;
+fi
+
+source scripts/common.sh
+
+
+function showUserMenu {
+ echo -e "Searching for configurations ${cWHITE}$DATANODE_CONFIG_FILES${cRESET}..."
+ echo -e "Select config for this instance of datanode:"
+
+ counter=0;
+ for i in $(ls -1 $DATANODE_CONFIG_FILES); do
+ counter=$(expr $counter + 1);
+ echo -e " ${cWHITE}[$counter]${cRESET} $i"
+ done;
+
+ amount=$counter
+
+ if (( $amount == 0 )); then
+ fail "No configuration files found"
+ fi
+
+ read -p "
+ Which one to start (1-$amount): "
+ if [[ $REPLY == "" ]]; then
+ echo "Exiting...";
+ exit 0;
+ fi
+
+ if ! [[ $REPLY =~ ^[0-9]+$ ]]; then
+ fail "Command must be a number (no whitespaces!)"
+ fi
+ if !(( $REPLY > 0 && $REPLY <= $amount )); then
+ fail "Wrong command!"
+ fi
+
+ datanodeConfig=$(ls -1 $DATANODE_CONFIG_FILES | head -$REPLY | tail -1);
+}
+
+if [[ "$daemon" == "true" ]]; then
+ # HACK: we're removing *.pid files from logs directory so that hadoop
+ # daemon will allow us to start multiple instances
+ rm -f ${LOGS_DIRECTORY}/*.pid
+fi
+
+if [[ $datanodeConfig == "" ]]; then
+ showUserMenu
+fi
+
+
+# creating logs subdirectory from the name of config file
+datanodeLogsDirectory=${datanodeConfig##*/}
+datanodeLogsDirectory=${datanodeLogsDirectory%.*}
+export HADOOP_LOG_DIR=${LOGS_DIRECTORY}/$datanodeLogsDirectory
+./scripts/gen-datanode $datanodeConfig
+if [[ $format == "true" ]]; then
+ ./$LAUNCHPAD_DIR/dn-format --hard
+else
+ ./$LAUNCHPAD_DIR/dn-format --soft
+fi
+
+runArgs="";
+if [[ "$daemon" == "true" ]]; then
+ runArgs="$runArgs --daemon";
+fi
+
+./$LAUNCHPAD_DIR/run $runArgs
+
+# wait some time to make sure the running instance actually
+# read all the config files
+sleep 3
diff --git a/hdfs-autoconf/avatar-format b/hdfs-autoconf/avatar-format
new file mode 100644
index 00000000..6fd73a02
--- /dev/null
+++ b/hdfs-autoconf/avatar-format
@@ -0,0 +1,65 @@
+#!/bin/bash
+
+set -e
+
+usage="USAGE
+ bash $(basename $0) [--help]
+
+DESCRIPTION
+ Formats directories that are used for both Active and Standby namenodes.
+
+OPTIONS
+ --help - show this help message
+"
+
+if [[ ${PWD##*/} != "hdfs-autoconf" ]]; then
+ echo "The script should be launched from ./hdfs-autoconf directory. Exiting.."
+ exit 1
+fi
+
+if (( $# >= 1 )); then
+ if [[ $1 == "--help" ]]; then
+ echo "$usage";
+ exit 0;
+ fi
+fi
+
+if (( $# > 0 )); then
+ echo "$usage";
+ exit 1;
+fi
+
+source scripts/common.sh
+
+# populate config
+./scripts/gen-avatar zero
+
+# creating directory formatters
+LOCAL_DIR_FORMATTER="$TEMPLATES_DIR/format-avatarnode-local-dir.sh.template"
+SHARED_DIR_FORMATTER="$TEMPLATES_DIR/format-avatarnode-shared-dir.sh.template"
+
+AVATAR_LOCAL_ZERO="$LAUNCHPAD_DIR/avatar-zero-local-dir.sh"
+cp $LOCAL_DIR_FORMATTER $AVATAR_LOCAL_ZERO
+patch $AVATAR_LOCAL_ZERO $METACONF_DIR/avatar-zero.sed
+
+AVATAR_LOCAL_ONE="$LAUNCHPAD_DIR/avatar-one-local-dir.sh"
+cp $LOCAL_DIR_FORMATTER $AVATAR_LOCAL_ONE
+patch $AVATAR_LOCAL_ONE $METACONF_DIR/avatar-one.sed
+
+AVATAR_SHARED="$LAUNCHPAD_DIR/avatar-shared-dir.sh"
+cp $SHARED_DIR_FORMATTER $AVATAR_SHARED
+patch $AVATAR_SHARED $METACONF_DIR/avatar-shared.sed
+
+echo "Creating avatar directories"
+bash $AVATAR_LOCAL_ZERO
+bash $AVATAR_LOCAL_ONE
+bash $AVATAR_SHARED
+
+
+echo "Formatting avatar..."
+source config.sh
+cd ${HADOOP_VERSION}/bin
+./hadoop avatarzk -updateZK -zero -force
+./hadoop avatarnode -format
+echo -e "${cGREEN}Done.${cRESET}"
+
diff --git a/hdfs-autoconf/avatar-one-start b/hdfs-autoconf/avatar-one-start
new file mode 100644
index 00000000..d6353f4f
--- /dev/null
+++ b/hdfs-autoconf/avatar-one-start
@@ -0,0 +1,61 @@
+#!/bin/bash
+
+# Usage: bash avatar-one-start [--daemon]
+
+set -e
+usage="USAGE
+ bash $(basename $0) [--help] [--daemon]
+
+DESCRIPTION
+ Starts an avatar namenode locally, which is standby by default.
+
+OPTIONS
+ --help - shows this help message
+ --daemon - starts avatar as a daemon process. Logs will go to
+ the directory specified by \$LOGS_DIRECTORY variable
+"
+
+if [[ ${PWD##*/} != "hdfs-autoconf" ]]; then
+ echo "The script should be launched from ./hdfs-autoconf directory. Exiting.."
+ exit 1
+fi
+
+if (( $# >= 1)); then
+ if [[ $1 == "--help" ]]; then
+ echo "$usage";
+ exit 0;
+ fi
+fi
+
+daemon="false";
+if (( $# >= 1 )); then
+ if [[ $1 == "--daemon" ]]; then
+ daemon="true"
+ shift;
+ fi;
+fi
+
+if (( $# > 0 )); then
+ echo "$usage";
+ exit 1;
+fi
+
+source config.sh
+
+runArgs="";
+if [[ "$daemon" == "true" ]]; then
+ # HACK: after every launch we should remove the `pid` file so that
+ # `hadoop-daemon.sh` (which is actually called under the hood)
+ # won't complain about instances that are already started
+ rm -f ${LOGS_DIRECTORY}/*.pid
+ runArgs="--daemon";
+fi
+
+export HADOOP_LOG_DIR=${LOGS_DIRECTORY}/avatar-one-logs
+
+./scripts/gen-avatar one
+$LAUNCHPAD_DIR/run $runArgs
+
+# wait some time to make sure the running instance actually
+# read all the config files
+sleep 3
diff --git a/hdfs-autoconf/avatar-zero-start b/hdfs-autoconf/avatar-zero-start
new file mode 100644
index 00000000..9eaf1642
--- /dev/null
+++ b/hdfs-autoconf/avatar-zero-start
@@ -0,0 +1,67 @@
+#!/bin/bash
+
+# Usage: bash avatar-zero-start [--daemon]
+set -e
+
+usage="USAGE
+ bash $(basename $0) [--help] [--daemon]
+
+DESCRIPTION
+ Starts an avatar namenode locally, which is active by default.
+
+OPTIONS
+ --help - shows this help message
+ --daemon - starts avatar as a daemon process. Logs will go to
+ the directory specified by \$LOGS_DIRECTORY variable
+"
+
+if [[ ${PWD##*/} != "hdfs-autoconf" ]]; then
+ echo "The script should be launched from ./hdfs-autoconf directory. Exiting.."
+ exit 1
+fi
+
+if (( $# >= 1)); then
+ if [[ $1 == "--help" ]]; then
+ echo "$usage";
+ exit 0;
+ fi
+fi
+
+daemon="false";
+if (( $# >= 1 )); then
+ if [[ $1 == "--daemon" ]]; then
+ daemon="true";
+ shift;
+ fi
+fi
+
+if (( $# > 0 )); then
+ echo "$usage"
+ exit 1
+fi
+
+source config.sh
+
+runArgs=""
+if [[ "$daemon" == "true" ]]; then
+ # HACK: after every launch we should remove the `pid` file so that
+ # `hadoop-daemon.sh` (which is actually called under the hood)
+ # won't complain about instances that are already started
+ rm -f ${LOGS_DIRECTORY}/*.pid
+
+ runArgs="$runArgs --daemon"
+fi
+
+./scripts/gen-avatar zero
+
+cd ${HADOOP_VERSION}/bin
+./hadoop avatarzk -updateZK -zero -force
+cd -
+
+export HADOOP_LOG_DIR=${LOGS_DIRECTORY}/avatar-zero-logs
+
+$LAUNCHPAD_DIR/run $runArgs
+
+# wait some time to make sure the running instance actually
+# read all the config files
+sleep 3
diff --git a/hdfs-autoconf/build.sh b/hdfs-autoconf/build.sh
new file mode 100644
index 00000000..79a95444
--- /dev/null
+++ b/hdfs-autoconf/build.sh
@@ -0,0 +1,48 @@
+#!/bin/bash
+set -e
+
+usage="USAGE
+ $(basename $0) [--help] [--fast]
+
+DESCRIPTION
+ Builds HDFS from sources.
+
+OPTIONS
+ --help - shows this help
+ --fast - EXPERIMENTAL option; builds roughly 3 times faster than the default
+ build.
+"
+
+if (( $# >= 1 )); then
+ if [[ $1 == "--help" ]]; then
+ echo "$usage";
+ exit 0;
+ fi
+fi
+
+compile="full"
+if (( $# >= 1 )); then
+ if [[ $1 == "--fast" ]]; then
+ compile="fast"
+ shift
+ fi
+fi
+
+if (( $# > 0 )); then
+ echo "$usage"
+ exit 1
+fi
+
+source config.sh
+
+cd ${HADOOP_VERSION};
+
+if [[ $compile == "full" ]]; then
+ ant clean compile
+elif [[ $compile == "fast" ]]; then
+ ant clean compile-core
+ cd src/contrib/highavailability
+ ant clean compile
+fi
+
+
diff --git a/hdfs-autoconf/config-meta/avatar-datanode-1.sed b/hdfs-autoconf/config-meta/avatar-datanode-1.sed
new file mode 100644
index 00000000..72c70745
--- /dev/null
+++ b/hdfs-autoconf/config-meta/avatar-datanode-1.sed
@@ -0,0 +1,2 @@
+# datanode volumes: list without spaces, comma-delimited
+s:{{DataNode-volumes}}:/tmp/hadoop-datanode-0-vol0/,/tmp/hadoop-datanode-0-vol1/:g
diff --git a/hdfs-autoconf/config-meta/avatar-datanode-2.sed b/hdfs-autoconf/config-meta/avatar-datanode-2.sed
new file mode 100644
index 00000000..b0536216
--- /dev/null
+++ b/hdfs-autoconf/config-meta/avatar-datanode-2.sed
@@ -0,0 +1,2 @@
+# datanode config
+s:{{DataNode-volumes}}:/tmp/hadoop-datanode-1-vol0/,/tmp/hadoop-datanode-1-vol1/:g
diff --git a/hdfs-autoconf/config-meta/avatar-datanode.template b/hdfs-autoconf/config-meta/avatar-datanode.template
new file mode 100644
index 00000000..79c1f451
--- /dev/null
+++ b/hdfs-autoconf/config-meta/avatar-datanode.template
@@ -0,0 +1,6 @@
+# This file is used as a template for generating datanode config files
+# automatically. The "XXX" marker is substituted with the sequential number
+# of a datanode instance. This way you can specify the layout and number
+# of volumes for the automatically generated datanode configuration files.
+#
+s:{{DataNode-volumes}}:/tmp/hadoop-datanode-XXX-vol0/,/tmp/hadoop-datanode-XXX-vol1/:g
diff --git a/hdfs-autoconf/config-meta/avatar-one.sed b/hdfs-autoconf/config-meta/avatar-one.sed
new file mode 100644
index 00000000..b8898837
--- /dev/null
+++ b/hdfs-autoconf/config-meta/avatar-one.sed
@@ -0,0 +1,5 @@
+# local avatar 1 config
+s:{{NameNode-local}}:/tmp/hadoop-avatar-1-local/:g
+s:{{NameNode-local-fsimage}}:/tmp/hadoop-avatar-1-local/fsimage/:g
+s:{{NameNode-local-fsedits}}:/tmp/hadoop-avatar-1-local/fsedits/:g
+
diff --git a/hdfs-autoconf/config-meta/avatar-shared.sed b/hdfs-autoconf/config-meta/avatar-shared.sed
new file mode 100644
index 00000000..3bacef0f
--- /dev/null
+++ b/hdfs-autoconf/config-meta/avatar-shared.sed
@@ -0,0 +1,10 @@
+# setting up shared avatar directories
+# all these paths will be created under the /tmp directory
+s:{{NameNode-shared}}:/tmp/hadoop-avatar-shared/:g
+s:{{NameNode-shared-fsimage-0}}:/tmp/hadoop-avatar-shared/fsimage-zero/:g
+s:{{NameNode-shared-fsedits-0}}:/tmp/hadoop-avatar-shared/fsedits-zero/:g
+s:{{NameNode-shared-fsimage-1}}:/tmp/hadoop-avatar-shared/fsimage-one/:g
+s:{{NameNode-shared-fsedits-1}}:/tmp/hadoop-avatar-shared/fsedits-one/:g
+
+# an underscore may be used as a separator as well
+s_{{zookeeper-quorum}}_localhost_g
diff --git a/hdfs-autoconf/config-meta/avatar-zero.sed b/hdfs-autoconf/config-meta/avatar-zero.sed
new file mode 100644
index 00000000..e00c462c
--- /dev/null
+++ b/hdfs-autoconf/config-meta/avatar-zero.sed
@@ -0,0 +1,5 @@
+# local avatar 0 config
+s:{{NameNode-local}}:/tmp/hadoop-avatar-0-local/:g
+s:{{NameNode-local-fsimage}}:/tmp/hadoop-avatar-0-local/fsimage/:g
+s:{{NameNode-local-fsedits}}:/tmp/hadoop-avatar-0-local/fsedits/:g
+
diff --git a/hdfs-autoconf/config-templates/avatar-site.xml.template b/hdfs-autoconf/config-templates/avatar-site.xml.template
new file mode 100644
index 00000000..531049b7
--- /dev/null
+++ b/hdfs-autoconf/config-templates/avatar-site.xml.template
@@ -0,0 +1,115 @@
+<?xml version="1.0"?>
+<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
+
+<!-- Put site-specific property overrides in this file. -->
+
+<configuration>
+
+<property>
+  <name>dfs.http.address0</name>
+  <value>localhost:50070</value>
+  <description>
+    The address and the base port where the dfs namenode web ui will listen on.
+    If the port is 0 then the server will start on a free port.
+  </description>
+</property>
+
+<property>
+  <name>dfs.http.address1</name>
+  <value>localhost:50080</value>
+  <description>
+    The address and the base port where the dfs namenode web ui will listen on.
+    If the port is 0 then the server will start on a free port.
+  </description>
+</property>
+
+<property>
+  <name>dfs.name.dir</name>
+  <value>{{NameNode-local-fsimage}}</value>
+  <description>Determines where on the local filesystem the DFS name node
+    should store the name table(fsimage). If this is a comma-delimited list
+    of directories then the name table is replicated in all of the
+    directories, for redundancy.
+  </description>
+</property>
+
+<property>
+  <name>dfs.name.edits.dir</name>
+  <value>{{NameNode-local-fsedits}}</value>
+  <description>Determines where on the local filesystem the DFS name node
+    should store the transaction (edits) file. If this is a comma-delimited
+    list of directories then the transaction file is replicated in all of the
+    directories, for redundancy. Default value is same as dfs.name.dir
+  </description>
+</property>
+
+<property>
+  <name>dfs.name.dir.shared0</name>
+  <value>{{NameNode-shared-fsimage-0}}</value>
+  <description>Determines where on the filer the AvatarNode
+    should store the name table(fsimage).
+  </description>
+</property>
+
+<property>
+  <name>dfs.name.dir.shared1</name>
+  <value>{{NameNode-shared-fsimage-1}}</value>
+  <description>Determines where on the filer the other instance of the AvatarNode
+    should store the name table(fsimage).
+  </description>
+</property>
+
+<property>
+  <name>dfs.name.edits.dir.shared0</name>
+  <value>{{NameNode-shared-fsedits-0}}</value>
+  <description>Determines where on the filer the AvatarNode
+    should store the transaction (edits) file. If this is a comma-delimited
+    list of directories then the transaction file is replicated in all of the
+    directories, for redundancy. Default value is same as dfs.name.dir
+  </description>
+</property>
+
+<property>
+  <name>dfs.name.edits.dir.shared1</name>
+  <value>{{NameNode-shared-fsedits-1}}</value>
+  <description>Determines where on the filer the other instance of the AvatarNode
+    should store the transaction (edits) file.
+  </description>
+</property>
+
+<property>
+  <name>fs.checkpoint.enabled</name>
+  <value>true</value>
+</property>
+
+<property>
+  <name>standby.image.copies.tokeep</name>
+  <value>5</value>
+  <description>The number of backup copies of the image
+    and fsedits to keep around.
+  </description>
+</property>
+
+<property>
+  <name>standby.image.days.tokeep</name>
+  <value>2</value>
+  <description>How old should the backup image
+    be to get deleted.
+  </description>
+</property>
+
+<property>
+  <name>dfs.namenode.dn-address0</name>
+  <value>localhost:9005</value>
+  <description>
+    The address and port to run the RPC server which will be processing
+    requests from datanodes in the cluster.
+  </description>
+</property>
+
+<property>
+  <name>dfs.namenode.dn-address1</name>
+  <value>localhost:9006</value>
+  <description>
+    The address and port to run the RPC server which will be processing
+    requests from datanodes in the cluster.
+  </description>
+</property>
+
+</configuration>
diff --git a/hdfs-autoconf/config-templates/core-site.xml.template b/hdfs-autoconf/config-templates/core-site.xml.template
new file mode 100644
index 00000000..9d74a635
--- /dev/null
+++ b/hdfs-autoconf/config-templates/core-site.xml.template
@@ -0,0 +1,95 @@
+<?xml version="1.0"?>
+<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
+
+<!-- Put site-specific property overrides in this file. -->
+
+<configuration>
+
+  <property>
+    <name>fs.default.name</name>
+    <value>hdfs://localhost:9000</value>
+    <description>The name of the default file system. A URI whose
+      scheme and authority determine the FileSystem implementation. The
+      uri's scheme determines the config property (fs.SCHEME.impl) naming
+      the FileSystem implementation class. The uri's authority is used to
+      determine the host, port, etc. for a filesystem.
+    </description>
+  </property>
+
+  <property>
+    <name>fs.default.name0</name>
+    <value>hdfs://localhost:9000</value>
+    <description>The name of the default file system. A URI whose
+      scheme and authority determine the FileSystem implementation. The
+      uri's scheme determines the config property (fs.SCHEME.impl) naming
+      the FileSystem implementation class. The uri's authority is used to
+      determine the host, port, etc. for a filesystem.
+    </description>
+  </property>
+
+  <property>
+    <name>fs.default.name1</name>
+    <value>hdfs://localhost:9010</value>
+    <description>The name of the default file system. A URI whose
+      scheme and authority determine the FileSystem implementation. The
+      uri's scheme determines the config property (fs.SCHEME.impl) naming
+      the FileSystem implementation class. The uri's authority is used to
+      determine the host, port, etc. for a filesystem.
+    </description>
+  </property>
+
+  <property>
+    <name>fs.checkpoint.period</name>
+    <value>600</value>
+    <description>
+      The number of seconds between two periodic checkpoints
+    </description>
+  </property>
+
+  <property>
+    <name>fs.checkpoint.size</name>
+    <value>10000000</value>
+    <description>
+      Defines the size of the edits log file that forces an urgent checkpoint
+      even if the maximum checkpoint delay is not reached.
+    </description>
+  </property>
+
+  <property>
+    <name>fs.ha.zookeeper.quorum</name>
+    <value>{{zookeeper-quorum}}</value>
+    <description>The list of ZK servers DAFS will be connecting to</description>
+  </property>
+
+  <property>
+    <name>ipc.client.connect.max.retries</name>
+    <value>10</value>
+  </property>
+
+  <property>
+    <name>ipc.client.connect.timeout</name>
+    <value>5</value>
+  </property>
+
+  <property>
+    <name>fs.hdfs.impl</name>
+    <value>org.apache.hadoop.hdfs.DistributedAvatarFileSystem</value>
+  </property>
+
+  <property>
+    <name>fs.ha.zookeeper.cache</name>
+    <value>true</value>
+  </property>
+
+  <property>
+    <name>fs.ha.zookeeper.timeout</name>
+    <value>30000</value>
+    <description>Indicates the session timeout for a zookeeper client connection</description>
+  </property>
+
+  <property>
+    <name>fs.ha.retrywrites</name>
+    <value>true</value>
+    <description>retry writes or not</description>
+  </property>
+
+</configuration>
diff --git a/hdfs-autoconf/config-templates/format-avatardatanode.sh.template b/hdfs-autoconf/config-templates/format-avatardatanode.sh.template
new file mode 100644
index 00000000..11f90ab9
--- /dev/null
+++ b/hdfs-autoconf/config-templates/format-avatardatanode.sh.template
@@ -0,0 +1,53 @@
+#!/bin/bash
+set -e
+
+usage="USAGE
+ bash $(basename $0) [--help] [--soft]
+
+DESCRIPTION
+  Formats all the directories needed for every datanode volume.
+  If a volume directory already exists, it is recreated, which deletes
+  all the underlying data (this is also called HARD mode), unless the
+  --soft option is given
+
+OPTIONS
+ --help - shows this help message
+  --soft - does not recreate a directory if it already exists. This
+    option is used to preserve the data of the datanode
+"
+
+if (( $# >= 1 )); then
+ if [[ "$1" == "--help" ]]; then
+ echo "$usage"
+ exit 0
+ fi
+fi
+
+soft="false";
+if (( $# >= 1 )); then
+ if [[ "$1" == "--soft" ]]; then
+ soft="true";
+ shift;
+ fi
+fi
+
+volumeDirs=$(echo {{DataNode-volumes}} | tr ',' '\n');
+echo "Volume dirs: $volumeDirs"
+
+if [[ "$soft" == "true" ]]; then
+ echo "Datanode is formatted in a SOFT mode"
+ for i in $volumeDirs; do
+ if ! [[ -d $i ]]; then
+ mkdir $i;
+ fi
+ done;
+elif [[ "$soft" == "false" ]]; then
+ echo "Datanode is formatted in a HARD mode"
+ for i in $volumeDirs; do
+ rm -rf $i;
+ mkdir $i;
+ done;
+else
+ echo "This is a bug. Local variable \$soft has a bad value of $soft"
+ exit 1
+fi
diff --git a/hdfs-autoconf/config-templates/format-avatarnode-local-dir.sh.template b/hdfs-autoconf/config-templates/format-avatarnode-local-dir.sh.template
new file mode 100644
index 00000000..186c566f
--- /dev/null
+++ b/hdfs-autoconf/config-templates/format-avatarnode-local-dir.sh.template
@@ -0,0 +1,6 @@
+#!/bin/bash
+
+rm -rf {{NameNode-local}};
+mkdir -p {{NameNode-local-fsimage}};
+mkdir -p {{NameNode-local-fsedits}};
+
diff --git a/hdfs-autoconf/config-templates/format-avatarnode-shared-dir.sh.template b/hdfs-autoconf/config-templates/format-avatarnode-shared-dir.sh.template
new file mode 100644
index 00000000..28263713
--- /dev/null
+++ b/hdfs-autoconf/config-templates/format-avatarnode-shared-dir.sh.template
@@ -0,0 +1,7 @@
+#!/bin/bash
+
+rm -rf {{NameNode-shared}};
+mkdir -p {{NameNode-shared-fsimage-0}};
+mkdir -p {{NameNode-shared-fsedits-0}};
+mkdir -p {{NameNode-shared-fsimage-1}};
+mkdir -p {{NameNode-shared-fsedits-1}};
diff --git a/hdfs-autoconf/config-templates/hadoop-env-avatar-one.sh b/hdfs-autoconf/config-templates/hadoop-env-avatar-one.sh
new file mode 100644
index 00000000..a051a4d7
--- /dev/null
+++ b/hdfs-autoconf/config-templates/hadoop-env-avatar-one.sh
@@ -0,0 +1,71 @@
+if [[ "$IS_HADOOP_ENV_ALREADY_SOURCED" != "true" ]]; then
+ export IS_HADOOP_ENV_ALREADY_SOURCED="true"
+ # Set Hadoop-specific environment variables here.
+
+ # The only required environment variable is JAVA_HOME. All others are
+ # optional. When running a distributed configuration it is best to
+ # set JAVA_HOME in this file, so that it is correctly defined on
+ # remote nodes.
+
+ # The java implementation to use. Required.
+ # export JAVA_HOME=/usr/lib/j2sdk1.5-sun
+
+ # Extra Java CLASSPATH elements. Optional.
+ export HADOOP_CLASSPATH=${HADOOP_TRUNK_MAIN}/VENDOR/hadoop-0.20/lib/
+
+ # The maximum amount of heap to use, in MB. Default is 1000.
+ # export HADOOP_HEAPSIZE=2000
+
+ # Extra Java runtime options. Empty by default.
+ # export HADOOP_OPTS=-server
+
+ # Command specific options appended to HADOOP_OPTS when specified
+ export HADOOP_NAMENODE_OPTS="-Dcom.sun.management.jmxremote $HADOOP_NAMENODE_OPTS"
+ export HADOOP_SECONDARYNAMENODE_OPTS="-Dcom.sun.management.jmxremote $HADOOP_SECONDARYNAMENODE_OPTS"
+ export HADOOP_DATANODE_OPTS="-Dcom.sun.management.jmxremote $HADOOP_DATANODE_OPTS"
+ export HADOOP_BALANCER_OPTS="-Dcom.sun.management.jmxremote $HADOOP_BALANCER_OPTS"
+ export HADOOP_JOBTRACKER_OPTS="-Dcom.sun.management.jmxremote $HADOOP_JOBTRACKER_OPTS"
+ export HADOOP_RAIDNODE_OPTS="-Dcom.sun.management.jmxremote $HADOOP_RAIDNODE_OPTS"
+ export HADOOP_NAMENODE_OPTS="-Dcom.sun.management.jmxremote.port=8998 -Dcom.sun.management.jmxremote.authenticate=false -Dcom.sun.management.jmxremote.ssl=false"
+
+ # The only user who can start hadoop daemons.
+ # If this is not set, any user can start hadoop daemons.
+ # export HADOOP_USERNAME="hadoop"
+
+  # Java Runtime garbage collection options to pass to all Hadoop
+  # servers (Namenode, Jobtracker, Datanode, Tasktracker). This must end
+  # with a colon, to which the dynamically generated gc log filename will
+  # be appended. The defaults below work for the Sun JVM; for IBM GC,
+  # for example, use '-Xverbosegclog:'.
+ #export HADOOP_GC_LOG_OPTS="-XX:+PrintGCDateStamps -XX:+PrintGCDetails -Xloggc:"
+
+ # export HADOOP_TASKTRACKER_OPTS=
+ # The following applies to multiple commands (fs, dfs, fsck, distcp etc)
+ # export HADOOP_CLIENT_OPTS
+
+ # Extra ssh options. Empty by default.
+ # export HADOOP_SSH_OPTS="-o ConnectTimeout=1 -o SendEnv=HADOOP_CONF_DIR"
+
+ # Where log files are stored. $HADOOP_HOME/logs by default.
+ # export HADOOP_LOG_DIR=${HADOOP_HOME}/logs
+
+ # File naming remote slave hosts. $HADOOP_HOME/conf/slaves by default.
+ # export HADOOP_SLAVES=${HADOOP_HOME}/conf/slaves
+
+ # host:path where hadoop code should be rsync'd from. Unset by default.
+ # export HADOOP_MASTER=master:/home/$USER/src/hadoop
+
+ # Seconds to sleep between slave commands. Unset by default. This
+ # can be useful in large clusters, where, e.g., slave rsyncs can
+ # otherwise arrive faster than the master can service them.
+ # export HADOOP_SLAVE_SLEEP=0.1
+
+ # The directory where pid files are stored. /tmp by default.
+ # export HADOOP_PID_DIR=/var/hadoop/pids
+
+ # A string representing this instance of hadoop. $USER by default.
+ # export HADOOP_IDENT_STRING=$USER
+
+ # The scheduling priority for daemon processes. See 'man nice'.
+ # export HADOOP_NICENESS=10
+fi
diff --git a/hdfs-autoconf/config-templates/hadoop-env-avatar-zero.sh b/hdfs-autoconf/config-templates/hadoop-env-avatar-zero.sh
new file mode 100644
index 00000000..e1a2d0af
--- /dev/null
+++ b/hdfs-autoconf/config-templates/hadoop-env-avatar-zero.sh
@@ -0,0 +1,71 @@
+if [[ "$IS_HADOOP_ENV_ALREADY_SOURCED" != "true" ]]; then
+ export IS_HADOOP_ENV_ALREADY_SOURCED="true"
+ # Set Hadoop-specific environment variables here.
+
+ # The only required environment variable is JAVA_HOME. All others are
+ # optional. When running a distributed configuration it is best to
+ # set JAVA_HOME in this file, so that it is correctly defined on
+ # remote nodes.
+
+ # The java implementation to use. Required.
+ # export JAVA_HOME=/usr/lib/j2sdk1.5-sun
+
+ # Extra Java CLASSPATH elements. Optional.
+ #export HADOOP_CLASSPATH=${HADOOP_TRUNK_MAIN}/VENDOR/hadoop-0.20/lib/
+
+ # The maximum amount of heap to use, in MB. Default is 1000.
+ export HADOOP_HEAPSIZE=2000
+
+ # Extra Java runtime options. Empty by default.
+ # export HADOOP_OPTS=-server
+
+ # Command specific options appended to HADOOP_OPTS when specified
+ export HADOOP_SECONDARYNAMENODE_OPTS="-Dcom.sun.management.jmxremote $HADOOP_SECONDARYNAMENODE_OPTS"
+ export HADOOP_DATANODE_OPTS="-Dcom.sun.management.jmxremote $HADOOP_DATANODE_OPTS"
+ export HADOOP_BALANCER_OPTS="-Dcom.sun.management.jmxremote $HADOOP_BALANCER_OPTS"
+ export HADOOP_JOBTRACKER_OPTS="-Dcom.sun.management.jmxremote $HADOOP_JOBTRACKER_OPTS"
+ export HADOOP_RAIDNODE_OPTS="-Dcom.sun.management.jmxremote $HADOOP_RAIDNODE_OPTS"
+ export HADOOP_NAMENODE_OPTS="-Dcom.sun.management.jmxremote -Xmx3g -Xms3g $HADOOP_NAMENODE_OPTS -Xdebug -Xrunjdwp:transport=dt_socket,server=y,suspend=n,address=9070"
+ #export HADOOP_NAMENODE_OPTS="-Dcom.sun.management.jmxremote.port=8998 -Dcom.sun.management.jmxremote.authenticate=false -Dcom.sun.management.jmxremote.ssl=false"
+
+ # The only user who can start hadoop daemons.
+ # If this is not set, any user can start hadoop daemons.
+ #export HADOOP_USERNAME="hadoop"
+
+  # Java Runtime garbage collection options to pass to all Hadoop
+  # servers (Namenode, Jobtracker, Datanode, Tasktracker). This must end
+  # with a colon, to which the dynamically generated gc log filename will
+  # be appended. The defaults below work for the Sun JVM; for IBM GC,
+  # for example, use '-Xverbosegclog:'.
+ #export HADOOP_GC_LOG_OPTS="-XX:+PrintGCDateStamps -XX:+PrintGCDetails -Xloggc:"
+
+ # export HADOOP_TASKTRACKER_OPTS=
+ # The following applies to multiple commands (fs, dfs, fsck, distcp etc)
+ # export HADOOP_CLIENT_OPTS
+
+ # Extra ssh options. Empty by default.
+ # export HADOOP_SSH_OPTS="-o ConnectTimeout=1 -o SendEnv=HADOOP_CONF_DIR"
+
+ # Where log files are stored. $HADOOP_HOME/logs by default.
+ # export HADOOP_LOG_DIR=${HADOOP_HOME}/logs
+
+ # File naming remote slave hosts. $HADOOP_HOME/conf/slaves by default.
+ # export HADOOP_SLAVES=${HADOOP_HOME}/conf/slaves
+
+ # host:path where hadoop code should be rsync'd from. Unset by default.
+ # export HADOOP_MASTER=master:/home/$USER/src/hadoop
+
+ # Seconds to sleep between slave commands. Unset by default. This
+ # can be useful in large clusters, where, e.g., slave rsyncs can
+ # otherwise arrive faster than the master can service them.
+ # export HADOOP_SLAVE_SLEEP=0.1
+
+ # The directory where pid files are stored. /tmp by default.
+ # export HADOOP_PID_DIR=/var/hadoop/pids
+
+ # A string representing this instance of hadoop. $USER by default.
+ # export HADOOP_IDENT_STRING=$USER
+
+ # The scheduling priority for daemon processes. See 'man nice'.
+ # export HADOOP_NICENESS=10
+fi
diff --git a/hdfs-autoconf/config-templates/hadoop-env-datanode.sh b/hdfs-autoconf/config-templates/hadoop-env-datanode.sh
new file mode 100644
index 00000000..81870a56
--- /dev/null
+++ b/hdfs-autoconf/config-templates/hadoop-env-datanode.sh
@@ -0,0 +1,71 @@
+if [[ "$IS_HADOOP_ENV_ALREADY_SOURCED" != "true" ]]; then
+ export IS_HADOOP_ENV_ALREADY_SOURCED="true"
+ # Set Hadoop-specific environment variables here.
+
+ # The only required environment variable is JAVA_HOME. All others are
+ # optional. When running a distributed configuration it is best to
+ # set JAVA_HOME in this file, so that it is correctly defined on
+ # remote nodes.
+
+ # The java implementation to use. Required.
+ # export JAVA_HOME=/usr/lib/j2sdk1.5-sun
+
+ # Extra Java CLASSPATH elements. Optional.
+ # export HADOOP_CLASSPATH=
+
+ # The maximum amount of heap to use, in MB. Default is 1000.
+ # export HADOOP_HEAPSIZE=2000
+
+ # Extra Java runtime options. Empty by default.
+ # export HADOOP_OPTS=-server
+
+ # Command specific options appended to HADOOP_OPTS when specified
+ export HADOOP_NAMENODE_OPTS="-Dcom.sun.management.jmxremote $HADOOP_NAMENODE_OPTS"
+ export HADOOP_SECONDARYNAMENODE_OPTS="-Dcom.sun.management.jmxremote $HADOOP_SECONDARYNAMENODE_OPTS"
+ export HADOOP_DATANODE_OPTS="-Dcom.sun.management.jmxremote $HADOOP_DATANODE_OPTS"
+ export HADOOP_BALANCER_OPTS="-Dcom.sun.management.jmxremote $HADOOP_BALANCER_OPTS"
+ export HADOOP_JOBTRACKER_OPTS="-Dcom.sun.management.jmxremote $HADOOP_JOBTRACKER_OPTS"
+ export HADOOP_RAIDNODE_OPTS="-Dcom.sun.management.jmxremote $HADOOP_RAIDNODE_OPTS"
+ #export HADOOP_NAMENODE_OPTS="-Dcom.sun.management.jmxremote.port=8998 -Dcom.sun.management.jmxremote.authenticate=false -Dcom.sun.management.jmxremote.ssl=false"
+
+ # The only user who can start hadoop daemons.
+ # If this is not set, any user can start hadoop daemons.
+ # export HADOOP_USERNAME="hadoop"
+
+  # Java Runtime garbage collection options to pass to all Hadoop
+  # servers (Namenode, Jobtracker, Datanode, Tasktracker). This must end
+  # with a colon, to which the dynamically generated gc log filename will
+  # be appended. The defaults below work for the Sun JVM; for IBM GC,
+  # for example, use '-Xverbosegclog:'.
+ #export HADOOP_GC_LOG_OPTS="-XX:+PrintGCDateStamps -XX:+PrintGCDetails -Xloggc:"
+
+ # export HADOOP_TASKTRACKER_OPTS=
+ # The following applies to multiple commands (fs, dfs, fsck, distcp etc)
+ # export HADOOP_CLIENT_OPTS
+
+ # Extra ssh options. Empty by default.
+ # export HADOOP_SSH_OPTS="-o ConnectTimeout=1 -o SendEnv=HADOOP_CONF_DIR"
+
+ # Where log files are stored. $HADOOP_HOME/logs by default.
+ # export HADOOP_LOG_DIR=${HADOOP_HOME}/logs
+
+ # File naming remote slave hosts. $HADOOP_HOME/conf/slaves by default.
+ # export HADOOP_SLAVES=${HADOOP_HOME}/conf/slaves
+
+ # host:path where hadoop code should be rsync'd from. Unset by default.
+ # export HADOOP_MASTER=master:/home/$USER/src/hadoop
+
+ # Seconds to sleep between slave commands. Unset by default. This
+ # can be useful in large clusters, where, e.g., slave rsyncs can
+ # otherwise arrive faster than the master can service them.
+ # export HADOOP_SLAVE_SLEEP=0.1
+
+ # The directory where pid files are stored. /tmp by default.
+ # export HADOOP_PID_DIR=/var/hadoop/pids
+
+ # A string representing this instance of hadoop. $USER by default.
+ # export HADOOP_IDENT_STRING=$USER
+
+ # The scheduling priority for daemon processes. See 'man nice'.
+ # export HADOOP_NICENESS=10
+fi
diff --git a/hdfs-autoconf/config-templates/hdfs-site.xml.template b/hdfs-autoconf/config-templates/hdfs-site.xml.template
new file mode 100644
index 00000000..f7d3d73e
--- /dev/null
+++ b/hdfs-autoconf/config-templates/hdfs-site.xml.template
@@ -0,0 +1,118 @@
+<?xml version="1.0"?>
+<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
+
+<!-- Put site-specific property overrides in this file. -->
+
+<configuration>
+
+  <property>
+    <name>dfs.replication</name>
+    <value>1</value>
+  </property>
+
+  <property>
+    <name>dfs.http.address</name>
+    <value>127.0.0.1:50070</value>
+    <description>
+      The address and the base port where the dfs namenode web ui will listen on.
+      If the port is 0 then the server will start on a free port.
+    </description>
+  </property>
+
+  <property>
+    <name>dfs.secondary.http.address</name>
+    <value>0.0.0.0:0</value>
+    <description>
+      The secondary namenode http server address and port.
+      If the port is 0 then the server will start on a free port.
+    </description>
+  </property>
+
+  <property>
+    <name>dfs.blockreport.intervalMsec</name>
+    <value>300000</value>
+    <description>Determines block reporting interval in milliseconds.</description>
+  </property>
+
+  <property>
+    <name>dfs.fullblockreport.magnifier</name>
+    <value>2</value>
+    <description>
+      Determines the full block reporting interval, which is magnifier
+      times the delete block report interval.
+    </description>
+  </property>
+
+  <property>
+    <name>dfs.datanode.address</name>
+    <value>0.0.0.0:0</value>
+    <description>
+      The address where the datanode server will listen to.
+      If the port is 0 then the server will start on a free port.
+    </description>
+  </property>
+
+  <property>
+    <name>dfs.datanode.http.address</name>
+    <value>0.0.0.0:0</value>
+    <description>
+      The datanode http server address and port.
+      If the port is 0 then the server will start on a free port.
+    </description>
+  </property>
+
+  <property>
+    <name>dfs.datanode.ipc.address</name>
+    <value>0.0.0.0:0</value>
+    <description>
+      The datanode ipc server address and port.
+      If the port is 0 then the server will start on a free port.
+    </description>
+  </property>
+
+  <property>
+    <name>dfs.datanode.handler.count</name>
+    <value>3</value>
+    <description>The number of server threads for the datanode.</description>
+  </property>
+
+  <property>
+    <name>dfs.permissions</name>
+    <value>false</value>
+  </property>
+
+  <property>
+    <name>dfs.data.dir</name>
+    <value>{{DataNode-volumes}}</value>
+    <description>Determines where on the local filesystem a DFS data node
+      should store its blocks. If this is a comma-delimited
+      list of directories, then data will be stored in all named
+      directories, typically on different devices.
+      Directories that do not exist are ignored.
+    </description>
+  </property>
+
+  <property>
+    <name>dfs.block.invalidate.limit</name>
+    <value>100</value>
+  </property>
+
+  <property>
+    <name>dfs.safemode.extension</name>
+    <value>10000</value>
+    <description>
+      Determines extension of safe mode in milliseconds
+      after the threshold level is reached.
+    </description>
+  </property>
+
+  <property>
+    <name>dfs.namenode.dn-address</name>
+    <value>localhost:9015</value>
+    <description>
+      The address and port to run the RPC server which will be processing
+      requests from datanodes in the cluster.
+    </description>
+  </property>
+
+</configuration>
diff --git a/hdfs-autoconf/config-templates/run-datanode.sh b/hdfs-autoconf/config-templates/run-datanode.sh
new file mode 100644
index 00000000..95e116ca
--- /dev/null
+++ b/hdfs-autoconf/config-templates/run-datanode.sh
@@ -0,0 +1,12 @@
+#!/bin/bash
+#Usage: bash $LAUNCHPAD_DIR/run.sh [--daemon]
+
+source config.sh
+
+cd ${HADOOP_VERSION}/bin
+if [[ $# > 0 && $1 == "--daemon" ]]; then
+ export HADOOP_PID_DIR="$LOGS_DIRECTORY" && ./hadoop-daemon.sh start avatardatanode
+else
+ ./hadoop avatardatanode
+fi
+
diff --git a/hdfs-autoconf/config-templates/run-one.template b/hdfs-autoconf/config-templates/run-one.template
new file mode 100644
index 00000000..a24524c5
--- /dev/null
+++ b/hdfs-autoconf/config-templates/run-one.template
@@ -0,0 +1,12 @@
+#!/bin/bash
+#Usage: bash $LAUNCHPAD_DIR/run.sh [--daemon]
+
+source config.sh
+
+cd $HADOOP_VERSION/bin
+if [[ $# > 0 && $1 == "--daemon" ]]; then
+ echo "daemon mode"
+ export HADOOP_PID_DIR="$LOGS_DIRECTORY" && ./hadoop-daemon.sh start avatarnode -one -standby;
+else
+ ./hadoop avatarnode -one -standby;
+fi
diff --git a/hdfs-autoconf/config-templates/run-zero.template b/hdfs-autoconf/config-templates/run-zero.template
new file mode 100644
index 00000000..21491edc
--- /dev/null
+++ b/hdfs-autoconf/config-templates/run-zero.template
@@ -0,0 +1,12 @@
+#!/bin/bash
+#Usage: bash $LAUNCHPAD_DIR/run.sh [--daemon]
+
+source config.sh
+
+cd $HADOOP_VERSION/bin
+
+if [[ $# > 0 && $1 == "--daemon" ]]; then
+ export HADOOP_PID_DIR="$LOGS_DIRECTORY" && ./hadoop-daemon.sh start avatarnode -zero;
+else
+ ./hadoop avatarnode -zero;
+fi
diff --git a/hdfs-autoconf/config-templates/zoo.cfg b/hdfs-autoconf/config-templates/zoo.cfg
new file mode 100644
index 00000000..aafb3247
--- /dev/null
+++ b/hdfs-autoconf/config-templates/zoo.cfg
@@ -0,0 +1,25 @@
+# The number of milliseconds of each tick
+tickTime=2000
+# The number of ticks that the initial
+# synchronization phase can take
+initLimit=10
+# The number of ticks that can pass between
+# sending a request and getting an acknowledgement
+syncLimit=5
+# the directory where the snapshot is stored.
+# do not use /tmp for storage, /tmp here is just
+# for example's sake.
+dataDir=/tmp/zookeeper
+# the port at which the clients will connect
+clientPort=2181
+#
+# Be sure to read the maintenance section of the
+# administrator guide before turning on autopurge.
+#
+# http://zookeeper.apache.org/doc/current/zookeeperAdmin.html#sc_maintenance
+#
+# The number of snapshots to retain in dataDir
+#autopurge.snapRetainCount=3
+# Purge task interval in hours
+# Set to "0" to disable auto purge feature
+#autopurge.purgeInterval=1
diff --git a/hdfs-autoconf/config.sh b/hdfs-autoconf/config.sh
new file mode 100644
index 00000000..133e29e0
--- /dev/null
+++ b/hdfs-autoconf/config.sh
@@ -0,0 +1,57 @@
+#!/bin/bash
+
+# This script is sourced by every other script.
+
+# stop execution as soon as any simple command fails
+set -e
+
+# ==================================================
+# CONFIGURE BEFORE USE
+# ==================================================
+
+# This argument specifies the hadoop checkout. The binaries will be run
+# from the ${HADOOP_VERSION}/bin directory, and configuration files are
+# assumed to be located in the ${HADOOP_VERSION}/conf directory.
+# HADOOP_VERSION=
+if [[ -z $HADOOP_VERSION ]]; then
+ HADOOP_VERSION=$(readlink -f ../)
+fi
+
+# This is the directory that will hold all the log files for different
+# instances.
+# DISCLAIMER: Full path must be specified here!
+if [[ -z $LOGS_DIRECTORY ]]; then
+ LOGS_DIRECTORY=$HADOOP_VERSION/logs
+fi
+
+# ===================================================
+# ===================================================
+
+
+METACONF_DIR="./config-meta"
+TEMPLATES_DIR="./config-templates"
+LAUNCHPAD_DIR="./launchpad"
+# This is the glob pattern used to locate the datanode configuration files
+DATANODE_CONFIG_FILES="$METACONF_DIR/avatar-datanode*.sed"
+# This is the file that will exist as long as the cluster is running.
+# Used by start-dev-cluster and stop-dev-cluster scripts
+CLUSTER_IS_RUNNING=$LOGS_DIRECTORY/cluster-is-running-now
+
+
+if ! [[ -d $METACONF_DIR ]]; then
+ echo "Cannot find $METACONF_DIR directory; check config.sh to correct the dir"
+ exit 1
+fi
+
+if ! [[ -d $TEMPLATES_DIR ]]; then
+ echo "Cannot find $TEMPLATES_DIR directory; check config.sh to correct the dir"
+ exit 1
+fi
+
+if ! [[ -d $LAUNCHPAD_DIR ]]; then
+ mkdir -p $LAUNCHPAD_DIR
+fi
+
+if [[ -z $ZOOKEEPER_PATH ]]; then
+ ZOOKEEPER_PATH="`pwd`/../../../VENDOR.zookeeper/fb-trunk/"
+fi
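+
+# Note: HADOOP_VERSION, LOGS_DIRECTORY and ZOOKEEPER_PATH may also be
+# overridden from the environment, e.g. (hypothetical paths):
+#   HADOOP_VERSION=$HOME/hadoop-0.20 LOGS_DIRECTORY=/tmp/hdfs-logs bash start-dev-cluster.sh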
diff --git a/hdfs-autoconf/scripts/common.sh b/hdfs-autoconf/scripts/common.sh
new file mode 100644
index 00000000..9e330b9d
--- /dev/null
+++ b/hdfs-autoconf/scripts/common.sh
@@ -0,0 +1,77 @@
+#!/bin/bash
+
+source config.sh
+
+# Colors!
+# How to use them? See example:
+# echo -e "See the real ${cRED}RED${cRESET} color"
+
+cBLACK='\E[0;30m'
+cRED='\E[0;31m'
+cGREEN='\E[0;32m'
+cYELLOW='\E[0;33m'
+cBLUE='\E[0;34m'
+cMAGENTA='\E[0;35m'
+cCYAN='\E[0;36m'
+cWHITE='\E[1;37m'
+cRESET='\E[00m'
+
+# print a message in red color and abort the script with a failure
+function fail {
+ echo -e "${cRED}$1${cRESET}"
+ exit 1
+}
+
+# This function patches a template file with the given sed scripts. All
+# changes are made in-place.
+#
+# Usage
+#   patch <template-file> <sed-script> [<sed-script> ...]
+function patch {
+  if (( $# < 2 )); then
+    echo "usage: patch <template-file> <sed-script> [<sed-script> ...]"
+    exit 1
+  fi
+ # first argument is a template file to patch
+ template=$1;
+ shift;
+
+  for sedScript in "$@"; do
+ sed -f $sedScript -i $template
+ done;
+}
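+
+# Example (this mirrors how genAvatarConfigFiles below applies it, assuming
+# the default config-meta layout):
+#   patch ${HADOOP_VERSION}/conf/avatar-site.xml $METACONF_DIR/avatar-shared.sed $METACONF_DIR/avatar-zero.sed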
+
+function cleanLaunchpad {
+ if [[ -e $LAUNCHPAD_DIR ]]; then
+ echo "Cleaning $LAUNCHPAD_DIR/ directory.."
+ rm -r $LAUNCHPAD_DIR
+ mkdir $LAUNCHPAD_DIR
+ fi
+}
+
+function genAvatarConfigFiles {
+  if (( $# < 1 )); then
+    echo "Usage: genAvatarConfigFiles <sed-script> [<sed-script> ...]"
+    exit 1;
+  fi
+
+ cp $TEMPLATES_DIR/avatar-site.xml.template ${HADOOP_VERSION}/conf/avatar-site.xml
+ patch ${HADOOP_VERSION}/conf/avatar-site.xml $@
+
+ cp $TEMPLATES_DIR/core-site.xml.template ${HADOOP_VERSION}/conf/core-site.xml
+ patch ${HADOOP_VERSION}/conf/core-site.xml $@
+
+ echo "Config files created."
+}
+
+function genDatanodeLaunchpadFiles {
+  if (( $# < 1 )); then
+    echo "Usage: genDatanodeLaunchpadFiles <sed-script> [<sed-script> ...]"
+    exit 1;
+  fi
+
+ cp $TEMPLATES_DIR/format-avatardatanode.sh.template $LAUNCHPAD_DIR/dn-format
+ patch $LAUNCHPAD_DIR/dn-format $@
+
+ cp $TEMPLATES_DIR/run-datanode.sh $LAUNCHPAD_DIR/run
+}
diff --git a/hdfs-autoconf/scripts/gen-avatar b/hdfs-autoconf/scripts/gen-avatar
new file mode 100644
index 00000000..991a837c
--- /dev/null
+++ b/hdfs-autoconf/scripts/gen-avatar
@@ -0,0 +1,82 @@
+#!/bin/bash
+set -e
+
+if [[ -e scripts ]]; then
+ source scripts/common.sh
+else
+ echo "Lacking /scripts directory. Probably I'm not launched from autoconf
+ directory. Exiting.."
+ exit 1
+fi
+
+if [[ ${PWD##*/} != "hdfs-autoconf" ]]; then
+ fail "The script should be launched from ./hdfs-autoconf directory"
+fi
+
+usage="USAGE
+ bash $(basename $0) [--help] [zero|one]
+
+DESCRIPTION
+  Creates configuration and launch files for the active or the standby
+  avatar namenode by applying the appropriate sed scripts from the
+  $METACONF_DIR directory to the template configuration files and bash
+  scripts from $TEMPLATES_DIR. The scripts used for formatting and
+  launching the nodes can be found in $LAUNCHPAD_DIR.
+
+OPTIONS
+  --help - show this help message
+  zero - generate configuration and bash scripts for launching the active
+    avatar namenode
+  one - generate configuration and bash scripts for launching the standby
+    avatar namenode
+"
+
+if (( $# >= 1 )); then
+ if [[ $1 == "--help" ]]; then
+ echo "$usage";
+ exit 0;
+ fi
+fi
+
+function usageFail {
+ fail "Usage: bash gen-avatar-files.sh [--help] [zero|one]"
+}
+
+# Checking arguments
+if [[ $# == 0 ]]; then
+ echo -e "${cWHITE}NOTE${cRESET}: Niether of arguments (zero|one) given; assuming ZERO as default"
+ echo -e "------"
+ avatarName="zero"
+elif [[ $# == 1 ]]; then
+ avatarName=$(echo $1 | tr '[A-Z]' '[a-z]')
+ if [[ $avatarName != "one" && $avatarName != "zero" ]]; then
+ usageFail
+ fi
+elif (( $# > 1 )); then
+ usageFail
+fi
+
+# setting up a capital name of avatar for future convenience
+capitalAvatarName=$(echo ${avatarName} | tr '[a-z]' '[A-Z]')
+
+# setting up set of configuration files
+configFiles="$METACONF_DIR/avatar-shared.sed $METACONF_DIR/avatar-${avatarName}.sed"
+
+# create files in conf/ directory of HADOOP_VERSION
+echo -e "Populating ${cWHITE}${HADOOP_VERSION}/conf${cRESET} with config files for avatar ${cGREEN}${capitalAvatarName}${cRESET}"
+genAvatarConfigFiles $configFiles
+# copying environment config
+cp $TEMPLATES_DIR/hadoop-env-avatar-${avatarName}.sh ${HADOOP_VERSION}/conf/hadoop-env.sh
+
+# creating $LAUNCHPAD_DIR shell scripts; they will be used to run and format the avatar node
+cleanLaunchpad;
+echo -e "Creating $LAUNCHPAD_DIR files for avatar ${cGREEN}${capitalAvatarName}${cRESET}.."
+cp $TEMPLATES_DIR/run-${avatarName}.template $LAUNCHPAD_DIR/run
+
+# Finish
+echo -e "${cGREEN}AVATAR SUCCESS${cRESET}
+${cWHITE}${HADOOP_VERSION}/conf${cRESET} is configured for avatar node ${cGREEN}${capitalAvatarName}${cRESET}.
+------
+You can start avatar node ${capitalAvatarName} via ${cWHITE}./$LAUNCHPAD_DIR/run${cRESET}"
+
+
diff --git a/hdfs-autoconf/scripts/gen-datanode b/hdfs-autoconf/scripts/gen-datanode
new file mode 100644
index 00000000..b0ad3ce6
--- /dev/null
+++ b/hdfs-autoconf/scripts/gen-datanode
@@ -0,0 +1,58 @@
+#!/bin/bash
+
+if [[ -e scripts ]]; then
+ source scripts/common.sh
+else
+ echo "Lacking /scripts directory. Probably I'm not launched from autoconf
+ directory. Exiting.."
+ exit 1
+fi
+
+if [[ ${PWD##*/} != "hdfs-autoconf" ]]; then
+ fail "The script should be launched from ./hdfs-autoconf directory"
+fi
+
+
+usage="USAGE
+  bash $(basename $0) [--help] <datanode-sed-script>
+
+DESCRIPTION
+  Creates configuration and launch files for an avatar datanode by
+  applying the given sed script to the template configuration files and
+  bash scripts from $TEMPLATES_DIR. The scripts used for formatting and
+  launching the datanode can be found in $LAUNCHPAD_DIR.
+
+OPTIONS
+  --help - show this help message
+  <datanode-sed-script> - a path to the sed script that specifies the
+    datanode configuration. More info about this in README.md
+"
+
+if (( $# >= 1 )); then
+ if [[ $1 == "--help" ]]; then
+ echo "$usage";
+ exit 0;
+ fi
+fi
+if (( $# < 1 )); then
+ fail "$usage"
+fi
+
+cleanLaunchpad;
+
+echo -e "Populating ${cWHITE}${HADOOP_VERSION}/conf${cRESET} with hdfs-site.xml.."
+cp $TEMPLATES_DIR/hdfs-site.xml.template ${HADOOP_VERSION}/conf/hdfs-site.xml
+patch ${HADOOP_VERSION}/conf/hdfs-site.xml $1
+cp $TEMPLATES_DIR/hadoop-env-datanode.sh ${HADOOP_VERSION}/conf/hadoop-env.sh
+
+echo -e "Creating $LAUNCHPAD_DIR files.."
+genDatanodeLaunchpadFiles $1
+
+
+# Finish
+echo -e "${cGREEN}DATANODE SUCCESS${cRESET}
+${cWHITE}${HADOOP_VERSION}/conf${cRESET} is configured for data node.
+------
+You can format configured directories via ${cWHITE}./$LAUNCHPAD_DIR/dn-format${cRESET}
+You can start the data node via ${cWHITE}./$LAUNCHPAD_DIR/run${cRESET}"
+
diff --git a/hdfs-autoconf/start-dev-cluster.sh b/hdfs-autoconf/start-dev-cluster.sh
new file mode 100644
index 00000000..8fda85f4
--- /dev/null
+++ b/hdfs-autoconf/start-dev-cluster.sh
@@ -0,0 +1,104 @@
+#!/bin/bash
+set -e
+
+usage="USAGE
+ bash $(basename $0) [--help] [--format] [--count number | --seekconfig]
+
+DESCRIPTION
+  Launching without arguments is equivalent to specifying the
+  '--seekconfig' argument.
+
+  Starts two avatar namenodes locally and, depending on the option
+  given, some number of datanodes. All the instances are launched
+  as daemons; logs can be tailed from \$LOGS_DIRECTORY.
+
+OPTIONS
+  --help - show this help message
+  --format - formats both namenode and datanode directories for the
+    cluster
+  --count number - if this parameter is given, the script runs the
+    specified number of datanodes simultaneously. Each datanode's
+    configuration file is generated automatically from the template file
+    '\$METACONF_DIR/avatar-datanode.template' by substituting every
+    occurrence of the XXX sequence with the datanode's sequential number.
+    After that a datanode instance is run via the
+    'avatar-datanode-start --conf <config-file> --daemon' command.
+  --seekconfig - use this option if you need more control over the
+    cluster structure. With this option given, the script looks up all
+    avatar datanode configuration files and launches a single datanode
+    per config file. (An avatar datanode configuration file is one whose
+    name matches the pattern \$DATANODE_CONFIG_FILES)
+"
+
+if (( $# >= 1 )); then
+ if [[ $1 == "--help" ]]; then
+ echo "$usage"
+ exit 0;
+ fi
+fi
+
+format=""
+if (( $# >= 1 )); then
+ if [[ $1 == "--format" ]]; then
+ format="--format";
+ shift;
+ fi
+fi
+
+mode="seek";
+
+if (( $# >= 1 )); then
+ if [[ $1 == "--seekconfig" ]]; then
+ shift;
+ elif [[ $1 == "--count" ]]; then
+ shift;
+ mode=$1;
+ shift;
+ fi
+fi
+
+if (( $# > 0 )); then
+ echo "$usage";
+ exit 1;
+fi
+
+
+if [[ ${PWD##*/} != "hdfs-autoconf" ]]; then
+ echo "The script should be launched from ./hdfs-autoconf directory. Exiting.."
+ exit 1
+fi
+
+source scripts/common.sh
+
+if [[ -e $CLUSTER_IS_RUNNING ]]; then
+ fail "The developer cluster is already running!"
+fi
+
+if [[ "$format" == "--format" ]]; then
+ ./avatar-format
+fi
+
+touch $CLUSTER_IS_RUNNING;
+./avatar-zero-start --daemon
+./avatar-one-start --daemon
+
+if [[ $mode == "seek" ]]; then
+ num=0
+ for i in $(ls -1 $DATANODE_CONFIG_FILES); do
+ num=$((num+1))
+ export HADOOP_LOG_DIR=${LOGS_DIRECTORY}/datanode-$num-logs
+ ./avatar-datanode-start $format --conf $i --daemon
+ done
+else
+ while (( $mode > 0 )); do
+ tmp=$(mktemp -t avatar-datanode-autoconfig-$mode.XXX)
+ args="$format --conf $tmp --daemon"
+ cp ${METACONF_DIR}/avatar-datanode.template $tmp
+ sed -i -e "s/XXX/$mode/g" $tmp
+ ./avatar-datanode-start $args
+ rm -f $tmp
+ mode=$((mode-1));
+ done
+fi
+
diff --git a/hdfs-autoconf/stop-dev-cluster.sh b/hdfs-autoconf/stop-dev-cluster.sh
new file mode 100644
index 00000000..72ce27c1
--- /dev/null
+++ b/hdfs-autoconf/stop-dev-cluster.sh
@@ -0,0 +1,40 @@
+#!/bin/bash
+set -e
+usage="USAGE
+ bash $(basename $0) [--help]
+
+DESCRIPTION
+ Stops all the avatar instances that are currently running on the local
+ machine.
+
+OPTIONS
+ --help - show this help message
+"
+
+if [[ ${PWD##*/} != "hdfs-autoconf" ]]; then
+ echo "The script should be launched from ./hdfs-autoconf directory. Exiting.."
+ exit 1
+fi
+
+if (( $# >= 1 )); then
+ if [[ $1 == "--help" ]]; then
+ echo "$usage";
+ exit 0;
+ fi
+fi
+
+if (( $# > 0 )); then
+ echo "$usage";
+ exit 1;
+fi
+
+source scripts/common.sh
+
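+# kill (with SIGKILL) every local Java process whose command line mentions
+# "avatar"; the second whitespace-separated field of `ps aux` is the PID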
+for i in $(ps aux | grep avatar | grep java | tr -s ' ' | cut -d' ' -f2); do
+ kill -9 $i
+done
+
+if [[ -e $CLUSTER_IS_RUNNING ]]; then
+ rm $CLUSTER_IS_RUNNING;
+fi
+
diff --git a/hdfs-autoconf/zookeeper.sh b/hdfs-autoconf/zookeeper.sh
new file mode 100644
index 00000000..cf90b41a
--- /dev/null
+++ b/hdfs-autoconf/zookeeper.sh
@@ -0,0 +1,53 @@
+#!/bin/bash
+
+# Use this script to start or stop the local ZooKeeper service. This is
+# necessary for the local developer cluster to work.
+#
+# Usage: bash zookeeper.sh [start|stop]
+#
+
+set -e
+usage="USAGE
+bash $(basename $0) [--help] (start|stop)
+
+OPTIONS
+ --help - show this help message
+ start - starts zookeeper
+ stop - stops zookeeper
+"
+
+if [[ ${PWD##*/} != "hdfs-autoconf" ]]; then
+ echo "The script should be launched from ./hdfs-autoconf directory. Exiting.."
+ exit 1
+fi
+
+if (( $# >= 1 )); then
+ if [[ $1 == "--help" ]]; then
+    echo "$usage";
+ exit 0;
+ fi
+fi
+
+if (( $# > 0 )); then
+ if [[ "$1" == "start" ]]; then
+ command="start";
+ shift;
+ elif [[ "$1" == "stop" ]]; then
+ command="stop";
+ shift;
+ fi
+fi
+
+if (( $# > 0 )) || [[ -z $command ]]; then
+  echo "$usage"
+  exit 1;
+fi
+
+source config.sh
+
+export ZOO_LOG_DIR=$LOGS_DIRECTORY
+if ! [[ -e $ZOO_LOG_DIR ]]; then
+ mkdir -p $ZOO_LOG_DIR;
+fi
+$ZOOKEEPER_PATH/bin/zkServer.sh $command $PWD/$TEMPLATES_DIR/zoo.cfg
+
diff --git a/ivy.xml b/ivy.xml
index 28983085..43e855f0 100644
--- a/ivy.xml
+++ b/ivy.xml
@@ -274,7 +274,7 @@
conf="common->master"/>
diff --git a/ivy/ivy-2.0.0-rc2.jar b/ivy/ivy-2.1.0.jar
similarity index 55%
rename from ivy/ivy-2.0.0-rc2.jar
rename to ivy/ivy-2.1.0.jar
index fa9ef21c..3902b6fd 100644
Binary files a/ivy/ivy-2.0.0-rc2.jar and b/ivy/ivy-2.1.0.jar differ
diff --git a/ivy/libraries.properties b/ivy/libraries.properties
index dedb8090..e371b632 100644
--- a/ivy/libraries.properties
+++ b/ivy/libraries.properties
@@ -40,7 +40,7 @@ guava.version=r09
hsqldb.version=1.8.0.10
#ivy.version=2.0.0-beta2
-ivy.version=2.0.0-rc2
+ivy.version=2.1.0
jasper.version=5.5.12
#not able to figureout the version of jsp & jsp-api version to get it resolved throught ivy
diff --git a/lib/zookeeper-3.3.1.jar b/lib/zookeeper-3.3.1.jar
deleted file mode 100644
index 3b4cca85..00000000
Binary files a/lib/zookeeper-3.3.1.jar and /dev/null differ
diff --git a/lib/zookeeper-3.4.3.jar b/lib/zookeeper-3.4.3.jar
new file mode 100644
index 00000000..1aeb1c39
Binary files /dev/null and b/lib/zookeeper-3.4.3.jar differ
diff --git a/singleNodeHadoop/coronaConf/corona.xml b/singleNodeHadoop/coronaConf/corona.xml
index 16daa2eb..e435a5af 100755
--- a/singleNodeHadoop/coronaConf/corona.xml
+++ b/singleNodeHadoop/coronaConf/corona.xml
@@ -7,4 +7,6 @@
0.9
60000
30000
+
+
diff --git a/singleNodeHadoop/coronaConf/coronapools.xml b/singleNodeHadoop/coronaConf/coronapools.xml
index af5e2958..4a48091c 100755
--- a/singleNodeHadoop/coronaConf/coronapools.xml
+++ b/singleNodeHadoop/coronaConf/coronapools.xml
@@ -24,5 +24,8 @@
100
+
+ 10
+
diff --git a/singleNodeHadoop/coronaConf/mapred-site.xml b/singleNodeHadoop/coronaConf/mapred-site.xml
index f48afb24..b2c20e82 100644
--- a/singleNodeHadoop/coronaConf/mapred-site.xml
+++ b/singleNodeHadoop/coronaConf/mapred-site.xml
@@ -5,6 +5,17 @@
+  <property>
+    <name>cm.heartbeat.delay.max</name>
+    <value>300000</value>
+  </property>
+
+  <property>
+    <name>cm.config.reload.period.ms</name>
+    <value>5000</value>
+    <description>How often to reload the config</description>
+  </property>
+
    <name>cm.pools.config.file</name>
    <value>coronapools.xml</value>
diff --git a/singleNodeHadoop/singleNodeSwitch.sh b/singleNodeHadoop/singleNodeSwitch.sh
index a2571a5e..b4d62b72 100755
--- a/singleNodeHadoop/singleNodeSwitch.sh
+++ b/singleNodeHadoop/singleNodeSwitch.sh
@@ -8,7 +8,9 @@ else
echo "If this is run for the first time, you should execute 'hadoop namenode -format'."
echo "Then, you can start HDFS with 'start-dfs'."
echo "The HDFS web ui is http://localhost:50070"
- export HADOOP_HOME="$BASE_DIR/../"
+ export HADOOP_HOME="$BASE_DIR/../"
+ export HADOOP_LOG_DIR="$HADOOP_HOME/logs"
+ export HADOOP_PID_DIR="${HADOOP_LOG_DIR}"
if [ "$1" == "corona" ]; then
export HADOOP_CONF_DIR="$BASE_DIR/coronaConf"
export HADOOP_CLASSPATH="${HADOOP_HOME}/src/contrib/corona/lib/libthrift-0.7.0.jar"
diff --git a/src/contrib/benchmark/ivy.xml b/src/contrib/benchmark/ivy.xml
index b1bc1d6e..7da22d85 100644
--- a/src/contrib/benchmark/ivy.xml
+++ b/src/contrib/benchmark/ivy.xml
@@ -40,7 +40,7 @@
conf="common->default"/>
diff --git a/src/contrib/benchmark/src/java/org/apache/hadoop/mapred/SleepJobRunner.java b/src/contrib/benchmark/src/java/org/apache/hadoop/mapred/SleepJobRunner.java
index 233a3c75..b0543578 100644
--- a/src/contrib/benchmark/src/java/org/apache/hadoop/mapred/SleepJobRunner.java
+++ b/src/contrib/benchmark/src/java/org/apache/hadoop/mapred/SleepJobRunner.java
@@ -191,11 +191,12 @@ public static void main(String[] args) throws Exception {
if (t.getNumMappers() == largeJobMappers &&
t.getNumReducers() == largeJobReducers) {
largeJobRuntimes.add(Double.valueOf(t.getRuntime()/1000.0));
- } else if (t.getNumMappers() == largeJobMappers &&
- t.getNumReducers() == largeJobReducers) {
+ } else if (t.getNumMappers() == smallJobMappers &&
+ t.getNumReducers() == smallJobReducers) {
smallJobRuntimes.add(Double.valueOf(t.getRuntime()/1000.0));
} else {
- throw new RuntimeException("Invalid mapper/reducer counts");
+ throw new RuntimeException("Invalid mapper/reducer counts: " +
+ t.getNumMappers() + ", " + t.getNumReducers());
}
}
diff --git a/src/contrib/build-contrib.xml b/src/contrib/build-contrib.xml
index 7847ac4b..9ab54959 100644
--- a/src/contrib/build-contrib.xml
+++ b/src/contrib/build-contrib.xml
@@ -42,7 +42,9 @@
+
+
@@ -261,9 +263,10 @@
+ timeout="${test.timeout}"
+ maxmemory="${test.junit.maxmemory}">
@@ -290,7 +293,7 @@
+ includes="**/${test.include}.java" excludes="**/${test.exclude}.java" />
diff --git a/src/contrib/build.xml b/src/contrib/build.xml
index 08cf66a0..16d932cd 100644
--- a/src/contrib/build.xml
+++ b/src/contrib/build.xml
@@ -36,6 +36,7 @@
+
@@ -44,7 +45,10 @@
-
+
+
+
+
@@ -74,7 +78,7 @@
-
+
@@ -88,9 +92,6 @@
-
-
-
@@ -105,7 +106,6 @@
-
diff --git a/src/contrib/corona/README.txt b/src/contrib/corona/README.txt
new file mode 100644
index 00000000..3858b62f
--- /dev/null
+++ b/src/contrib/corona/README.txt
@@ -0,0 +1,17 @@
+Overview
+---------
+Hadoop Corona is the next version of Map-Reduce. The current Map-Reduce has a single Job Tracker that reached its limits at Facebook. The Job Tracker manages the cluster resource and tracks the state of each job. In Hadoop Corona, the cluster resources are tracked by a central Cluster Manager. Each job gets its own Corona Job Tracker which tracks just that one job. The design provides some key improvements:
+
+- Scalability - The Cluster Manager tracks a small amount of information per job, and the individual Corona Job Trackers do the tracking of the tasks. This provides much better scalability with the number and size of jobs, and removes the need for Admission Control.
+- Latency - task scheduling works in a push model. A Corona Job Tracker pushes resource requests to the Cluster Manager and the Cluster Manager pushes resource grants back to the Corona Job Tracker. After receiving resource grants, the Corona Job Tracker pushes tasks to the Corona Task Tracker. This is in contrast to the current Map-Reduce, where such scheduling decisions happen when heartbeats are received. The latency associated with the heartbeat model becomes important for small jobs.
+- Fairness - the Fair Scheduler in Corona generally does a better job of allocating fair shares of the resources to the pools than the one in Map-Reduce v1.
+- Cluster Utilization - because of lower scheduling overhead, Corona does a better job of keeping Task Trackers supplied with work, so the cluster is utilized more heavily.
+
+Understanding Corona
+--------------------
+A Corona Map-Reduce cluster consists of the following components:
+
+Cluster Manager: There is only one Cluster Manager per cluster. It is responsible for allocating slots to different jobs (using the Fair Scheduler). The Cluster Manager only keeps track of the utilization of different machines in the cluster and the assignment of compute resources to different jobs. It is not responsible for actually running the jobs. The Cluster Manager is agnostic to map-reduce; it can (in the future) be used to schedule compute resources for any parallel computing framework.
+Task Trackers: These are the same as in classic Hadoop. All TTs communicate with the Cluster Manager to report available compute resources. They also communicate with the Job Trackers to actually run Map-Reduce tasks.
+Corona Job Tracker: Implements the job-tracking functionality. It can run in two different modes: as part of the client running the job, or as a task on one of the Task Trackers in the cluster. The first approach gives small jobs better latency; the second is better for larger jobs because it minimizes the heartbeat traffic going in and out of the cluster.
+Proxy Job Tracker: The job details page for a job is served by the Corona Job Tracker while it runs. When the job finishes, the Corona Job Tracker shuts down, so another server is needed to show the job details. To make this seamless, the job URL always points to a Proxy Job Tracker. While the job is running, the proxy redirects to the Corona Job Tracker. When the job is done, a file is written to HDFS, and the Proxy Job Tracker reads this file to get the job details. Additionally, the Proxy Job Tracker stores and reports all of the job metrics aggregated in the cluster.
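+
+A simplified sketch of the push-based scheduling flow described above:
+
+    Corona Job Tracker  --- resource requests --->  Cluster Manager
+    Cluster Manager     --- resource grants ----->  Corona Job Tracker
+    Corona Job Tracker  --- tasks --------------->  Corona Task Tracker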
diff --git a/src/contrib/corona/build.xml b/src/contrib/corona/build.xml
index b789cf77..5257c30b 100644
--- a/src/contrib/corona/build.xml
+++ b/src/contrib/corona/build.xml
@@ -117,6 +117,7 @@
deprecation="${javac.deprecation}">
+
diff --git a/src/contrib/corona/interface/ClusterManager.thrift b/src/contrib/corona/interface/ClusterManager.thrift
index 43f7af4e..92293e49 100644
--- a/src/contrib/corona/interface/ClusterManager.thrift
+++ b/src/contrib/corona/interface/ClusterManager.thrift
@@ -41,7 +41,7 @@ struct ClusterNodeInfo {
1: required string name,
2: required InetAddress address,
3: required ComputeSpecs total,
- 4: optional ComputeSpecs used,
+ 4: optional ComputeSpecs free,
5: optional map resourceInfos
}
@@ -105,6 +105,7 @@ enum SessionStatus {
KILLED,
TIMED_OUT,
KILLED_ABORTED,
+ FAILED_JOBTRACKER,
}
typedef string SessionHandle
@@ -126,6 +127,11 @@ struct SessionInfo {
9: optional i64 deadline
}
+struct HeartbeatArgs{
+ 1: required ResourceRequestId requestId,
+ 2: required ResourceRequestId grantId,
+}
+
struct ClusterManagerInfo {
1: required string url,
2: required string jobHistoryLocation,
@@ -146,6 +152,19 @@ struct RunningSession {
6: optional i64 deadline,
7: optional map runningResources,
}
+
+struct NodeHeartbeatResponse{
+ 1: required bool restartFlag,
+}
+
+struct RestartNodesArgs {
+ 1: required bool force,
+ 2: required i32 batchSize,
+}
+
+struct RestartNodesResponse {
+}
+
exception InvalidSessionHandle {
1: required string handle
}
@@ -154,6 +173,13 @@ exception DisallowedNode {
1: required string host;
}
+exception InvalidPoolInfo {
+ 1: required string poolInfo;
+}
+
+exception SafeModeException {
+}
+
/**
* The Session Driver manages the session for clients.
* The APIs below are invoked by the ClusterManager to convey information back to the
@@ -173,46 +199,61 @@ service SessionDriverService {
* Cluster Manager Service API.
*/
service ClusterManagerService {
+
+ // Get the redirect pool info given the user specified pool info
+ PoolInfoStrings getActualPoolInfo(1: PoolInfoStrings poolInfoString) throws (1: InvalidPoolInfo e, 2: SafeModeException f),
+
// Get a unique session id.
- SessionHandle getNextSessionId(),
+ SessionHandle getNextSessionId() throws (1: SafeModeException e),
// Register a session start, return a handle to the session.
- SessionRegistrationData sessionStart(1: SessionHandle handle, 2: SessionInfo info) throws (1: InvalidSessionHandle e),
+ SessionRegistrationData sessionStart(1: SessionHandle handle, 2: SessionInfo info) throws (1: InvalidSessionHandle e, 2: SafeModeException f),
// Register a URL for the session. An extra call is provided because the
// URL may depend on the sessionId obtained from sessionStart
- void sessionUpdateInfo(1: SessionHandle handle, 2: SessionInfo info) throws (1: InvalidSessionHandle e),
+ void sessionUpdateInfo(1: SessionHandle handle, 2: SessionInfo info) throws (1: InvalidSessionHandle e, 2: SafeModeException f),
// Notify session end.
- void sessionEnd(1: SessionHandle handle, 2: SessionStatus status) throws (1: InvalidSessionHandle e),
+ void sessionEnd(1: SessionHandle handle, 2: SessionStatus status) throws (1: InvalidSessionHandle e, 2: SafeModeException f),
// Heartbeat a session.
- void sessionHeartbeat(1: SessionHandle handle) throws (1: InvalidSessionHandle e),
+ void sessionHeartbeat(1: SessionHandle handle) throws (1: InvalidSessionHandle e, 2: SafeModeException f),
+
+ void sessionHeartbeatV2(1: SessionHandle handle, 2: HeartbeatArgs heartbeatArgs) throws (1: InvalidSessionHandle e, 2: SafeModeException f),
// Request additional resources. A request is required for each resource
// requested.
- void requestResource(1: SessionHandle handle, 2: list requestList) throws (1: InvalidSessionHandle e),
+ void requestResource(1: SessionHandle handle, 2: list requestList) throws (1: InvalidSessionHandle e, 2: SafeModeException f),
// Release granted/requested resources.
- void releaseResource(1: SessionHandle handle, 2: list idList) throws (1: InvalidSessionHandle e),
+ void releaseResource(1: SessionHandle handle, 2: list idList) throws (1: InvalidSessionHandle e, 2: SafeModeException f),
// Heartbeat a cluster node. This is an implicit advertisement of the node's resources
- void nodeHeartbeat(1: ClusterNodeInfo node) throws (1: DisallowedNode e),
+ NodeHeartbeatResponse nodeHeartbeat(1: ClusterNodeInfo node) throws (1: DisallowedNode e, 2: SafeModeException f),
// Feedback from a session on the resources that it was given.
void nodeFeedback(
1: SessionHandle handle,
2: list resourceTypes,
- 3: list stats) throws (1: InvalidSessionHandle e),
+ 3: list stats) throws (1: InvalidSessionHandle e, 2: SafeModeException f),
// Refresh node information.
- void refreshNodes(),
+ void refreshNodes() throws (1: SafeModeException e),
+
+ // Restart task tracker.
+ RestartNodesResponse restartNodes(1: RestartNodesArgs restartNodesArgs) throws (1: SafeModeException e),
// Get the list of currently running sessions
- list getSessions(),
+ list getSessions() throws (1: SafeModeException e),
// Kill one of the currently running sessions
- void killSession(1: string sessionId)
+ void killSession(1: string sessionId) throws (1: SafeModeException e),
+
+ // Switch the Cluster Manager to Safe Mode
+ bool setSafeMode(1: bool safeMode),
+
+ // Persist the Cluster Manager state to disk
+ bool persistState()
}
/**
@@ -225,3 +266,14 @@ service CoronaTaskTrackerService {
// Tell task tracker to reject all actions from this session
void blacklistSession(1: SessionHandle handle) throws (1: InvalidSessionHandle e),
}
+
+/**
+ * Corona ProxyJobTracker Service API.
+ */
+service CoronaProxyJobTrackerService {
+ // Set the clusterManagerSafeMode flag appropriately on the CPJT
+ void setClusterManagerSafeModeFlag(1: bool flagValue)
+
+ // Get the clusterManagerSafeMode flag
+ bool getClusterManagerSafeModeFlag()
+}
diff --git a/src/contrib/corona/ivy.xml b/src/contrib/corona/ivy.xml
index 7e041ddc..3036b434 100644
--- a/src/contrib/corona/ivy.xml
+++ b/src/contrib/corona/ivy.xml
@@ -85,6 +85,10 @@
name="netty"
rev="${netty.version}"
conf="common->master"/>
+
diff --git a/src/contrib/corona/ivy/libraries.properties b/src/contrib/corona/ivy/libraries.properties
index 6391be22..6fd346cf 100644
--- a/src/contrib/corona/ivy/libraries.properties
+++ b/src/contrib/corona/ivy/libraries.properties
@@ -23,6 +23,9 @@ checkstyle.version=5.0
guava.version=r09
-jackson.version=1.0.1
+jackson.version=1.7.9
+
+json.version=20090211
netty.version=3.2.2.Final
+
diff --git a/src/contrib/corona/src/gen-java/org/apache/hadoop/corona/ClusterManagerService.java b/src/contrib/corona/src/gen-java/org/apache/hadoop/corona/ClusterManagerService.java
index 7730a97e..eadad0b4 100644
--- a/src/contrib/corona/src/gen-java/org/apache/hadoop/corona/ClusterManagerService.java
+++ b/src/contrib/corona/src/gen-java/org/apache/hadoop/corona/ClusterManagerService.java
@@ -27,34 +27,46 @@ public class ClusterManagerService {
*/
public interface Iface {
- public String getNextSessionId() throws org.apache.thrift.TException;
+ public PoolInfoStrings getActualPoolInfo(PoolInfoStrings poolInfoString) throws InvalidPoolInfo, SafeModeException, org.apache.thrift.TException;
- public SessionRegistrationData sessionStart(String handle, SessionInfo info) throws InvalidSessionHandle, org.apache.thrift.TException;
+ public String getNextSessionId() throws SafeModeException, org.apache.thrift.TException;
- public void sessionUpdateInfo(String handle, SessionInfo info) throws InvalidSessionHandle, org.apache.thrift.TException;
+ public SessionRegistrationData sessionStart(String handle, SessionInfo info) throws InvalidSessionHandle, SafeModeException, org.apache.thrift.TException;
- public void sessionEnd(String handle, SessionStatus status) throws InvalidSessionHandle, org.apache.thrift.TException;
+ public void sessionUpdateInfo(String handle, SessionInfo info) throws InvalidSessionHandle, SafeModeException, org.apache.thrift.TException;
- public void sessionHeartbeat(String handle) throws InvalidSessionHandle, org.apache.thrift.TException;
+ public void sessionEnd(String handle, SessionStatus status) throws InvalidSessionHandle, SafeModeException, org.apache.thrift.TException;
- public void requestResource(String handle, List requestList) throws InvalidSessionHandle, org.apache.thrift.TException;
+ public void sessionHeartbeat(String handle) throws InvalidSessionHandle, SafeModeException, org.apache.thrift.TException;
- public void releaseResource(String handle, List idList) throws InvalidSessionHandle, org.apache.thrift.TException;
+ public void sessionHeartbeatV2(String handle, HeartbeatArgs heartbeatArgs) throws InvalidSessionHandle, SafeModeException, org.apache.thrift.TException;
- public void nodeHeartbeat(ClusterNodeInfo node) throws DisallowedNode, org.apache.thrift.TException;
+ public void requestResource(String handle, List requestList) throws InvalidSessionHandle, SafeModeException, org.apache.thrift.TException;
- public void nodeFeedback(String handle, List resourceTypes, List stats) throws InvalidSessionHandle, org.apache.thrift.TException;
+ public void releaseResource(String handle, List idList) throws InvalidSessionHandle, SafeModeException, org.apache.thrift.TException;
- public void refreshNodes() throws org.apache.thrift.TException;
+ public NodeHeartbeatResponse nodeHeartbeat(ClusterNodeInfo node) throws DisallowedNode, SafeModeException, org.apache.thrift.TException;
- public List getSessions() throws org.apache.thrift.TException;
+ public void nodeFeedback(String handle, List resourceTypes, List stats) throws InvalidSessionHandle, SafeModeException, org.apache.thrift.TException;
- public void killSession(String sessionId) throws org.apache.thrift.TException;
+ public void refreshNodes() throws SafeModeException, org.apache.thrift.TException;
+
+ public RestartNodesResponse restartNodes(RestartNodesArgs restartNodesArgs) throws SafeModeException, org.apache.thrift.TException;
+
+ public List getSessions() throws SafeModeException, org.apache.thrift.TException;
+
+ public void killSession(String sessionId) throws SafeModeException, org.apache.thrift.TException;
+
+ public boolean setSafeMode(boolean safeMode) throws org.apache.thrift.TException;
+
+ public boolean persistState() throws org.apache.thrift.TException;
}
public interface AsyncIface {
+ public void getActualPoolInfo(PoolInfoStrings poolInfoString, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException;
+
public void getNextSessionId(org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException;
public void sessionStart(String handle, SessionInfo info, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException;
@@ -65,6 +77,8 @@ public interface AsyncIface {
public void sessionHeartbeat(String handle, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException;
+ public void sessionHeartbeatV2(String handle, HeartbeatArgs heartbeatArgs, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException;
+
public void requestResource(String handle, List requestList, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException;
public void releaseResource(String handle, List idList, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException;
@@ -75,10 +89,16 @@ public interface AsyncIface {
public void refreshNodes(org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException;
+ public void restartNodes(RestartNodesArgs restartNodesArgs, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException;
+
public void getSessions(org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException;
public void killSession(String sessionId, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException;
+ public void setSafeMode(boolean safeMode, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException;
+
+ public void persistState(org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException;
+
}
public static class Client extends org.apache.thrift.TServiceClient implements Iface {
@@ -101,7 +121,36 @@ public Client(org.apache.thrift.protocol.TProtocol iprot, org.apache.thrift.prot
super(iprot, oprot);
}
- public String getNextSessionId() throws org.apache.thrift.TException
+ public PoolInfoStrings getActualPoolInfo(PoolInfoStrings poolInfoString) throws InvalidPoolInfo, SafeModeException, org.apache.thrift.TException
+ {
+ send_getActualPoolInfo(poolInfoString);
+ return recv_getActualPoolInfo();
+ }
+
+ public void send_getActualPoolInfo(PoolInfoStrings poolInfoString) throws org.apache.thrift.TException
+ {
+ getActualPoolInfo_args args = new getActualPoolInfo_args();
+ args.setPoolInfoString(poolInfoString);
+ sendBase("getActualPoolInfo", args);
+ }
+
+ public PoolInfoStrings recv_getActualPoolInfo() throws InvalidPoolInfo, SafeModeException, org.apache.thrift.TException
+ {
+ getActualPoolInfo_result result = new getActualPoolInfo_result();
+ receiveBase(result, "getActualPoolInfo");
+ if (result.isSetSuccess()) {
+ return result.success;
+ }
+ if (result.e != null) {
+ throw result.e;
+ }
+ if (result.f != null) {
+ throw result.f;
+ }
+ throw new org.apache.thrift.TApplicationException(org.apache.thrift.TApplicationException.MISSING_RESULT, "getActualPoolInfo failed: unknown result");
+ }
+
+ public String getNextSessionId() throws SafeModeException, org.apache.thrift.TException
{
send_getNextSessionId();
return recv_getNextSessionId();
@@ -113,17 +162,20 @@ public void send_getNextSessionId() throws org.apache.thrift.TException
sendBase("getNextSessionId", args);
}
- public String recv_getNextSessionId() throws org.apache.thrift.TException
+ public String recv_getNextSessionId() throws SafeModeException, org.apache.thrift.TException
{
getNextSessionId_result result = new getNextSessionId_result();
receiveBase(result, "getNextSessionId");
if (result.isSetSuccess()) {
return result.success;
}
+ if (result.e != null) {
+ throw result.e;
+ }
throw new org.apache.thrift.TApplicationException(org.apache.thrift.TApplicationException.MISSING_RESULT, "getNextSessionId failed: unknown result");
}
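
Each synchronous client method above is a thin send_X/recv_X pair over a blocking transport, and the newly declared SafeModeException now reaches callers as a checked Thrift exception. A minimal usage sketch under stated assumptions: the enclosing generated class name (ClusterManagerService) and the host/port are placeholders, since neither appears in this hunk.

import org.apache.thrift.protocol.TBinaryProtocol;
import org.apache.thrift.transport.TSocket;
import org.apache.thrift.transport.TTransport;

public class SessionIdFetcher {
  // Retry getNextSessionId while the ClusterManager is in safe mode.
  public static String fetchSessionId(String host, int port) throws Exception {
    TTransport transport = new TSocket(host, port);   // blocking transport
    transport.open();
    try {
      ClusterManagerService.Client client =
          new ClusterManagerService.Client(new TBinaryProtocol(transport));
      while (true) {
        try {
          return client.getNextSessionId();           // send_ + recv_ under the hood
        } catch (SafeModeException e) {
          Thread.sleep(1000);                         // manager quiesced; back off and retry
        }
      }
    } finally {
      transport.close();
    }
  }
}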
- public SessionRegistrationData sessionStart(String handle, SessionInfo info) throws InvalidSessionHandle, org.apache.thrift.TException
+ public SessionRegistrationData sessionStart(String handle, SessionInfo info) throws InvalidSessionHandle, SafeModeException, org.apache.thrift.TException
{
send_sessionStart(handle, info);
return recv_sessionStart();
@@ -137,7 +189,7 @@ public void send_sessionStart(String handle, SessionInfo info) throws org.apache
sendBase("sessionStart", args);
}
- public SessionRegistrationData recv_sessionStart() throws InvalidSessionHandle, org.apache.thrift.TException
+ public SessionRegistrationData recv_sessionStart() throws InvalidSessionHandle, SafeModeException, org.apache.thrift.TException
{
sessionStart_result result = new sessionStart_result();
receiveBase(result, "sessionStart");
@@ -147,10 +199,13 @@ public SessionRegistrationData recv_sessionStart() throws InvalidSessionHandle,
if (result.e != null) {
throw result.e;
}
+ if (result.f != null) {
+ throw result.f;
+ }
throw new org.apache.thrift.TApplicationException(org.apache.thrift.TApplicationException.MISSING_RESULT, "sessionStart failed: unknown result");
}
- public void sessionUpdateInfo(String handle, SessionInfo info) throws InvalidSessionHandle, org.apache.thrift.TException
+ public void sessionUpdateInfo(String handle, SessionInfo info) throws InvalidSessionHandle, SafeModeException, org.apache.thrift.TException
{
send_sessionUpdateInfo(handle, info);
recv_sessionUpdateInfo();
@@ -164,17 +219,20 @@ public void send_sessionUpdateInfo(String handle, SessionInfo info) throws org.a
sendBase("sessionUpdateInfo", args);
}
- public void recv_sessionUpdateInfo() throws InvalidSessionHandle, org.apache.thrift.TException
+ public void recv_sessionUpdateInfo() throws InvalidSessionHandle, SafeModeException, org.apache.thrift.TException
{
sessionUpdateInfo_result result = new sessionUpdateInfo_result();
receiveBase(result, "sessionUpdateInfo");
if (result.e != null) {
throw result.e;
}
+ if (result.f != null) {
+ throw result.f;
+ }
return;
}
- public void sessionEnd(String handle, SessionStatus status) throws InvalidSessionHandle, org.apache.thrift.TException
+ public void sessionEnd(String handle, SessionStatus status) throws InvalidSessionHandle, SafeModeException, org.apache.thrift.TException
{
send_sessionEnd(handle, status);
recv_sessionEnd();
@@ -188,17 +246,20 @@ public void send_sessionEnd(String handle, SessionStatus status) throws org.apac
sendBase("sessionEnd", args);
}
- public void recv_sessionEnd() throws InvalidSessionHandle, org.apache.thrift.TException
+ public void recv_sessionEnd() throws InvalidSessionHandle, SafeModeException, org.apache.thrift.TException
{
sessionEnd_result result = new sessionEnd_result();
receiveBase(result, "sessionEnd");
if (result.e != null) {
throw result.e;
}
+ if (result.f != null) {
+ throw result.f;
+ }
return;
}
- public void sessionHeartbeat(String handle) throws InvalidSessionHandle, org.apache.thrift.TException
+ public void sessionHeartbeat(String handle) throws InvalidSessionHandle, SafeModeException, org.apache.thrift.TException
{
send_sessionHeartbeat(handle);
recv_sessionHeartbeat();
@@ -211,17 +272,47 @@ public void send_sessionHeartbeat(String handle) throws org.apache.thrift.TExcep
sendBase("sessionHeartbeat", args);
}
- public void recv_sessionHeartbeat() throws InvalidSessionHandle, org.apache.thrift.TException
+ public void recv_sessionHeartbeat() throws InvalidSessionHandle, SafeModeException, org.apache.thrift.TException
{
sessionHeartbeat_result result = new sessionHeartbeat_result();
receiveBase(result, "sessionHeartbeat");
if (result.e != null) {
throw result.e;
}
+ if (result.f != null) {
+ throw result.f;
+ }
+ return;
+ }
+
+ public void sessionHeartbeatV2(String handle, HeartbeatArgs heartbeatArgs) throws InvalidSessionHandle, SafeModeException, org.apache.thrift.TException
+ {
+ send_sessionHeartbeatV2(handle, heartbeatArgs);
+ recv_sessionHeartbeatV2();
+ }
+
+ public void send_sessionHeartbeatV2(String handle, HeartbeatArgs heartbeatArgs) throws org.apache.thrift.TException
+ {
+ sessionHeartbeatV2_args args = new sessionHeartbeatV2_args();
+ args.setHandle(handle);
+ args.setHeartbeatArgs(heartbeatArgs);
+ sendBase("sessionHeartbeatV2", args);
+ }
+
+ public void recv_sessionHeartbeatV2() throws InvalidSessionHandle, SafeModeException, org.apache.thrift.TException
+ {
+ sessionHeartbeatV2_result result = new sessionHeartbeatV2_result();
+ receiveBase(result, "sessionHeartbeatV2");
+ if (result.e != null) {
+ throw result.e;
+ }
+ if (result.f != null) {
+ throw result.f;
+ }
return;
}
- public void requestResource(String handle, List<ResourceRequest> requestList) throws InvalidSessionHandle, org.apache.thrift.TException
+ public void requestResource(String handle, List<ResourceRequest> requestList) throws InvalidSessionHandle, SafeModeException, org.apache.thrift.TException
{
send_requestResource(handle, requestList);
recv_requestResource();
@@ -235,17 +326,20 @@ public void send_requestResource(String handle, List requestLis
sendBase("requestResource", args);
}
- public void recv_requestResource() throws InvalidSessionHandle, org.apache.thrift.TException
+ public void recv_requestResource() throws InvalidSessionHandle, SafeModeException, org.apache.thrift.TException
{
requestResource_result result = new requestResource_result();
receiveBase(result, "requestResource");
if (result.e != null) {
throw result.e;
}
+ if (result.f != null) {
+ throw result.f;
+ }
return;
}
- public void releaseResource(String handle, List<Integer> idList) throws InvalidSessionHandle, org.apache.thrift.TException
+ public void releaseResource(String handle, List<Integer> idList) throws InvalidSessionHandle, SafeModeException, org.apache.thrift.TException
{
send_releaseResource(handle, idList);
recv_releaseResource();
@@ -259,20 +353,23 @@ public void send_releaseResource(String handle, List idList) throws org
sendBase("releaseResource", args);
}
- public void recv_releaseResource() throws InvalidSessionHandle, org.apache.thrift.TException
+ public void recv_releaseResource() throws InvalidSessionHandle, SafeModeException, org.apache.thrift.TException
{
releaseResource_result result = new releaseResource_result();
receiveBase(result, "releaseResource");
if (result.e != null) {
throw result.e;
}
+ if (result.f != null) {
+ throw result.f;
+ }
return;
}
- public void nodeHeartbeat(ClusterNodeInfo node) throws DisallowedNode, org.apache.thrift.TException
+ public NodeHeartbeatResponse nodeHeartbeat(ClusterNodeInfo node) throws DisallowedNode, SafeModeException, org.apache.thrift.TException
{
send_nodeHeartbeat(node);
- recv_nodeHeartbeat();
+ return recv_nodeHeartbeat();
}
public void send_nodeHeartbeat(ClusterNodeInfo node) throws org.apache.thrift.TException
@@ -282,17 +379,23 @@ public void send_nodeHeartbeat(ClusterNodeInfo node) throws org.apache.thrift.TE
sendBase("nodeHeartbeat", args);
}
- public void recv_nodeHeartbeat() throws DisallowedNode, org.apache.thrift.TException
+ public NodeHeartbeatResponse recv_nodeHeartbeat() throws DisallowedNode, SafeModeException, org.apache.thrift.TException
{
nodeHeartbeat_result result = new nodeHeartbeat_result();
receiveBase(result, "nodeHeartbeat");
+ if (result.isSetSuccess()) {
+ return result.success;
+ }
if (result.e != null) {
throw result.e;
}
- return;
+ if (result.f != null) {
+ throw result.f;
+ }
+ throw new org.apache.thrift.TApplicationException(org.apache.thrift.TApplicationException.MISSING_RESULT, "nodeHeartbeat failed: unknown result");
}
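
nodeHeartbeat is the one RPC whose shape changes rather than just gaining a throws clause: it now returns a NodeHeartbeatResponse instead of void. The response struct's fields are not visible in this hunk, so the sketch below (given an already-connected Client as above) only shows the new call shape; the loop structure and intervals are illustrative assumptions.

public class NodeHeartbeater {
  static void heartbeatLoop(ClusterManagerService.Client client, ClusterNodeInfo node)
      throws Exception {
    while (true) {
      try {
        NodeHeartbeatResponse response = client.nodeHeartbeat(node);
        // inspect 'response' here, per whatever directives it carries
      } catch (DisallowedNode e) {
        return;              // this node has been excluded; stop heartbeating
      } catch (SafeModeException e) {
        // manager is quiesced; skip this beat and try again on the next tick
      }
      Thread.sleep(3000);    // illustrative heartbeat interval
    }
  }
}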
- public void nodeFeedback(String handle, List<ResourceType> resourceTypes, List<NodeUsageReport> stats) throws InvalidSessionHandle, org.apache.thrift.TException
+ public void nodeFeedback(String handle, List<ResourceType> resourceTypes, List<NodeUsageReport> stats) throws InvalidSessionHandle, SafeModeException, org.apache.thrift.TException
{
send_nodeFeedback(handle, resourceTypes, stats);
recv_nodeFeedback();
@@ -307,17 +410,20 @@ public void send_nodeFeedback(String handle, List resourceTypes, L
sendBase("nodeFeedback", args);
}
- public void recv_nodeFeedback() throws InvalidSessionHandle, org.apache.thrift.TException
+ public void recv_nodeFeedback() throws InvalidSessionHandle, SafeModeException, org.apache.thrift.TException
{
nodeFeedback_result result = new nodeFeedback_result();
receiveBase(result, "nodeFeedback");
if (result.e != null) {
throw result.e;
}
+ if (result.f != null) {
+ throw result.f;
+ }
return;
}
- public void refreshNodes() throws org.apache.thrift.TException
+ public void refreshNodes() throws SafeModeException, org.apache.thrift.TException
{
send_refreshNodes();
recv_refreshNodes();
@@ -329,14 +435,43 @@ public void send_refreshNodes() throws org.apache.thrift.TException
sendBase("refreshNodes", args);
}
- public void recv_refreshNodes() throws org.apache.thrift.TException
+ public void recv_refreshNodes() throws SafeModeException, org.apache.thrift.TException
{
refreshNodes_result result = new refreshNodes_result();
receiveBase(result, "refreshNodes");
+ if (result.e != null) {
+ throw result.e;
+ }
return;
}
- public List<RunningSession> getSessions() throws org.apache.thrift.TException
+ public RestartNodesResponse restartNodes(RestartNodesArgs restartNodesArgs) throws SafeModeException, org.apache.thrift.TException
+ {
+ send_restartNodes(restartNodesArgs);
+ return recv_restartNodes();
+ }
+
+ public void send_restartNodes(RestartNodesArgs restartNodesArgs) throws org.apache.thrift.TException
+ {
+ restartNodes_args args = new restartNodes_args();
+ args.setRestartNodesArgs(restartNodesArgs);
+ sendBase("restartNodes", args);
+ }
+
+ public RestartNodesResponse recv_restartNodes() throws SafeModeException, org.apache.thrift.TException
+ {
+ restartNodes_result result = new restartNodes_result();
+ receiveBase(result, "restartNodes");
+ if (result.isSetSuccess()) {
+ return result.success;
+ }
+ if (result.e != null) {
+ throw result.e;
+ }
+ throw new org.apache.thrift.TApplicationException(org.apache.thrift.TApplicationException.MISSING_RESULT, "restartNodes failed: unknown result");
+ }
+
+ public List<RunningSession> getSessions() throws SafeModeException, org.apache.thrift.TException
{
send_getSessions();
return recv_getSessions();
@@ -348,17 +483,20 @@ public void send_getSessions() throws org.apache.thrift.TException
sendBase("getSessions", args);
}
- public List<RunningSession> recv_getSessions() throws org.apache.thrift.TException
+ public List<RunningSession> recv_getSessions() throws SafeModeException, org.apache.thrift.TException
{
getSessions_result result = new getSessions_result();
receiveBase(result, "getSessions");
if (result.isSetSuccess()) {
return result.success;
}
+ if (result.e != null) {
+ throw result.e;
+ }
throw new org.apache.thrift.TApplicationException(org.apache.thrift.TApplicationException.MISSING_RESULT, "getSessions failed: unknown result");
}
- public void killSession(String sessionId) throws org.apache.thrift.TException
+ public void killSession(String sessionId) throws SafeModeException, org.apache.thrift.TException
{
send_killSession(sessionId);
recv_killSession();
@@ -371,13 +509,61 @@ public void send_killSession(String sessionId) throws org.apache.thrift.TExcepti
sendBase("killSession", args);
}
- public void recv_killSession() throws org.apache.thrift.TException
+ public void recv_killSession() throws SafeModeException, org.apache.thrift.TException
{
killSession_result result = new killSession_result();
receiveBase(result, "killSession");
+ if (result.e != null) {
+ throw result.e;
+ }
return;
}
+ public boolean setSafeMode(boolean safeMode) throws org.apache.thrift.TException
+ {
+ send_setSafeMode(safeMode);
+ return recv_setSafeMode();
+ }
+
+ public void send_setSafeMode(boolean safeMode) throws org.apache.thrift.TException
+ {
+ setSafeMode_args args = new setSafeMode_args();
+ args.setSafeMode(safeMode);
+ sendBase("setSafeMode", args);
+ }
+
+ public boolean recv_setSafeMode() throws org.apache.thrift.TException
+ {
+ setSafeMode_result result = new setSafeMode_result();
+ receiveBase(result, "setSafeMode");
+ if (result.isSetSuccess()) {
+ return result.success;
+ }
+ throw new org.apache.thrift.TApplicationException(org.apache.thrift.TApplicationException.MISSING_RESULT, "setSafeMode failed: unknown result");
+ }
+
+ public boolean persistState() throws org.apache.thrift.TException
+ {
+ send_persistState();
+ return recv_persistState();
+ }
+
+ public void send_persistState() throws org.apache.thrift.TException
+ {
+ persistState_args args = new persistState_args();
+ sendBase("persistState", args);
+ }
+
+ public boolean recv_persistState() throws org.apache.thrift.TException
+ {
+ persistState_result result = new persistState_result();
+ receiveBase(result, "persistState");
+ if (result.isSetSuccess()) {
+ return result.success;
+ }
+ throw new org.apache.thrift.TApplicationException(org.apache.thrift.TApplicationException.MISSING_RESULT, "persistState failed: unknown result");
+ }
+
}
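
Taken together, setSafeMode and persistState suggest a quiesce, checkpoint, resume sequence around ClusterManager restarts or upgrades; whether the real tooling drives them in exactly this order is an assumption. The client-side call sequence would be:

public class SafeModeCheckpoint {
  static void checkpoint(ClusterManagerService.Client client) throws Exception {
    if (!client.setSafeMode(true)) {
      throw new IllegalStateException("could not enter safe mode");
    }
    try {
      if (!client.persistState()) {
        throw new IllegalStateException("persistState failed; state was not checkpointed");
      }
      // ... a restart or upgrade of the ClusterManager would typically happen here ...
    } finally {
      client.setSafeMode(false);  // resume normal operation
    }
  }
}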
public static class AsyncClient extends org.apache.thrift.async.TAsyncClient implements AsyncIface {
public static class Factory implements org.apache.thrift.async.TAsyncClientFactory<AsyncClient> {
@@ -396,6 +582,38 @@ public AsyncClient(org.apache.thrift.protocol.TProtocolFactory protocolFactory,
super(protocolFactory, clientManager, transport);
}
+ public void getActualPoolInfo(PoolInfoStrings poolInfoString, org.apache.thrift.async.AsyncMethodCallback<getActualPoolInfo_call> resultHandler) throws org.apache.thrift.TException {
+ checkReady();
+ getActualPoolInfo_call method_call = new getActualPoolInfo_call(poolInfoString, resultHandler, this, ___protocolFactory, ___transport);
+ this.___currentMethod = method_call;
+ ___manager.call(method_call);
+ }
+
+ public static class getActualPoolInfo_call extends org.apache.thrift.async.TAsyncMethodCall {
+ private PoolInfoStrings poolInfoString;
+ public getActualPoolInfo_call(PoolInfoStrings poolInfoString, org.apache.thrift.async.AsyncMethodCallback<getActualPoolInfo_call> resultHandler, org.apache.thrift.async.TAsyncClient client, org.apache.thrift.protocol.TProtocolFactory protocolFactory, org.apache.thrift.transport.TNonblockingTransport transport) throws org.apache.thrift.TException {
+ super(client, protocolFactory, transport, resultHandler, false);
+ this.poolInfoString = poolInfoString;
+ }
+
+ public void write_args(org.apache.thrift.protocol.TProtocol prot) throws org.apache.thrift.TException {
+ prot.writeMessageBegin(new org.apache.thrift.protocol.TMessage("getActualPoolInfo", org.apache.thrift.protocol.TMessageType.CALL, 0));
+ getActualPoolInfo_args args = new getActualPoolInfo_args();
+ args.setPoolInfoString(poolInfoString);
+ args.write(prot);
+ prot.writeMessageEnd();
+ }
+
+ public PoolInfoStrings getResult() throws InvalidPoolInfo, SafeModeException, org.apache.thrift.TException {
+ if (getState() != org.apache.thrift.async.TAsyncMethodCall.State.RESPONSE_READ) {
+ throw new IllegalStateException("Method call not finished!");
+ }
+ org.apache.thrift.transport.TMemoryInputTransport memoryTransport = new org.apache.thrift.transport.TMemoryInputTransport(getFrameBuffer().array());
+ org.apache.thrift.protocol.TProtocol prot = client.getProtocolFactory().getProtocol(memoryTransport);
+ return (new Client(prot)).recv_getActualPoolInfo();
+ }
+ }
+
public void getNextSessionId(org.apache.thrift.async.AsyncMethodCallback<getNextSessionId_call> resultHandler) throws org.apache.thrift.TException {
checkReady();
getNextSessionId_call method_call = new getNextSessionId_call(resultHandler, this, ___protocolFactory, ___transport);
@@ -415,7 +633,7 @@ public void write_args(org.apache.thrift.protocol.TProtocol prot) throws org.apa
prot.writeMessageEnd();
}
- public String getResult() throws org.apache.thrift.TException {
+ public String getResult() throws SafeModeException, org.apache.thrift.TException {
if (getState() != org.apache.thrift.async.TAsyncMethodCall.State.RESPONSE_READ) {
throw new IllegalStateException("Method call not finished!");
}
@@ -450,7 +668,7 @@ public void write_args(org.apache.thrift.protocol.TProtocol prot) throws org.apa
prot.writeMessageEnd();
}
- public SessionRegistrationData getResult() throws InvalidSessionHandle, org.apache.thrift.TException {
+ public SessionRegistrationData getResult() throws InvalidSessionHandle, SafeModeException, org.apache.thrift.TException {
if (getState() != org.apache.thrift.async.TAsyncMethodCall.State.RESPONSE_READ) {
throw new IllegalStateException("Method call not finished!");
}
@@ -485,7 +703,7 @@ public void write_args(org.apache.thrift.protocol.TProtocol prot) throws org.apa
prot.writeMessageEnd();
}
- public void getResult() throws InvalidSessionHandle, org.apache.thrift.TException {
+ public void getResult() throws InvalidSessionHandle, SafeModeException, org.apache.thrift.TException {
if (getState() != org.apache.thrift.async.TAsyncMethodCall.State.RESPONSE_READ) {
throw new IllegalStateException("Method call not finished!");
}
@@ -520,7 +738,7 @@ public void write_args(org.apache.thrift.protocol.TProtocol prot) throws org.apa
prot.writeMessageEnd();
}
- public void getResult() throws InvalidSessionHandle, org.apache.thrift.TException {
+ public void getResult() throws InvalidSessionHandle, SafeModeException, org.apache.thrift.TException {
if (getState() != org.apache.thrift.async.TAsyncMethodCall.State.RESPONSE_READ) {
throw new IllegalStateException("Method call not finished!");
}
@@ -552,7 +770,7 @@ public void write_args(org.apache.thrift.protocol.TProtocol prot) throws org.apa
prot.writeMessageEnd();
}
- public void getResult() throws InvalidSessionHandle, org.apache.thrift.TException {
+ public void getResult() throws InvalidSessionHandle, SafeModeException, org.apache.thrift.TException {
if (getState() != org.apache.thrift.async.TAsyncMethodCall.State.RESPONSE_READ) {
throw new IllegalStateException("Method call not finished!");
}
@@ -562,6 +780,41 @@ public void getResult() throws InvalidSessionHandle, org.apache.thrift.TExceptio
}
}
+ public void sessionHeartbeatV2(String handle, HeartbeatArgs heartbeatArgs, org.apache.thrift.async.AsyncMethodCallback<sessionHeartbeatV2_call> resultHandler) throws org.apache.thrift.TException {
+ checkReady();
+ sessionHeartbeatV2_call method_call = new sessionHeartbeatV2_call(handle, heartbeatArgs, resultHandler, this, ___protocolFactory, ___transport);
+ this.___currentMethod = method_call;
+ ___manager.call(method_call);
+ }
+
+ public static class sessionHeartbeatV2_call extends org.apache.thrift.async.TAsyncMethodCall {
+ private String handle;
+ private HeartbeatArgs heartbeatArgs;
+ public sessionHeartbeatV2_call(String handle, HeartbeatArgs heartbeatArgs, org.apache.thrift.async.AsyncMethodCallback<sessionHeartbeatV2_call> resultHandler, org.apache.thrift.async.TAsyncClient client, org.apache.thrift.protocol.TProtocolFactory protocolFactory, org.apache.thrift.transport.TNonblockingTransport transport) throws org.apache.thrift.TException {
+ super(client, protocolFactory, transport, resultHandler, false);
+ this.handle = handle;
+ this.heartbeatArgs = heartbeatArgs;
+ }
+
+ public void write_args(org.apache.thrift.protocol.TProtocol prot) throws org.apache.thrift.TException {
+ prot.writeMessageBegin(new org.apache.thrift.protocol.TMessage("sessionHeartbeatV2", org.apache.thrift.protocol.TMessageType.CALL, 0));
+ sessionHeartbeatV2_args args = new sessionHeartbeatV2_args();
+ args.setHandle(handle);
+ args.setHeartbeatArgs(heartbeatArgs);
+ args.write(prot);
+ prot.writeMessageEnd();
+ }
+
+ public void getResult() throws InvalidSessionHandle, SafeModeException, org.apache.thrift.TException {
+ if (getState() != org.apache.thrift.async.TAsyncMethodCall.State.RESPONSE_READ) {
+ throw new IllegalStateException("Method call not finished!");
+ }
+ org.apache.thrift.transport.TMemoryInputTransport memoryTransport = new org.apache.thrift.transport.TMemoryInputTransport(getFrameBuffer().array());
+ org.apache.thrift.protocol.TProtocol prot = client.getProtocolFactory().getProtocol(memoryTransport);
+ (new Client(prot)).recv_sessionHeartbeatV2();
+ }
+ }
+
public void requestResource(String handle, List<ResourceRequest> requestList, org.apache.thrift.async.AsyncMethodCallback<requestResource_call> resultHandler) throws org.apache.thrift.TException {
checkReady();
requestResource_call method_call = new requestResource_call(handle, requestList, resultHandler, this, ___protocolFactory, ___transport);
@@ -587,7 +840,7 @@ public void write_args(org.apache.thrift.protocol.TProtocol prot) throws org.apa
prot.writeMessageEnd();
}
- public void getResult() throws InvalidSessionHandle, org.apache.thrift.TException {
+ public void getResult() throws InvalidSessionHandle, SafeModeException, org.apache.thrift.TException {
if (getState() != org.apache.thrift.async.TAsyncMethodCall.State.RESPONSE_READ) {
throw new IllegalStateException("Method call not finished!");
}
@@ -622,7 +875,7 @@ public void write_args(org.apache.thrift.protocol.TProtocol prot) throws org.apa
prot.writeMessageEnd();
}
- public void getResult() throws InvalidSessionHandle, org.apache.thrift.TException {
+ public void getResult() throws InvalidSessionHandle, SafeModeException, org.apache.thrift.TException {
if (getState() != org.apache.thrift.async.TAsyncMethodCall.State.RESPONSE_READ) {
throw new IllegalStateException("Method call not finished!");
}
@@ -654,13 +907,13 @@ public void write_args(org.apache.thrift.protocol.TProtocol prot) throws org.apa
prot.writeMessageEnd();
}
- public void getResult() throws DisallowedNode, org.apache.thrift.TException {
+ public NodeHeartbeatResponse getResult() throws DisallowedNode, SafeModeException, org.apache.thrift.TException {
if (getState() != org.apache.thrift.async.TAsyncMethodCall.State.RESPONSE_READ) {
throw new IllegalStateException("Method call not finished!");
}
org.apache.thrift.transport.TMemoryInputTransport memoryTransport = new org.apache.thrift.transport.TMemoryInputTransport(getFrameBuffer().array());
org.apache.thrift.protocol.TProtocol prot = client.getProtocolFactory().getProtocol(memoryTransport);
- (new Client(prot)).recv_nodeHeartbeat();
+ return (new Client(prot)).recv_nodeHeartbeat();
}
}
@@ -692,7 +945,7 @@ public void write_args(org.apache.thrift.protocol.TProtocol prot) throws org.apa
prot.writeMessageEnd();
}
- public void getResult() throws InvalidSessionHandle, org.apache.thrift.TException {
+ public void getResult() throws InvalidSessionHandle, SafeModeException, org.apache.thrift.TException {
if (getState() != org.apache.thrift.async.TAsyncMethodCall.State.RESPONSE_READ) {
throw new IllegalStateException("Method call not finished!");
}
@@ -721,7 +974,7 @@ public void write_args(org.apache.thrift.protocol.TProtocol prot) throws org.apa
prot.writeMessageEnd();
}
- public void getResult() throws org.apache.thrift.TException {
+ public void getResult() throws SafeModeException, org.apache.thrift.TException {
if (getState() != org.apache.thrift.async.TAsyncMethodCall.State.RESPONSE_READ) {
throw new IllegalStateException("Method call not finished!");
}
@@ -731,6 +984,38 @@ public void getResult() throws org.apache.thrift.TException {
}
}
+ public void restartNodes(RestartNodesArgs restartNodesArgs, org.apache.thrift.async.AsyncMethodCallback<restartNodes_call> resultHandler) throws org.apache.thrift.TException {
+ checkReady();
+ restartNodes_call method_call = new restartNodes_call(restartNodesArgs, resultHandler, this, ___protocolFactory, ___transport);
+ this.___currentMethod = method_call;
+ ___manager.call(method_call);
+ }
+
+ public static class restartNodes_call extends org.apache.thrift.async.TAsyncMethodCall {
+ private RestartNodesArgs restartNodesArgs;
+ public restartNodes_call(RestartNodesArgs restartNodesArgs, org.apache.thrift.async.AsyncMethodCallback<restartNodes_call> resultHandler, org.apache.thrift.async.TAsyncClient client, org.apache.thrift.protocol.TProtocolFactory protocolFactory, org.apache.thrift.transport.TNonblockingTransport transport) throws org.apache.thrift.TException {
+ super(client, protocolFactory, transport, resultHandler, false);
+ this.restartNodesArgs = restartNodesArgs;
+ }
+
+ public void write_args(org.apache.thrift.protocol.TProtocol prot) throws org.apache.thrift.TException {
+ prot.writeMessageBegin(new org.apache.thrift.protocol.TMessage("restartNodes", org.apache.thrift.protocol.TMessageType.CALL, 0));
+ restartNodes_args args = new restartNodes_args();
+ args.setRestartNodesArgs(restartNodesArgs);
+ args.write(prot);
+ prot.writeMessageEnd();
+ }
+
+ public RestartNodesResponse getResult() throws SafeModeException, org.apache.thrift.TException {
+ if (getState() != org.apache.thrift.async.TAsyncMethodCall.State.RESPONSE_READ) {
+ throw new IllegalStateException("Method call not finished!");
+ }
+ org.apache.thrift.transport.TMemoryInputTransport memoryTransport = new org.apache.thrift.transport.TMemoryInputTransport(getFrameBuffer().array());
+ org.apache.thrift.protocol.TProtocol prot = client.getProtocolFactory().getProtocol(memoryTransport);
+ return (new Client(prot)).recv_restartNodes();
+ }
+ }
+
public void getSessions(org.apache.thrift.async.AsyncMethodCallback<getSessions_call> resultHandler) throws org.apache.thrift.TException {
checkReady();
getSessions_call method_call = new getSessions_call(resultHandler, this, ___protocolFactory, ___transport);
@@ -750,7 +1035,7 @@ public void write_args(org.apache.thrift.protocol.TProtocol prot) throws org.apa
prot.writeMessageEnd();
}
- public List<RunningSession> getResult() throws org.apache.thrift.TException {
+ public List<RunningSession> getResult() throws SafeModeException, org.apache.thrift.TException {
if (getState() != org.apache.thrift.async.TAsyncMethodCall.State.RESPONSE_READ) {
throw new IllegalStateException("Method call not finished!");
}
@@ -782,7 +1067,7 @@ public void write_args(org.apache.thrift.protocol.TProtocol prot) throws org.apa
prot.writeMessageEnd();
}
- public void getResult() throws org.apache.thrift.TException {
+ public void getResult() throws SafeModeException, org.apache.thrift.TException {
if (getState() != org.apache.thrift.async.TAsyncMethodCall.State.RESPONSE_READ) {
throw new IllegalStateException("Method call not finished!");
}
@@ -792,6 +1077,67 @@ public void getResult() throws org.apache.thrift.TException {
}
}
+ public void setSafeMode(boolean safeMode, org.apache.thrift.async.AsyncMethodCallback<setSafeMode_call> resultHandler) throws org.apache.thrift.TException {
+ checkReady();
+ setSafeMode_call method_call = new setSafeMode_call(safeMode, resultHandler, this, ___protocolFactory, ___transport);
+ this.___currentMethod = method_call;
+ ___manager.call(method_call);
+ }
+
+ public static class setSafeMode_call extends org.apache.thrift.async.TAsyncMethodCall {
+ private boolean safeMode;
+ public setSafeMode_call(boolean safeMode, org.apache.thrift.async.AsyncMethodCallback<setSafeMode_call> resultHandler, org.apache.thrift.async.TAsyncClient client, org.apache.thrift.protocol.TProtocolFactory protocolFactory, org.apache.thrift.transport.TNonblockingTransport transport) throws org.apache.thrift.TException {
+ super(client, protocolFactory, transport, resultHandler, false);
+ this.safeMode = safeMode;
+ }
+
+ public void write_args(org.apache.thrift.protocol.TProtocol prot) throws org.apache.thrift.TException {
+ prot.writeMessageBegin(new org.apache.thrift.protocol.TMessage("setSafeMode", org.apache.thrift.protocol.TMessageType.CALL, 0));
+ setSafeMode_args args = new setSafeMode_args();
+ args.setSafeMode(safeMode);
+ args.write(prot);
+ prot.writeMessageEnd();
+ }
+
+ public boolean getResult() throws org.apache.thrift.TException {
+ if (getState() != org.apache.thrift.async.TAsyncMethodCall.State.RESPONSE_READ) {
+ throw new IllegalStateException("Method call not finished!");
+ }
+ org.apache.thrift.transport.TMemoryInputTransport memoryTransport = new org.apache.thrift.transport.TMemoryInputTransport(getFrameBuffer().array());
+ org.apache.thrift.protocol.TProtocol prot = client.getProtocolFactory().getProtocol(memoryTransport);
+ return (new Client(prot)).recv_setSafeMode();
+ }
+ }
+
+ public void persistState(org.apache.thrift.async.AsyncMethodCallback<persistState_call> resultHandler) throws org.apache.thrift.TException {
+ checkReady();
+ persistState_call method_call = new persistState_call(resultHandler, this, ___protocolFactory, ___transport);
+ this.___currentMethod = method_call;
+ ___manager.call(method_call);
+ }
+
+ public static class persistState_call extends org.apache.thrift.async.TAsyncMethodCall {
+ public persistState_call(org.apache.thrift.async.AsyncMethodCallback<persistState_call> resultHandler, org.apache.thrift.async.TAsyncClient client, org.apache.thrift.protocol.TProtocolFactory protocolFactory, org.apache.thrift.transport.TNonblockingTransport transport) throws org.apache.thrift.TException {
+ super(client, protocolFactory, transport, resultHandler, false);
+ }
+
+ public void write_args(org.apache.thrift.protocol.TProtocol prot) throws org.apache.thrift.TException {
+ prot.writeMessageBegin(new org.apache.thrift.protocol.TMessage("persistState", org.apache.thrift.protocol.TMessageType.CALL, 0));
+ persistState_args args = new persistState_args();
+ args.write(prot);
+ prot.writeMessageEnd();
+ }
+
+ public boolean getResult() throws org.apache.thrift.TException {
+ if (getState() != org.apache.thrift.async.TAsyncMethodCall.State.RESPONSE_READ) {
+ throw new IllegalStateException("Method call not finished!");
+ }
+ org.apache.thrift.transport.TMemoryInputTransport memoryTransport = new org.apache.thrift.transport.TMemoryInputTransport(getFrameBuffer().array());
+ org.apache.thrift.protocol.TProtocol prot = client.getProtocolFactory().getProtocol(memoryTransport);
+ return (new Client(prot)).recv_persistState();
+ }
+ }
+
}
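
The AsyncClient mirrors every synchronous call in callback form over a non-blocking transport; each X_call class buffers the framed reply and decodes it through the matching recv_X above. A hedged sketch of driving the new setSafeMode asynchronously (service and host names are again assumptions):

import org.apache.thrift.async.AsyncMethodCallback;
import org.apache.thrift.async.TAsyncClientManager;
import org.apache.thrift.protocol.TBinaryProtocol;
import org.apache.thrift.transport.TNonblockingSocket;

public class AsyncSafeModeExample {
  static void setSafeModeAsync(String host, int port) throws Exception {
    ClusterManagerService.AsyncClient client = new ClusterManagerService.AsyncClient(
        new TBinaryProtocol.Factory(), new TAsyncClientManager(),
        new TNonblockingSocket(host, port));
    client.setSafeMode(true,
        new AsyncMethodCallback<ClusterManagerService.AsyncClient.setSafeMode_call>() {
          public void onComplete(ClusterManagerService.AsyncClient.setSafeMode_call response) {
            try {
              boolean entered = response.getResult();  // decoded via recv_setSafeMode
            } catch (Exception e) {
              // decode/transport failure
            }
          }
          public void onError(Exception e) {
            // the call never completed
          }
        });
  }
}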
public static class Processor<I extends Iface> extends org.apache.thrift.TBaseProcessor<I> implements org.apache.thrift.TProcessor {
@@ -805,21 +1151,48 @@ protected Processor(I iface, Map<String, org.apache.thrif
private static <I extends Iface> Map<String, org.apache.thrift.ProcessFunction<I, ? extends org.apache.thrift.TBase>> getProcessMap(Map<String, org.apache.thrift.ProcessFunction<I, ? extends org.apache.thrift.TBase>> processMap) {
+ processMap.put("getActualPoolInfo", new getActualPoolInfo());
processMap.put("getNextSessionId", new getNextSessionId());
processMap.put("sessionStart", new sessionStart());
processMap.put("sessionUpdateInfo", new sessionUpdateInfo());
processMap.put("sessionEnd", new sessionEnd());
processMap.put("sessionHeartbeat", new sessionHeartbeat());
+ processMap.put("sessionHeartbeatV2", new sessionHeartbeatV2());
processMap.put("requestResource", new requestResource());
processMap.put("releaseResource", new releaseResource());
processMap.put("nodeHeartbeat", new nodeHeartbeat());
processMap.put("nodeFeedback", new nodeFeedback());
processMap.put("refreshNodes", new refreshNodes());
+ processMap.put("restartNodes", new restartNodes());
processMap.put("getSessions", new getSessions());
processMap.put("killSession", new killSession());
+ processMap.put("setSafeMode", new setSafeMode());
+ processMap.put("persistState", new persistState());
return processMap;
}
+ private static class getActualPoolInfo<I extends Iface> extends org.apache.thrift.ProcessFunction<I, getActualPoolInfo_args> {
+ public getActualPoolInfo() {
+ super("getActualPoolInfo");
+ }
+
+ protected getActualPoolInfo_args getEmptyArgsInstance() {
+ return new getActualPoolInfo_args();
+ }
+
+ protected getActualPoolInfo_result getResult(I iface, getActualPoolInfo_args args) throws org.apache.thrift.TException {
+ getActualPoolInfo_result result = new getActualPoolInfo_result();
+ try {
+ result.success = iface.getActualPoolInfo(args.poolInfoString);
+ } catch (InvalidPoolInfo e) {
+ result.e = e;
+ } catch (SafeModeException f) {
+ result.f = f;
+ }
+ return result;
+ }
+ }
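
Note the server-side convention at work in getResult above: each exception declared in the IDL (here InvalidPoolInfo and SafeModeException) is caught and stored in a field of the result struct, so it rides back to the caller in an ordinary Thrift reply; an undeclared exception would typically escape and surface as a TApplicationException instead. Every handler added below follows the same catch-into-result pattern.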
+
private static class getNextSessionId<I extends Iface> extends org.apache.thrift.ProcessFunction<I, getNextSessionId_args> {
public getNextSessionId() {
super("getNextSessionId");
@@ -831,7 +1204,11 @@ protected getNextSessionId_args getEmptyArgsInstance() {
protected getNextSessionId_result getResult(I iface, getNextSessionId_args args) throws org.apache.thrift.TException {
getNextSessionId_result result = new getNextSessionId_result();
- result.success = iface.getNextSessionId();
+ try {
+ result.success = iface.getNextSessionId();
+ } catch (SafeModeException e) {
+ result.e = e;
+ }
return result;
}
}
@@ -851,6 +1228,8 @@ protected sessionStart_result getResult(I iface, sessionStart_args args) throws
result.success = iface.sessionStart(args.handle, args.info);
} catch (InvalidSessionHandle e) {
result.e = e;
+ } catch (SafeModeException f) {
+ result.f = f;
}
return result;
}
@@ -871,6 +1250,8 @@ protected sessionUpdateInfo_result getResult(I iface, sessionUpdateInfo_args arg
iface.sessionUpdateInfo(args.handle, args.info);
} catch (InvalidSessionHandle e) {
result.e = e;
+ } catch (SafeModeException f) {
+ result.f = f;
}
return result;
}
@@ -891,6 +1272,8 @@ protected sessionEnd_result getResult(I iface, sessionEnd_args args) throws org.
iface.sessionEnd(args.handle, args.status);
} catch (InvalidSessionHandle e) {
result.e = e;
+ } catch (SafeModeException f) {
+ result.f = f;
}
return result;
}
@@ -911,6 +1294,30 @@ protected sessionHeartbeat_result getResult(I iface, sessionHeartbeat_args args)
iface.sessionHeartbeat(args.handle);
} catch (InvalidSessionHandle e) {
result.e = e;
+ } catch (SafeModeException f) {
+ result.f = f;
+ }
+ return result;
+ }
+ }
+
+ private static class sessionHeartbeatV2<I extends Iface> extends org.apache.thrift.ProcessFunction<I, sessionHeartbeatV2_args> {
+ public sessionHeartbeatV2() {
+ super("sessionHeartbeatV2");
+ }
+
+ protected sessionHeartbeatV2_args getEmptyArgsInstance() {
+ return new sessionHeartbeatV2_args();
+ }
+
+ protected sessionHeartbeatV2_result getResult(I iface, sessionHeartbeatV2_args args) throws org.apache.thrift.TException {
+ sessionHeartbeatV2_result result = new sessionHeartbeatV2_result();
+ try {
+ iface.sessionHeartbeatV2(args.handle, args.heartbeatArgs);
+ } catch (InvalidSessionHandle e) {
+ result.e = e;
+ } catch (SafeModeException f) {
+ result.f = f;
}
return result;
}
@@ -931,6 +1338,8 @@ protected requestResource_result getResult(I iface, requestResource_args args) t
iface.requestResource(args.handle, args.requestList);
} catch (InvalidSessionHandle e) {
result.e = e;
+ } catch (SafeModeException f) {
+ result.f = f;
}
return result;
}
@@ -951,6 +1360,8 @@ protected releaseResource_result getResult(I iface, releaseResource_args args) t
iface.releaseResource(args.handle, args.idList);
} catch (InvalidSessionHandle e) {
result.e = e;
+ } catch (SafeModeException f) {
+ result.f = f;
}
return result;
}
@@ -968,9 +1379,11 @@ protected nodeHeartbeat_args getEmptyArgsInstance() {
protected nodeHeartbeat_result getResult(I iface, nodeHeartbeat_args args) throws org.apache.thrift.TException {
nodeHeartbeat_result result = new nodeHeartbeat_result();
try {
- iface.nodeHeartbeat(args.node);
+ result.success = iface.nodeHeartbeat(args.node);
} catch (DisallowedNode e) {
result.e = e;
+ } catch (SafeModeException f) {
+ result.f = f;
}
return result;
}
@@ -991,6 +1404,8 @@ protected nodeFeedback_result getResult(I iface, nodeFeedback_args args) throws
iface.nodeFeedback(args.handle, args.resourceTypes, args.stats);
} catch (InvalidSessionHandle e) {
result.e = e;
+ } catch (SafeModeException f) {
+ result.f = f;
}
return result;
}
@@ -1007,7 +1422,31 @@ protected refreshNodes_args getEmptyArgsInstance() {
protected refreshNodes_result getResult(I iface, refreshNodes_args args) throws org.apache.thrift.TException {
refreshNodes_result result = new refreshNodes_result();
- iface.refreshNodes();
+ try {
+ iface.refreshNodes();
+ } catch (SafeModeException e) {
+ result.e = e;
+ }
+ return result;
+ }
+ }
+
+ private static class restartNodes<I extends Iface> extends org.apache.thrift.ProcessFunction<I, restartNodes_args> {
+ public restartNodes() {
+ super("restartNodes");
+ }
+
+ protected restartNodes_args getEmptyArgsInstance() {
+ return new restartNodes_args();
+ }
+
+ protected restartNodes_result getResult(I iface, restartNodes_args args) throws org.apache.thrift.TException {
+ restartNodes_result result = new restartNodes_result();
+ try {
+ result.success = iface.restartNodes(args.restartNodesArgs);
+ } catch (SafeModeException e) {
+ result.e = e;
+ }
return result;
}
}
@@ -1023,7 +1462,11 @@ protected getSessions_args getEmptyArgsInstance() {
protected getSessions_result getResult(I iface, getSessions_args args) throws org.apache.thrift.TException {
getSessions_result result = new getSessions_result();
- result.success = iface.getSessions();
+ try {
+ result.success = iface.getSessions();
+ } catch (SafeModeException e) {
+ result.e = e;
+ }
return result;
}
}
@@ -1039,21 +1482,61 @@ protected killSession_args getEmptyArgsInstance() {
protected killSession_result getResult(I iface, killSession_args args) throws org.apache.thrift.TException {
killSession_result result = new killSession_result();
- iface.killSession(args.sessionId);
- return result;
+ try {
+ iface.killSession(args.sessionId);
+ } catch (SafeModeException e) {
+ result.e = e;
+ }
+ return result;
+ }
+ }
+
+ private static class setSafeMode<I extends Iface> extends org.apache.thrift.ProcessFunction<I, setSafeMode_args> {
+ public setSafeMode() {
+ super("setSafeMode");
+ }
+
+ protected setSafeMode_args getEmptyArgsInstance() {
+ return new setSafeMode_args();
+ }
+
+ protected setSafeMode_result getResult(I iface, setSafeMode_args args) throws org.apache.thrift.TException {
+ setSafeMode_result result = new setSafeMode_result();
+ result.success = iface.setSafeMode(args.safeMode);
+ result.setSuccessIsSet(true);
+ return result;
+ }
+ }
+
+ private static class persistState<I extends Iface> extends org.apache.thrift.ProcessFunction<I, persistState_args> {
+ public persistState() {
+ super("persistState");
+ }
+
+ protected persistState_args getEmptyArgsInstance() {
+ return new persistState_args();
+ }
+
+ protected persistState_result getResult(I iface, persistState_args args) throws org.apache.thrift.TException {
+ persistState_result result = new persistState_result();
+ result.success = iface.persistState();
+ result.setSuccessIsSet(true);
+ return result;
}
}
}
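
On the server side nothing new is required to expose the added RPCs: the expanded process map wires each method name to its ProcessFunction exactly as before. A generic Thrift wiring sketch, not Corona's actual server bootstrap (ClusterManagerHandler is the hypothetical handler fragment sketched earlier):

import org.apache.thrift.server.TServer;
import org.apache.thrift.server.TThreadPoolServer;
import org.apache.thrift.transport.TServerSocket;

public class ClusterManagerServer {
  static void serve(int port) throws Exception {
    ClusterManagerService.Processor<ClusterManagerHandler> processor =
        new ClusterManagerService.Processor<ClusterManagerHandler>(new ClusterManagerHandler());
    TServerSocket serverTransport = new TServerSocket(port);
    TServer server = new TThreadPoolServer(
        new TThreadPoolServer.Args(serverTransport).processor(processor));
    server.serve();  // blocks; each RPC name is dispatched through the process map above
  }
}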
- public static class getNextSessionId_args implements org.apache.thrift.TBase<getNextSessionId_args, getNextSessionId_args._Fields>, java.io.Serializable, Cloneable {
- private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("getNextSessionId_args");
+ public static class getActualPoolInfo_args implements org.apache.thrift.TBase<getActualPoolInfo_args, getActualPoolInfo_args._Fields>, java.io.Serializable, Cloneable {
+ private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("getActualPoolInfo_args");
+ private static final org.apache.thrift.protocol.TField POOL_INFO_STRING_FIELD_DESC = new org.apache.thrift.protocol.TField("poolInfoString", org.apache.thrift.protocol.TType.STRUCT, (short)1);
+ public PoolInfoStrings poolInfoString; // required
/** The set of fields this struct contains, along with convenience methods for finding and manipulating them. */
public enum _Fields implements org.apache.thrift.TFieldIdEnum {
-;
+ POOL_INFO_STRING((short)1, "poolInfoString");
private static final Map<String, _Fields> byName = new HashMap<String, _Fields>();
@@ -1068,6 +1551,8 @@ public enum _Fields implements org.apache.thrift.TFieldIdEnum {
*/
public static _Fields findByThriftId(int fieldId) {
switch(fieldId) {
+ case 1: // POOL_INFO_STRING
+ return POOL_INFO_STRING;
default:
return null;
}
@@ -1106,37 +1591,88 @@ public String getFieldName() {
return _fieldName;
}
}
+
+ // isset id assignments
+
public static final Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap;
static {
Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class);
+ tmpMap.put(_Fields.POOL_INFO_STRING, new org.apache.thrift.meta_data.FieldMetaData("poolInfoString", org.apache.thrift.TFieldRequirementType.DEFAULT,
+ new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT, PoolInfoStrings.class)));
metaDataMap = Collections.unmodifiableMap(tmpMap);
- org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(getNextSessionId_args.class, metaDataMap);
+ org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(getActualPoolInfo_args.class, metaDataMap);
}
- public getNextSessionId_args() {
+ public getActualPoolInfo_args() {
+ }
+
+ public getActualPoolInfo_args(
+ PoolInfoStrings poolInfoString)
+ {
+ this();
+ this.poolInfoString = poolInfoString;
}
/**
* Performs a deep copy on other.
*/
- public getNextSessionId_args(getNextSessionId_args other) {
+ public getActualPoolInfo_args(getActualPoolInfo_args other) {
+ if (other.isSetPoolInfoString()) {
+ this.poolInfoString = new PoolInfoStrings(other.poolInfoString);
+ }
}
- public getNextSessionId_args deepCopy() {
- return new getNextSessionId_args(this);
+ public getActualPoolInfo_args deepCopy() {
+ return new getActualPoolInfo_args(this);
}
@Override
public void clear() {
+ this.poolInfoString = null;
+ }
+
+ public PoolInfoStrings getPoolInfoString() {
+ return this.poolInfoString;
+ }
+
+ public getActualPoolInfo_args setPoolInfoString(PoolInfoStrings poolInfoString) {
+ this.poolInfoString = poolInfoString;
+ return this;
+ }
+
+ public void unsetPoolInfoString() {
+ this.poolInfoString = null;
+ }
+
+ /** Returns true if field poolInfoString is set (has been assigned a value) and false otherwise */
+ public boolean isSetPoolInfoString() {
+ return this.poolInfoString != null;
+ }
+
+ public void setPoolInfoStringIsSet(boolean value) {
+ if (!value) {
+ this.poolInfoString = null;
+ }
}
public void setFieldValue(_Fields field, Object value) {
switch (field) {
+ case POOL_INFO_STRING:
+ if (value == null) {
+ unsetPoolInfoString();
+ } else {
+ setPoolInfoString((PoolInfoStrings)value);
+ }
+ break;
+
}
}
public Object getFieldValue(_Fields field) {
switch (field) {
+ case POOL_INFO_STRING:
+ return getPoolInfoString();
+
}
throw new IllegalStateException();
}
@@ -1148,6 +1684,8 @@ public boolean isSet(_Fields field) {
}
switch (field) {
+ case POOL_INFO_STRING:
+ return isSetPoolInfoString();
}
throw new IllegalStateException();
}
@@ -1156,15 +1694,24 @@ public boolean isSet(_Fields field) {
public boolean equals(Object that) {
if (that == null)
return false;
- if (that instanceof getNextSessionId_args)
- return this.equals((getNextSessionId_args)that);
+ if (that instanceof getActualPoolInfo_args)
+ return this.equals((getActualPoolInfo_args)that);
return false;
}
- public boolean equals(getNextSessionId_args that) {
+ public boolean equals(getActualPoolInfo_args that) {
if (that == null)
return false;
+ boolean this_present_poolInfoString = true && this.isSetPoolInfoString();
+ boolean that_present_poolInfoString = true && that.isSetPoolInfoString();
+ if (this_present_poolInfoString || that_present_poolInfoString) {
+ if (!(this_present_poolInfoString && that_present_poolInfoString))
+ return false;
+ if (!this.poolInfoString.equals(that.poolInfoString))
+ return false;
+ }
+
return true;
}
@@ -1173,14 +1720,24 @@ public int hashCode() {
return 0;
}
- public int compareTo(getNextSessionId_args other) {
+ public int compareTo(getActualPoolInfo_args other) {
if (!getClass().equals(other.getClass())) {
return getClass().getName().compareTo(other.getClass().getName());
}
int lastComparison = 0;
- getNextSessionId_args typedOther = (getNextSessionId_args)other;
+ getActualPoolInfo_args typedOther = (getActualPoolInfo_args)other;
+ lastComparison = Boolean.valueOf(isSetPoolInfoString()).compareTo(typedOther.isSetPoolInfoString());
+ if (lastComparison != 0) {
+ return lastComparison;
+ }
+ if (isSetPoolInfoString()) {
+ lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.poolInfoString, typedOther.poolInfoString);
+ if (lastComparison != 0) {
+ return lastComparison;
+ }
+ }
return 0;
}
@@ -1198,6 +1755,14 @@ public void read(org.apache.thrift.protocol.TProtocol iprot) throws org.apache.t
break;
}
switch (field.id) {
+ case 1: // POOL_INFO_STRING
+ if (field.type == org.apache.thrift.protocol.TType.STRUCT) {
+ this.poolInfoString = new PoolInfoStrings();
+ this.poolInfoString.read(iprot);
+ } else {
+ org.apache.thrift.protocol.TProtocolUtil.skip(iprot, field.type);
+ }
+ break;
default:
org.apache.thrift.protocol.TProtocolUtil.skip(iprot, field.type);
}
@@ -1213,15 +1778,27 @@ public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache.
validate();
oprot.writeStructBegin(STRUCT_DESC);
+ if (this.poolInfoString != null) {
+ oprot.writeFieldBegin(POOL_INFO_STRING_FIELD_DESC);
+ this.poolInfoString.write(oprot);
+ oprot.writeFieldEnd();
+ }
oprot.writeFieldStop();
oprot.writeStructEnd();
}
@Override
public String toString() {
- StringBuilder sb = new StringBuilder("getNextSessionId_args(");
+ StringBuilder sb = new StringBuilder("getActualPoolInfo_args(");
boolean first = true;
+ sb.append("poolInfoString:");
+ if (this.poolInfoString == null) {
+ sb.append("null");
+ } else {
+ sb.append(this.poolInfoString);
+ }
+ first = false;
sb.append(")");
return sb.toString();
}
@@ -1248,16 +1825,22 @@ private void readObject(java.io.ObjectInputStream in) throws java.io.IOException
}
- public static class getNextSessionId_result implements org.apache.thrift.TBase<getNextSessionId_result, getNextSessionId_result._Fields>, java.io.Serializable, Cloneable {
- private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("getNextSessionId_result");
+ public static class getActualPoolInfo_result implements org.apache.thrift.TBase<getActualPoolInfo_result, getActualPoolInfo_result._Fields>, java.io.Serializable, Cloneable {
+ private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("getActualPoolInfo_result");
- private static final org.apache.thrift.protocol.TField SUCCESS_FIELD_DESC = new org.apache.thrift.protocol.TField("success", org.apache.thrift.protocol.TType.STRING, (short)0);
+ private static final org.apache.thrift.protocol.TField SUCCESS_FIELD_DESC = new org.apache.thrift.protocol.TField("success", org.apache.thrift.protocol.TType.STRUCT, (short)0);
+ private static final org.apache.thrift.protocol.TField E_FIELD_DESC = new org.apache.thrift.protocol.TField("e", org.apache.thrift.protocol.TType.STRUCT, (short)1);
+ private static final org.apache.thrift.protocol.TField F_FIELD_DESC = new org.apache.thrift.protocol.TField("f", org.apache.thrift.protocol.TType.STRUCT, (short)2);
- public String success; // required
+ public PoolInfoStrings success; // required
+ public InvalidPoolInfo e; // required
+ public SafeModeException f; // required
/** The set of fields this struct contains, along with convenience methods for finding and manipulating them. */
public enum _Fields implements org.apache.thrift.TFieldIdEnum {
- SUCCESS((short)0, "success");
+ SUCCESS((short)0, "success"),
+ E((short)1, "e"),
+ F((short)2, "f");
private static final Map<String, _Fields> byName = new HashMap<String, _Fields>();
@@ -1274,6 +1857,10 @@ public static _Fields findByThriftId(int fieldId) {
switch(fieldId) {
case 0: // SUCCESS
return SUCCESS;
+ case 1: // E
+ return E;
+ case 2: // F
+ return F;
default:
return null;
}
@@ -1319,44 +1906,60 @@ public String getFieldName() {
static {
Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class);
tmpMap.put(_Fields.SUCCESS, new org.apache.thrift.meta_data.FieldMetaData("success", org.apache.thrift.TFieldRequirementType.DEFAULT,
- new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING , "SessionHandle")));
+ new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT, PoolInfoStrings.class)));
+ tmpMap.put(_Fields.E, new org.apache.thrift.meta_data.FieldMetaData("e", org.apache.thrift.TFieldRequirementType.DEFAULT,
+ new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRUCT)));
+ tmpMap.put(_Fields.F, new org.apache.thrift.meta_data.FieldMetaData("f", org.apache.thrift.TFieldRequirementType.DEFAULT,
+ new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRUCT)));
metaDataMap = Collections.unmodifiableMap(tmpMap);
- org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(getNextSessionId_result.class, metaDataMap);
+ org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(getActualPoolInfo_result.class, metaDataMap);
}
- public getNextSessionId_result() {
+ public getActualPoolInfo_result() {
}
- public getNextSessionId_result(
- String success)
+ public getActualPoolInfo_result(
+ PoolInfoStrings success,
+ InvalidPoolInfo e,
+ SafeModeException f)
{
this();
this.success = success;
+ this.e = e;
+ this.f = f;
}
/**
* Performs a deep copy on other.
*/
- public getNextSessionId_result(getNextSessionId_result other) {
+ public getActualPoolInfo_result(getActualPoolInfo_result other) {
if (other.isSetSuccess()) {
- this.success = other.success;
+ this.success = new PoolInfoStrings(other.success);
+ }
+ if (other.isSetE()) {
+ this.e = new InvalidPoolInfo(other.e);
+ }
+ if (other.isSetF()) {
+ this.f = new SafeModeException(other.f);
}
}
- public getNextSessionId_result deepCopy() {
- return new getNextSessionId_result(this);
+ public getActualPoolInfo_result deepCopy() {
+ return new getActualPoolInfo_result(this);
}
@Override
public void clear() {
this.success = null;
+ this.e = null;
+ this.f = null;
}
- public String getSuccess() {
+ public PoolInfoStrings getSuccess() {
return this.success;
}
- public getNextSessionId_result setSuccess(String success) {
+ public getActualPoolInfo_result setSuccess(PoolInfoStrings success) {
this.success = success;
return this;
}
@@ -1376,13 +1979,77 @@ public void setSuccessIsSet(boolean value) {
}
}
+ public InvalidPoolInfo getE() {
+ return this.e;
+ }
+
+ public getActualPoolInfo_result setE(InvalidPoolInfo e) {
+ this.e = e;
+ return this;
+ }
+
+ public void unsetE() {
+ this.e = null;
+ }
+
+ /** Returns true if field e is set (has been assigned a value) and false otherwise */
+ public boolean isSetE() {
+ return this.e != null;
+ }
+
+ public void setEIsSet(boolean value) {
+ if (!value) {
+ this.e = null;
+ }
+ }
+
+ public SafeModeException getF() {
+ return this.f;
+ }
+
+ public getActualPoolInfo_result setF(SafeModeException f) {
+ this.f = f;
+ return this;
+ }
+
+ public void unsetF() {
+ this.f = null;
+ }
+
+ /** Returns true if field f is set (has been assigned a value) and false otherwise */
+ public boolean isSetF() {
+ return this.f != null;
+ }
+
+ public void setFIsSet(boolean value) {
+ if (!value) {
+ this.f = null;
+ }
+ }
+
public void setFieldValue(_Fields field, Object value) {
switch (field) {
case SUCCESS:
if (value == null) {
unsetSuccess();
} else {
- setSuccess((String)value);
+ setSuccess((PoolInfoStrings)value);
+ }
+ break;
+
+ case E:
+ if (value == null) {
+ unsetE();
+ } else {
+ setE((InvalidPoolInfo)value);
+ }
+ break;
+
+ case F:
+ if (value == null) {
+ unsetF();
+ } else {
+ setF((SafeModeException)value);
}
break;
@@ -1394,6 +2061,12 @@ public Object getFieldValue(_Fields field) {
case SUCCESS:
return getSuccess();
+ case E:
+ return getE();
+
+ case F:
+ return getF();
+
}
throw new IllegalStateException();
}
@@ -1407,6 +2080,10 @@ public boolean isSet(_Fields field) {
switch (field) {
case SUCCESS:
return isSetSuccess();
+ case E:
+ return isSetE();
+ case F:
+ return isSetF();
}
throw new IllegalStateException();
}
@@ -1415,12 +2092,12 @@ public boolean isSet(_Fields field) {
public boolean equals(Object that) {
if (that == null)
return false;
- if (that instanceof getNextSessionId_result)
- return this.equals((getNextSessionId_result)that);
+ if (that instanceof getActualPoolInfo_result)
+ return this.equals((getActualPoolInfo_result)that);
return false;
}
- public boolean equals(getNextSessionId_result that) {
+ public boolean equals(getActualPoolInfo_result that) {
if (that == null)
return false;
@@ -1433,6 +2110,24 @@ public boolean equals(getNextSessionId_result that) {
return false;
}
+ boolean this_present_e = true && this.isSetE();
+ boolean that_present_e = true && that.isSetE();
+ if (this_present_e || that_present_e) {
+ if (!(this_present_e && that_present_e))
+ return false;
+ if (!this.e.equals(that.e))
+ return false;
+ }
+
+ boolean this_present_f = true && this.isSetF();
+ boolean that_present_f = true && that.isSetF();
+ if (this_present_f || that_present_f) {
+ if (!(this_present_f && that_present_f))
+ return false;
+ if (!this.f.equals(that.f))
+ return false;
+ }
+
return true;
}
@@ -1441,13 +2136,13 @@ public int hashCode() {
return 0;
}
- public int compareTo(getNextSessionId_result other) {
+ public int compareTo(getActualPoolInfo_result other) {
if (!getClass().equals(other.getClass())) {
return getClass().getName().compareTo(other.getClass().getName());
}
int lastComparison = 0;
- getNextSessionId_result typedOther = (getNextSessionId_result)other;
+ getActualPoolInfo_result typedOther = (getActualPoolInfo_result)other;
lastComparison = Boolean.valueOf(isSetSuccess()).compareTo(typedOther.isSetSuccess());
if (lastComparison != 0) {
@@ -1459,6 +2154,26 @@ public int compareTo(getNextSessionId_result other) {
return lastComparison;
}
}
+ lastComparison = Boolean.valueOf(isSetE()).compareTo(typedOther.isSetE());
+ if (lastComparison != 0) {
+ return lastComparison;
+ }
+ if (isSetE()) {
+ lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.e, typedOther.e);
+ if (lastComparison != 0) {
+ return lastComparison;
+ }
+ }
+ lastComparison = Boolean.valueOf(isSetF()).compareTo(typedOther.isSetF());
+ if (lastComparison != 0) {
+ return lastComparison;
+ }
+ if (isSetF()) {
+ lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.f, typedOther.f);
+ if (lastComparison != 0) {
+ return lastComparison;
+ }
+ }
return 0;
}
@@ -1477,8 +2192,25 @@ public void read(org.apache.thrift.protocol.TProtocol iprot) throws org.apache.t
}
switch (field.id) {
case 0: // SUCCESS
- if (field.type == org.apache.thrift.protocol.TType.STRING) {
- this.success = iprot.readString();
+ if (field.type == org.apache.thrift.protocol.TType.STRUCT) {
+ this.success = new PoolInfoStrings();
+ this.success.read(iprot);
+ } else {
+ org.apache.thrift.protocol.TProtocolUtil.skip(iprot, field.type);
+ }
+ break;
+ case 1: // E
+ if (field.type == org.apache.thrift.protocol.TType.STRUCT) {
+ this.e = new InvalidPoolInfo();
+ this.e.read(iprot);
+ } else {
+ org.apache.thrift.protocol.TProtocolUtil.skip(iprot, field.type);
+ }
+ break;
+ case 2: // F
+ if (field.type == org.apache.thrift.protocol.TType.STRUCT) {
+ this.f = new SafeModeException();
+ this.f.read(iprot);
} else {
org.apache.thrift.protocol.TProtocolUtil.skip(iprot, field.type);
}
@@ -1499,7 +2231,15 @@ public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache.
if (this.isSetSuccess()) {
oprot.writeFieldBegin(SUCCESS_FIELD_DESC);
- oprot.writeString(this.success);
+ this.success.write(oprot);
+ oprot.writeFieldEnd();
+ } else if (this.isSetE()) {
+ oprot.writeFieldBegin(E_FIELD_DESC);
+ this.e.write(oprot);
+ oprot.writeFieldEnd();
+ } else if (this.isSetF()) {
+ oprot.writeFieldBegin(F_FIELD_DESC);
+ this.f.write(oprot);
oprot.writeFieldEnd();
}
oprot.writeFieldStop();
@@ -1508,7 +2248,7 @@ public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache.
@Override
public String toString() {
- StringBuilder sb = new StringBuilder("getNextSessionId_result(");
+ StringBuilder sb = new StringBuilder("getActualPoolInfo_result(");
boolean first = true;
sb.append("success:");
@@ -1518,6 +2258,22 @@ public String toString() {
sb.append(this.success);
}
first = false;
+ if (!first) sb.append(", ");
+ sb.append("e:");
+ if (this.e == null) {
+ sb.append("null");
+ } else {
+ sb.append(this.e);
+ }
+ first = false;
+ if (!first) sb.append(", ");
+ sb.append("f:");
+ if (this.f == null) {
+ sb.append("null");
+ } else {
+ sb.append(this.f);
+ }
+ first = false;
sb.append(")");
return sb.toString();
}
@@ -1544,19 +2300,14 @@ private void readObject(java.io.ObjectInputStream in) throws java.io.IOException
}
- public static class sessionStart_args implements org.apache.thrift.TBase<sessionStart_args, sessionStart_args._Fields>, java.io.Serializable, Cloneable {
- private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("sessionStart_args");
+ public static class getNextSessionId_args implements org.apache.thrift.TBase<getNextSessionId_args, getNextSessionId_args._Fields>, java.io.Serializable, Cloneable {
+ private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("getNextSessionId_args");
- private static final org.apache.thrift.protocol.TField HANDLE_FIELD_DESC = new org.apache.thrift.protocol.TField("handle", org.apache.thrift.protocol.TType.STRING, (short)1);
- private static final org.apache.thrift.protocol.TField INFO_FIELD_DESC = new org.apache.thrift.protocol.TField("info", org.apache.thrift.protocol.TType.STRUCT, (short)2);
- public String handle; // required
- public SessionInfo info; // required
/** The set of fields this struct contains, along with convenience methods for finding and manipulating them. */
public enum _Fields implements org.apache.thrift.TFieldIdEnum {
- HANDLE((short)1, "handle"),
- INFO((short)2, "info");
+;
private static final Map byName = new HashMap();
@@ -1571,10 +2322,6 @@ public enum _Fields implements org.apache.thrift.TFieldIdEnum {
*/
public static _Fields findByThriftId(int fieldId) {
switch(fieldId) {
- case 1: // HANDLE
- return HANDLE;
- case 2: // INFO
- return INFO;
default:
return null;
}
@@ -1613,131 +2360,37 @@ public String getFieldName() {
return _fieldName;
}
}
-
- // isset id assignments
-
public static final Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap;
static {
Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class);
- tmpMap.put(_Fields.HANDLE, new org.apache.thrift.meta_data.FieldMetaData("handle", org.apache.thrift.TFieldRequirementType.DEFAULT,
- new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING , "SessionHandle")));
- tmpMap.put(_Fields.INFO, new org.apache.thrift.meta_data.FieldMetaData("info", org.apache.thrift.TFieldRequirementType.DEFAULT,
- new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT, SessionInfo.class)));
metaDataMap = Collections.unmodifiableMap(tmpMap);
- org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(sessionStart_args.class, metaDataMap);
- }
-
- public sessionStart_args() {
+ org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(getNextSessionId_args.class, metaDataMap);
}
- public sessionStart_args(
- String handle,
- SessionInfo info)
- {
- this();
- this.handle = handle;
- this.info = info;
+ public getNextSessionId_args() {
}
/**
* Performs a deep copy on other.
*/
- public sessionStart_args(sessionStart_args other) {
- if (other.isSetHandle()) {
- this.handle = other.handle;
- }
- if (other.isSetInfo()) {
- this.info = new SessionInfo(other.info);
- }
+ public getNextSessionId_args(getNextSessionId_args other) {
}
- public sessionStart_args deepCopy() {
- return new sessionStart_args(this);
+ public getNextSessionId_args deepCopy() {
+ return new getNextSessionId_args(this);
}
@Override
public void clear() {
- this.handle = null;
- this.info = null;
- }
-
- public String getHandle() {
- return this.handle;
- }
-
- public sessionStart_args setHandle(String handle) {
- this.handle = handle;
- return this;
- }
-
- public void unsetHandle() {
- this.handle = null;
- }
-
- /** Returns true if field handle is set (has been assigned a value) and false otherwise */
- public boolean isSetHandle() {
- return this.handle != null;
- }
-
- public void setHandleIsSet(boolean value) {
- if (!value) {
- this.handle = null;
- }
- }
-
- public SessionInfo getInfo() {
- return this.info;
- }
-
- public sessionStart_args setInfo(SessionInfo info) {
- this.info = info;
- return this;
- }
-
- public void unsetInfo() {
- this.info = null;
- }
-
- /** Returns true if field info is set (has been assigned a value) and false otherwise */
- public boolean isSetInfo() {
- return this.info != null;
- }
-
- public void setInfoIsSet(boolean value) {
- if (!value) {
- this.info = null;
- }
}
public void setFieldValue(_Fields field, Object value) {
switch (field) {
- case HANDLE:
- if (value == null) {
- unsetHandle();
- } else {
- setHandle((String)value);
- }
- break;
-
- case INFO:
- if (value == null) {
- unsetInfo();
- } else {
- setInfo((SessionInfo)value);
- }
- break;
-
}
}
public Object getFieldValue(_Fields field) {
switch (field) {
- case HANDLE:
- return getHandle();
-
- case INFO:
- return getInfo();
-
}
throw new IllegalStateException();
}
@@ -1749,10 +2402,6 @@ public boolean isSet(_Fields field) {
}
switch (field) {
- case HANDLE:
- return isSetHandle();
- case INFO:
- return isSetInfo();
}
throw new IllegalStateException();
}
@@ -1761,33 +2410,15 @@ public boolean isSet(_Fields field) {
public boolean equals(Object that) {
if (that == null)
return false;
- if (that instanceof sessionStart_args)
- return this.equals((sessionStart_args)that);
+ if (that instanceof getNextSessionId_args)
+ return this.equals((getNextSessionId_args)that);
return false;
}
- public boolean equals(sessionStart_args that) {
+ public boolean equals(getNextSessionId_args that) {
if (that == null)
return false;
- boolean this_present_handle = true && this.isSetHandle();
- boolean that_present_handle = true && that.isSetHandle();
- if (this_present_handle || that_present_handle) {
- if (!(this_present_handle && that_present_handle))
- return false;
- if (!this.handle.equals(that.handle))
- return false;
- }
-
- boolean this_present_info = true && this.isSetInfo();
- boolean that_present_info = true && that.isSetInfo();
- if (this_present_info || that_present_info) {
- if (!(this_present_info && that_present_info))
- return false;
- if (!this.info.equals(that.info))
- return false;
- }
-
return true;
}
@@ -1796,34 +2427,14 @@ public int hashCode() {
return 0;
}
- public int compareTo(sessionStart_args other) {
+ public int compareTo(getNextSessionId_args other) {
if (!getClass().equals(other.getClass())) {
return getClass().getName().compareTo(other.getClass().getName());
}
int lastComparison = 0;
- sessionStart_args typedOther = (sessionStart_args)other;
+ getNextSessionId_args typedOther = (getNextSessionId_args)other;
- lastComparison = Boolean.valueOf(isSetHandle()).compareTo(typedOther.isSetHandle());
- if (lastComparison != 0) {
- return lastComparison;
- }
- if (isSetHandle()) {
- lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.handle, typedOther.handle);
- if (lastComparison != 0) {
- return lastComparison;
- }
- }
- lastComparison = Boolean.valueOf(isSetInfo()).compareTo(typedOther.isSetInfo());
- if (lastComparison != 0) {
- return lastComparison;
- }
- if (isSetInfo()) {
- lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.info, typedOther.info);
- if (lastComparison != 0) {
- return lastComparison;
- }
- }
return 0;
}
@@ -1841,21 +2452,6 @@ public void read(org.apache.thrift.protocol.TProtocol iprot) throws org.apache.t
break;
}
switch (field.id) {
- case 1: // HANDLE
- if (field.type == org.apache.thrift.protocol.TType.STRING) {
- this.handle = iprot.readString();
- } else {
- org.apache.thrift.protocol.TProtocolUtil.skip(iprot, field.type);
- }
- break;
- case 2: // INFO
- if (field.type == org.apache.thrift.protocol.TType.STRUCT) {
- this.info = new SessionInfo();
- this.info.read(iprot);
- } else {
- org.apache.thrift.protocol.TProtocolUtil.skip(iprot, field.type);
- }
- break;
default:
org.apache.thrift.protocol.TProtocolUtil.skip(iprot, field.type);
}
@@ -1871,40 +2467,15 @@ public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache.
validate();
oprot.writeStructBegin(STRUCT_DESC);
- if (this.handle != null) {
- oprot.writeFieldBegin(HANDLE_FIELD_DESC);
- oprot.writeString(this.handle);
- oprot.writeFieldEnd();
- }
- if (this.info != null) {
- oprot.writeFieldBegin(INFO_FIELD_DESC);
- this.info.write(oprot);
- oprot.writeFieldEnd();
- }
oprot.writeFieldStop();
oprot.writeStructEnd();
}
@Override
public String toString() {
- StringBuilder sb = new StringBuilder("sessionStart_args(");
+ StringBuilder sb = new StringBuilder("getNextSessionId_args(");
boolean first = true;
- sb.append("handle:");
- if (this.handle == null) {
- sb.append("null");
- } else {
- sb.append(this.handle);
- }
- first = false;
- if (!first) sb.append(", ");
- sb.append("info:");
- if (this.info == null) {
- sb.append("null");
- } else {
- sb.append(this.info);
- }
- first = false;
sb.append(")");
return sb.toString();
}
@@ -1931,14 +2502,14 @@ private void readObject(java.io.ObjectInputStream in) throws java.io.IOException
}
- public static class sessionStart_result implements org.apache.thrift.TBase, java.io.Serializable, Cloneable {
- private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("sessionStart_result");
+ public static class getNextSessionId_result implements org.apache.thrift.TBase, java.io.Serializable, Cloneable {
+ private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("getNextSessionId_result");
- private static final org.apache.thrift.protocol.TField SUCCESS_FIELD_DESC = new org.apache.thrift.protocol.TField("success", org.apache.thrift.protocol.TType.STRUCT, (short)0);
+ private static final org.apache.thrift.protocol.TField SUCCESS_FIELD_DESC = new org.apache.thrift.protocol.TField("success", org.apache.thrift.protocol.TType.STRING, (short)0);
private static final org.apache.thrift.protocol.TField E_FIELD_DESC = new org.apache.thrift.protocol.TField("e", org.apache.thrift.protocol.TType.STRUCT, (short)1);
- public SessionRegistrationData success; // required
- public InvalidSessionHandle e; // required
+ public String success; // required
+ public SafeModeException e; // required
/** The set of fields this struct contains, along with convenience methods for finding and manipulating them. */
public enum _Fields implements org.apache.thrift.TFieldIdEnum {
@@ -2007,19 +2578,19 @@ public String getFieldName() {
static {
Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class);
tmpMap.put(_Fields.SUCCESS, new org.apache.thrift.meta_data.FieldMetaData("success", org.apache.thrift.TFieldRequirementType.DEFAULT,
- new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT, SessionRegistrationData.class)));
+ new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING , "SessionHandle")));
tmpMap.put(_Fields.E, new org.apache.thrift.meta_data.FieldMetaData("e", org.apache.thrift.TFieldRequirementType.DEFAULT,
new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRUCT)));
metaDataMap = Collections.unmodifiableMap(tmpMap);
- org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(sessionStart_result.class, metaDataMap);
+ org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(getNextSessionId_result.class, metaDataMap);
}
- public sessionStart_result() {
+ public getNextSessionId_result() {
}
- public sessionStart_result(
- SessionRegistrationData success,
- InvalidSessionHandle e)
+ public getNextSessionId_result(
+ String success,
+ SafeModeException e)
{
this();
this.success = success;
@@ -2029,17 +2600,17 @@ public sessionStart_result(
/**
* Performs a deep copy on other.
*/
- public sessionStart_result(sessionStart_result other) {
+ public getNextSessionId_result(getNextSessionId_result other) {
if (other.isSetSuccess()) {
- this.success = new SessionRegistrationData(other.success);
+ this.success = other.success;
}
if (other.isSetE()) {
- this.e = new InvalidSessionHandle(other.e);
+ this.e = new SafeModeException(other.e);
}
}
- public sessionStart_result deepCopy() {
- return new sessionStart_result(this);
+ public getNextSessionId_result deepCopy() {
+ return new getNextSessionId_result(this);
}
@Override
@@ -2048,11 +2619,11 @@ public void clear() {
this.e = null;
}
- public SessionRegistrationData getSuccess() {
+ public String getSuccess() {
return this.success;
}
- public sessionStart_result setSuccess(SessionRegistrationData success) {
+ public getNextSessionId_result setSuccess(String success) {
this.success = success;
return this;
}
@@ -2072,11 +2643,11 @@ public void setSuccessIsSet(boolean value) {
}
}
- public InvalidSessionHandle getE() {
+ public SafeModeException getE() {
return this.e;
}
- public sessionStart_result setE(InvalidSessionHandle e) {
+ public getNextSessionId_result setE(SafeModeException e) {
this.e = e;
return this;
}
@@ -2102,7 +2673,7 @@ public void setFieldValue(_Fields field, Object value) {
if (value == null) {
unsetSuccess();
} else {
- setSuccess((SessionRegistrationData)value);
+ setSuccess((String)value);
}
break;
@@ -2110,7 +2681,7 @@ public void setFieldValue(_Fields field, Object value) {
if (value == null) {
unsetE();
} else {
- setE((InvalidSessionHandle)value);
+ setE((SafeModeException)value);
}
break;
@@ -2148,12 +2719,12 @@ public boolean isSet(_Fields field) {
public boolean equals(Object that) {
if (that == null)
return false;
- if (that instanceof sessionStart_result)
- return this.equals((sessionStart_result)that);
+ if (that instanceof getNextSessionId_result)
+ return this.equals((getNextSessionId_result)that);
return false;
}
- public boolean equals(sessionStart_result that) {
+ public boolean equals(getNextSessionId_result that) {
if (that == null)
return false;
@@ -2183,13 +2754,13 @@ public int hashCode() {
return 0;
}
- public int compareTo(sessionStart_result other) {
+ public int compareTo(getNextSessionId_result other) {
if (!getClass().equals(other.getClass())) {
return getClass().getName().compareTo(other.getClass().getName());
}
int lastComparison = 0;
- sessionStart_result typedOther = (sessionStart_result)other;
+ getNextSessionId_result typedOther = (getNextSessionId_result)other;
lastComparison = Boolean.valueOf(isSetSuccess()).compareTo(typedOther.isSetSuccess());
if (lastComparison != 0) {
@@ -2229,16 +2800,15 @@ public void read(org.apache.thrift.protocol.TProtocol iprot) throws org.apache.t
}
switch (field.id) {
case 0: // SUCCESS
- if (field.type == org.apache.thrift.protocol.TType.STRUCT) {
- this.success = new SessionRegistrationData();
- this.success.read(iprot);
+ if (field.type == org.apache.thrift.protocol.TType.STRING) {
+ this.success = iprot.readString();
} else {
org.apache.thrift.protocol.TProtocolUtil.skip(iprot, field.type);
}
break;
case 1: // E
if (field.type == org.apache.thrift.protocol.TType.STRUCT) {
- this.e = new InvalidSessionHandle();
+ this.e = new SafeModeException();
this.e.read(iprot);
} else {
org.apache.thrift.protocol.TProtocolUtil.skip(iprot, field.type);
@@ -2260,7 +2830,7 @@ public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache.
if (this.isSetSuccess()) {
oprot.writeFieldBegin(SUCCESS_FIELD_DESC);
- this.success.write(oprot);
+ oprot.writeString(this.success);
oprot.writeFieldEnd();
} else if (this.isSetE()) {
oprot.writeFieldBegin(E_FIELD_DESC);
@@ -2273,7 +2843,7 @@ public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache.
@Override
public String toString() {
- StringBuilder sb = new StringBuilder("sessionStart_result(");
+ StringBuilder sb = new StringBuilder("getNextSessionId_result(");
boolean first = true;
sb.append("success:");
@@ -2317,8 +2887,8 @@ private void readObject(java.io.ObjectInputStream in) throws java.io.IOException
}
- public static class sessionUpdateInfo_args implements org.apache.thrift.TBase, java.io.Serializable, Cloneable {
- private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("sessionUpdateInfo_args");
+ public static class sessionStart_args implements org.apache.thrift.TBase, java.io.Serializable, Cloneable {
+ private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("sessionStart_args");
private static final org.apache.thrift.protocol.TField HANDLE_FIELD_DESC = new org.apache.thrift.protocol.TField("handle", org.apache.thrift.protocol.TType.STRING, (short)1);
private static final org.apache.thrift.protocol.TField INFO_FIELD_DESC = new org.apache.thrift.protocol.TField("info", org.apache.thrift.protocol.TType.STRUCT, (short)2);
@@ -2397,13 +2967,13 @@ public String getFieldName() {
tmpMap.put(_Fields.INFO, new org.apache.thrift.meta_data.FieldMetaData("info", org.apache.thrift.TFieldRequirementType.DEFAULT,
new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT, SessionInfo.class)));
metaDataMap = Collections.unmodifiableMap(tmpMap);
- org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(sessionUpdateInfo_args.class, metaDataMap);
+ org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(sessionStart_args.class, metaDataMap);
}
- public sessionUpdateInfo_args() {
+ public sessionStart_args() {
}
- public sessionUpdateInfo_args(
+ public sessionStart_args(
String handle,
SessionInfo info)
{
@@ -2415,7 +2985,7 @@ public sessionUpdateInfo_args(
/**
* Performs a deep copy on other.
*/
- public sessionUpdateInfo_args(sessionUpdateInfo_args other) {
+ public sessionStart_args(sessionStart_args other) {
if (other.isSetHandle()) {
this.handle = other.handle;
}
@@ -2424,8 +2994,8 @@ public sessionUpdateInfo_args(sessionUpdateInfo_args other) {
}
}
- public sessionUpdateInfo_args deepCopy() {
- return new sessionUpdateInfo_args(this);
+ public sessionStart_args deepCopy() {
+ return new sessionStart_args(this);
}
@Override
@@ -2438,7 +3008,7 @@ public String getHandle() {
return this.handle;
}
- public sessionUpdateInfo_args setHandle(String handle) {
+ public sessionStart_args setHandle(String handle) {
this.handle = handle;
return this;
}
@@ -2462,7 +3032,7 @@ public SessionInfo getInfo() {
return this.info;
}
- public sessionUpdateInfo_args setInfo(SessionInfo info) {
+ public sessionStart_args setInfo(SessionInfo info) {
this.info = info;
return this;
}
@@ -2534,12 +3104,12 @@ public boolean isSet(_Fields field) {
public boolean equals(Object that) {
if (that == null)
return false;
- if (that instanceof sessionUpdateInfo_args)
- return this.equals((sessionUpdateInfo_args)that);
+ if (that instanceof sessionStart_args)
+ return this.equals((sessionStart_args)that);
return false;
}
- public boolean equals(sessionUpdateInfo_args that) {
+ public boolean equals(sessionStart_args that) {
if (that == null)
return false;
@@ -2569,13 +3139,13 @@ public int hashCode() {
return 0;
}
- public int compareTo(sessionUpdateInfo_args other) {
+ public int compareTo(sessionStart_args other) {
if (!getClass().equals(other.getClass())) {
return getClass().getName().compareTo(other.getClass().getName());
}
int lastComparison = 0;
- sessionUpdateInfo_args typedOther = (sessionUpdateInfo_args)other;
+ sessionStart_args typedOther = (sessionStart_args)other;
lastComparison = Boolean.valueOf(isSetHandle()).compareTo(typedOther.isSetHandle());
if (lastComparison != 0) {
@@ -2660,7 +3230,7 @@ public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache.
@Override
public String toString() {
- StringBuilder sb = new StringBuilder("sessionUpdateInfo_args(");
+ StringBuilder sb = new StringBuilder("sessionStart_args(");
boolean first = true;
sb.append("handle:");
@@ -2704,16 +3274,22 @@ private void readObject(java.io.ObjectInputStream in) throws java.io.IOException
}
- public static class sessionUpdateInfo_result implements org.apache.thrift.TBase, java.io.Serializable, Cloneable {
- private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("sessionUpdateInfo_result");
+ public static class sessionStart_result implements org.apache.thrift.TBase, java.io.Serializable, Cloneable {
+ private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("sessionStart_result");
+ private static final org.apache.thrift.protocol.TField SUCCESS_FIELD_DESC = new org.apache.thrift.protocol.TField("success", org.apache.thrift.protocol.TType.STRUCT, (short)0);
private static final org.apache.thrift.protocol.TField E_FIELD_DESC = new org.apache.thrift.protocol.TField("e", org.apache.thrift.protocol.TType.STRUCT, (short)1);
+ private static final org.apache.thrift.protocol.TField F_FIELD_DESC = new org.apache.thrift.protocol.TField("f", org.apache.thrift.protocol.TType.STRUCT, (short)2);
+ public SessionRegistrationData success; // required
public InvalidSessionHandle e; // required
+ public SafeModeException f; // required
/** The set of fields this struct contains, along with convenience methods for finding and manipulating them. */
public enum _Fields implements org.apache.thrift.TFieldIdEnum {
- E((short)1, "e");
+ SUCCESS((short)0, "success"),
+ E((short)1, "e"),
+ F((short)2, "f");
private static final Map byName = new HashMap();
@@ -2728,8 +3304,12 @@ public enum _Fields implements org.apache.thrift.TFieldIdEnum {
*/
public static _Fields findByThriftId(int fieldId) {
switch(fieldId) {
+ case 0: // SUCCESS
+ return SUCCESS;
case 1: // E
return E;
+ case 2: // F
+ return F;
default:
return null;
}
@@ -2774,45 +3354,85 @@ public String getFieldName() {
public static final Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap;
static {
Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class);
+ tmpMap.put(_Fields.SUCCESS, new org.apache.thrift.meta_data.FieldMetaData("success", org.apache.thrift.TFieldRequirementType.DEFAULT,
+ new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT, SessionRegistrationData.class)));
tmpMap.put(_Fields.E, new org.apache.thrift.meta_data.FieldMetaData("e", org.apache.thrift.TFieldRequirementType.DEFAULT,
new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRUCT)));
+ tmpMap.put(_Fields.F, new org.apache.thrift.meta_data.FieldMetaData("f", org.apache.thrift.TFieldRequirementType.DEFAULT,
+ new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRUCT)));
metaDataMap = Collections.unmodifiableMap(tmpMap);
- org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(sessionUpdateInfo_result.class, metaDataMap);
+ org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(sessionStart_result.class, metaDataMap);
}
- public sessionUpdateInfo_result() {
+ public sessionStart_result() {
}
- public sessionUpdateInfo_result(
- InvalidSessionHandle e)
+ public sessionStart_result(
+ SessionRegistrationData success,
+ InvalidSessionHandle e,
+ SafeModeException f)
{
this();
+ this.success = success;
this.e = e;
+ this.f = f;
}
/**
* Performs a deep copy on other.
*/
- public sessionUpdateInfo_result(sessionUpdateInfo_result other) {
+ public sessionStart_result(sessionStart_result other) {
+ if (other.isSetSuccess()) {
+ this.success = new SessionRegistrationData(other.success);
+ }
if (other.isSetE()) {
this.e = new InvalidSessionHandle(other.e);
}
+ if (other.isSetF()) {
+ this.f = new SafeModeException(other.f);
+ }
}
- public sessionUpdateInfo_result deepCopy() {
- return new sessionUpdateInfo_result(this);
+ public sessionStart_result deepCopy() {
+ return new sessionStart_result(this);
}
@Override
public void clear() {
+ this.success = null;
this.e = null;
+ this.f = null;
+ }
+
+ public SessionRegistrationData getSuccess() {
+ return this.success;
+ }
+
+ public sessionStart_result setSuccess(SessionRegistrationData success) {
+ this.success = success;
+ return this;
+ }
+
+ public void unsetSuccess() {
+ this.success = null;
+ }
+
+ /** Returns true if field success is set (has been assigned a value) and false otherwise */
+ public boolean isSetSuccess() {
+ return this.success != null;
+ }
+
+ public void setSuccessIsSet(boolean value) {
+ if (!value) {
+ this.success = null;
+ }
}
public InvalidSessionHandle getE() {
return this.e;
}
- public sessionUpdateInfo_result setE(InvalidSessionHandle e) {
+ public sessionStart_result setE(InvalidSessionHandle e) {
this.e = e;
return this;
}
@@ -2832,8 +3452,40 @@ public void setEIsSet(boolean value) {
}
}
+ public SafeModeException getF() {
+ return this.f;
+ }
+
+ public sessionStart_result setF(SafeModeException f) {
+ this.f = f;
+ return this;
+ }
+
+ public void unsetF() {
+ this.f = null;
+ }
+
+ /** Returns true if field f is set (has been assigned a value) and false otherwise */
+ public boolean isSetF() {
+ return this.f != null;
+ }
+
+ public void setFIsSet(boolean value) {
+ if (!value) {
+ this.f = null;
+ }
+ }
+
public void setFieldValue(_Fields field, Object value) {
switch (field) {
+ case SUCCESS:
+ if (value == null) {
+ unsetSuccess();
+ } else {
+ setSuccess((SessionRegistrationData)value);
+ }
+ break;
+
case E:
if (value == null) {
unsetE();
@@ -2842,14 +3494,28 @@ public void setFieldValue(_Fields field, Object value) {
}
break;
+ case F:
+ if (value == null) {
+ unsetF();
+ } else {
+ setF((SafeModeException)value);
+ }
+ break;
+
}
}
public Object getFieldValue(_Fields field) {
switch (field) {
+ case SUCCESS:
+ return getSuccess();
+
case E:
return getE();
+ case F:
+ return getF();
+
}
throw new IllegalStateException();
}
@@ -2861,8 +3527,12 @@ public boolean isSet(_Fields field) {
}
switch (field) {
+ case SUCCESS:
+ return isSetSuccess();
case E:
return isSetE();
+ case F:
+ return isSetF();
}
throw new IllegalStateException();
}
@@ -2871,15 +3541,24 @@ public boolean isSet(_Fields field) {
public boolean equals(Object that) {
if (that == null)
return false;
- if (that instanceof sessionUpdateInfo_result)
- return this.equals((sessionUpdateInfo_result)that);
+ if (that instanceof sessionStart_result)
+ return this.equals((sessionStart_result)that);
return false;
}
- public boolean equals(sessionUpdateInfo_result that) {
+ public boolean equals(sessionStart_result that) {
if (that == null)
return false;
+ boolean this_present_success = true && this.isSetSuccess();
+ boolean that_present_success = true && that.isSetSuccess();
+ if (this_present_success || that_present_success) {
+ if (!(this_present_success && that_present_success))
+ return false;
+ if (!this.success.equals(that.success))
+ return false;
+ }
+
boolean this_present_e = true && this.isSetE();
boolean that_present_e = true && that.isSetE();
if (this_present_e || that_present_e) {
@@ -2889,6 +3568,15 @@ public boolean equals(sessionUpdateInfo_result that) {
return false;
}
+ boolean this_present_f = true && this.isSetF();
+ boolean that_present_f = true && that.isSetF();
+ if (this_present_f || that_present_f) {
+ if (!(this_present_f && that_present_f))
+ return false;
+ if (!this.f.equals(that.f))
+ return false;
+ }
+
return true;
}
@@ -2897,14 +3585,24 @@ public int hashCode() {
return 0;
}
- public int compareTo(sessionUpdateInfo_result other) {
+ public int compareTo(sessionStart_result other) {
if (!getClass().equals(other.getClass())) {
return getClass().getName().compareTo(other.getClass().getName());
}
int lastComparison = 0;
- sessionUpdateInfo_result typedOther = (sessionUpdateInfo_result)other;
+ sessionStart_result typedOther = (sessionStart_result)other;
+ lastComparison = Boolean.valueOf(isSetSuccess()).compareTo(typedOther.isSetSuccess());
+ if (lastComparison != 0) {
+ return lastComparison;
+ }
+ if (isSetSuccess()) {
+ lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.success, typedOther.success);
+ if (lastComparison != 0) {
+ return lastComparison;
+ }
+ }
lastComparison = Boolean.valueOf(isSetE()).compareTo(typedOther.isSetE());
if (lastComparison != 0) {
return lastComparison;
@@ -2915,6 +3613,16 @@ public int compareTo(sessionUpdateInfo_result other) {
return lastComparison;
}
}
+ lastComparison = Boolean.valueOf(isSetF()).compareTo(typedOther.isSetF());
+ if (lastComparison != 0) {
+ return lastComparison;
+ }
+ if (isSetF()) {
+ lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.f, typedOther.f);
+ if (lastComparison != 0) {
+ return lastComparison;
+ }
+ }
return 0;
}
@@ -2932,6 +3640,14 @@ public void read(org.apache.thrift.protocol.TProtocol iprot) throws org.apache.t
break;
}
switch (field.id) {
+ case 0: // SUCCESS
+ if (field.type == org.apache.thrift.protocol.TType.STRUCT) {
+ this.success = new SessionRegistrationData();
+ this.success.read(iprot);
+ } else {
+ org.apache.thrift.protocol.TProtocolUtil.skip(iprot, field.type);
+ }
+ break;
case 1: // E
if (field.type == org.apache.thrift.protocol.TType.STRUCT) {
this.e = new InvalidSessionHandle();
@@ -2940,6 +3656,14 @@ public void read(org.apache.thrift.protocol.TProtocol iprot) throws org.apache.t
org.apache.thrift.protocol.TProtocolUtil.skip(iprot, field.type);
}
break;
+ case 2: // F
+ if (field.type == org.apache.thrift.protocol.TType.STRUCT) {
+ this.f = new SafeModeException();
+ this.f.read(iprot);
+ } else {
+ org.apache.thrift.protocol.TProtocolUtil.skip(iprot, field.type);
+ }
+ break;
default:
org.apache.thrift.protocol.TProtocolUtil.skip(iprot, field.type);
}
@@ -2954,10 +3678,18 @@ public void read(org.apache.thrift.protocol.TProtocol iprot) throws org.apache.t
public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache.thrift.TException {
oprot.writeStructBegin(STRUCT_DESC);
- if (this.isSetE()) {
+ if (this.isSetSuccess()) {
+ oprot.writeFieldBegin(SUCCESS_FIELD_DESC);
+ this.success.write(oprot);
+ oprot.writeFieldEnd();
+ } else if (this.isSetE()) {
oprot.writeFieldBegin(E_FIELD_DESC);
this.e.write(oprot);
oprot.writeFieldEnd();
+ } else if (this.isSetF()) {
+ oprot.writeFieldBegin(F_FIELD_DESC);
+ this.f.write(oprot);
+ oprot.writeFieldEnd();
}
oprot.writeFieldStop();
oprot.writeStructEnd();
@@ -2965,9 +3697,17 @@ public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache.
@Override
public String toString() {
- StringBuilder sb = new StringBuilder("sessionUpdateInfo_result(");
+ StringBuilder sb = new StringBuilder("sessionStart_result(");
boolean first = true;
+ sb.append("success:");
+ if (this.success == null) {
+ sb.append("null");
+ } else {
+ sb.append(this.success);
+ }
+ first = false;
+ if (!first) sb.append(", ");
sb.append("e:");
if (this.e == null) {
sb.append("null");
@@ -2975,6 +3715,14 @@ public String toString() {
sb.append(this.e);
}
first = false;
+ if (!first) sb.append(", ");
+ sb.append("f:");
+ if (this.f == null) {
+ sb.append("null");
+ } else {
+ sb.append(this.f);
+ }
+ first = false;
sb.append(")");
return sb.toString();
}
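
The compareTo body patched above follows the fixed generated shape: for each field, order first on the isSet flag, then on the value, and return at the first difference. A self-contained sketch of that chain, with two nullable Strings standing in for the Thrift fields (the names are illustrative):

    final class CompareSketch implements Comparable<CompareSketch> {
        String success; // nullable, like the generated fields
        String e;

        @Override
        public int compareTo(CompareSketch other) {
            // "Set" sorts after "unset", exactly as Boolean.compareTo
            // orders false before true in the generated code.
            int last = Boolean.valueOf(success != null)
                    .compareTo(other.success != null);
            if (last != 0) return last;
            if (success != null) {
                last = success.compareTo(other.success);
                if (last != 0) return last;
            }
            last = Boolean.valueOf(e != null).compareTo(other.e != null);
            if (last != 0) return last;
            if (e != null) {
                last = e.compareTo(other.e);
                if (last != 0) return last;
            }
            return 0; // all fields tie
        }
    }
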
@@ -3001,27 +3749,19 @@ private void readObject(java.io.ObjectInputStream in) throws java.io.IOException
}
- public static class sessionEnd_args implements org.apache.thrift.TBase, java.io.Serializable, Cloneable {
- private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("sessionEnd_args");
+ public static class sessionUpdateInfo_args implements org.apache.thrift.TBase, java.io.Serializable, Cloneable {
+ private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("sessionUpdateInfo_args");
private static final org.apache.thrift.protocol.TField HANDLE_FIELD_DESC = new org.apache.thrift.protocol.TField("handle", org.apache.thrift.protocol.TType.STRING, (short)1);
- private static final org.apache.thrift.protocol.TField STATUS_FIELD_DESC = new org.apache.thrift.protocol.TField("status", org.apache.thrift.protocol.TType.I32, (short)2);
+ private static final org.apache.thrift.protocol.TField INFO_FIELD_DESC = new org.apache.thrift.protocol.TField("info", org.apache.thrift.protocol.TType.STRUCT, (short)2);
public String handle; // required
- /**
- *
- * @see SessionStatus
- */
- public SessionStatus status; // required
+ public SessionInfo info; // required
/** The set of fields this struct contains, along with convenience methods for finding and manipulating them. */
public enum _Fields implements org.apache.thrift.TFieldIdEnum {
HANDLE((short)1, "handle"),
- /**
- *
- * @see SessionStatus
- */
- STATUS((short)2, "status");
+ INFO((short)2, "info");
private static final Map byName = new HashMap();
@@ -3038,8 +3778,8 @@ public static _Fields findByThriftId(int fieldId) {
switch(fieldId) {
case 1: // HANDLE
return HANDLE;
- case 2: // STATUS
- return STATUS;
+ case 2: // INFO
+ return INFO;
default:
return null;
}
@@ -3086,51 +3826,51 @@ public String getFieldName() {
Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class);
tmpMap.put(_Fields.HANDLE, new org.apache.thrift.meta_data.FieldMetaData("handle", org.apache.thrift.TFieldRequirementType.DEFAULT,
new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING , "SessionHandle")));
- tmpMap.put(_Fields.STATUS, new org.apache.thrift.meta_data.FieldMetaData("status", org.apache.thrift.TFieldRequirementType.DEFAULT,
- new org.apache.thrift.meta_data.EnumMetaData(org.apache.thrift.protocol.TType.ENUM, SessionStatus.class)));
+ tmpMap.put(_Fields.INFO, new org.apache.thrift.meta_data.FieldMetaData("info", org.apache.thrift.TFieldRequirementType.DEFAULT,
+ new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT, SessionInfo.class)));
metaDataMap = Collections.unmodifiableMap(tmpMap);
- org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(sessionEnd_args.class, metaDataMap);
+ org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(sessionUpdateInfo_args.class, metaDataMap);
}
- public sessionEnd_args() {
+ public sessionUpdateInfo_args() {
}
- public sessionEnd_args(
+ public sessionUpdateInfo_args(
String handle,
- SessionStatus status)
+ SessionInfo info)
{
this();
this.handle = handle;
- this.status = status;
+ this.info = info;
}
/**
* Performs a deep copy on other.
*/
- public sessionEnd_args(sessionEnd_args other) {
+ public sessionUpdateInfo_args(sessionUpdateInfo_args other) {
if (other.isSetHandle()) {
this.handle = other.handle;
}
- if (other.isSetStatus()) {
- this.status = other.status;
+ if (other.isSetInfo()) {
+ this.info = new SessionInfo(other.info);
}
}
- public sessionEnd_args deepCopy() {
- return new sessionEnd_args(this);
+ public sessionUpdateInfo_args deepCopy() {
+ return new sessionUpdateInfo_args(this);
}
@Override
public void clear() {
this.handle = null;
- this.status = null;
+ this.info = null;
}
public String getHandle() {
return this.handle;
}
- public sessionEnd_args setHandle(String handle) {
+ public sessionUpdateInfo_args setHandle(String handle) {
this.handle = handle;
return this;
}
@@ -3150,35 +3890,27 @@ public void setHandleIsSet(boolean value) {
}
}
- /**
- *
- * @see SessionStatus
- */
- public SessionStatus getStatus() {
- return this.status;
+ public SessionInfo getInfo() {
+ return this.info;
}
- /**
- *
- * @see SessionStatus
- */
- public sessionEnd_args setStatus(SessionStatus status) {
- this.status = status;
- return this;
+ public sessionUpdateInfo_args setInfo(SessionInfo info) {
+ this.info = info;
+ return this;
}
- public void unsetStatus() {
- this.status = null;
+ public void unsetInfo() {
+ this.info = null;
}
- /** Returns true if field status is set (has been assigned a value) and false otherwise */
- public boolean isSetStatus() {
- return this.status != null;
+ /** Returns true if field info is set (has been assigned a value) and false otherwise */
+ public boolean isSetInfo() {
+ return this.info != null;
}
- public void setStatusIsSet(boolean value) {
+ public void setInfoIsSet(boolean value) {
if (!value) {
- this.status = null;
+ this.info = null;
}
}
@@ -3192,11 +3924,11 @@ public void setFieldValue(_Fields field, Object value) {
}
break;
- case STATUS:
+ case INFO:
if (value == null) {
- unsetStatus();
+ unsetInfo();
} else {
- setStatus((SessionStatus)value);
+ setInfo((SessionInfo)value);
}
break;
@@ -3208,8 +3940,8 @@ public Object getFieldValue(_Fields field) {
case HANDLE:
return getHandle();
- case STATUS:
- return getStatus();
+ case INFO:
+ return getInfo();
}
throw new IllegalStateException();
@@ -3224,8 +3956,8 @@ public boolean isSet(_Fields field) {
switch (field) {
case HANDLE:
return isSetHandle();
- case STATUS:
- return isSetStatus();
+ case INFO:
+ return isSetInfo();
}
throw new IllegalStateException();
}
@@ -3234,12 +3966,12 @@ public boolean isSet(_Fields field) {
public boolean equals(Object that) {
if (that == null)
return false;
- if (that instanceof sessionEnd_args)
- return this.equals((sessionEnd_args)that);
+ if (that instanceof sessionUpdateInfo_args)
+ return this.equals((sessionUpdateInfo_args)that);
return false;
}
- public boolean equals(sessionEnd_args that) {
+ public boolean equals(sessionUpdateInfo_args that) {
if (that == null)
return false;
@@ -3252,12 +3984,12 @@ public boolean equals(sessionEnd_args that) {
return false;
}
- boolean this_present_status = true && this.isSetStatus();
- boolean that_present_status = true && that.isSetStatus();
- if (this_present_status || that_present_status) {
- if (!(this_present_status && that_present_status))
+ boolean this_present_info = true && this.isSetInfo();
+ boolean that_present_info = true && that.isSetInfo();
+ if (this_present_info || that_present_info) {
+ if (!(this_present_info && that_present_info))
return false;
- if (!this.status.equals(that.status))
+ if (!this.info.equals(that.info))
return false;
}
@@ -3269,13 +4001,13 @@ public int hashCode() {
return 0;
}
- public int compareTo(sessionEnd_args other) {
+ public int compareTo(sessionUpdateInfo_args other) {
if (!getClass().equals(other.getClass())) {
return getClass().getName().compareTo(other.getClass().getName());
}
int lastComparison = 0;
- sessionEnd_args typedOther = (sessionEnd_args)other;
+ sessionUpdateInfo_args typedOther = (sessionUpdateInfo_args)other;
lastComparison = Boolean.valueOf(isSetHandle()).compareTo(typedOther.isSetHandle());
if (lastComparison != 0) {
@@ -3287,12 +4019,12 @@ public int compareTo(sessionEnd_args other) {
return lastComparison;
}
}
- lastComparison = Boolean.valueOf(isSetStatus()).compareTo(typedOther.isSetStatus());
+ lastComparison = Boolean.valueOf(isSetInfo()).compareTo(typedOther.isSetInfo());
if (lastComparison != 0) {
return lastComparison;
}
- if (isSetStatus()) {
- lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.status, typedOther.status);
+ if (isSetInfo()) {
+ lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.info, typedOther.info);
if (lastComparison != 0) {
return lastComparison;
}
@@ -3321,9 +4053,10 @@ public void read(org.apache.thrift.protocol.TProtocol iprot) throws org.apache.t
org.apache.thrift.protocol.TProtocolUtil.skip(iprot, field.type);
}
break;
- case 2: // STATUS
- if (field.type == org.apache.thrift.protocol.TType.I32) {
- this.status = SessionStatus.findByValue(iprot.readI32());
+ case 2: // INFO
+ if (field.type == org.apache.thrift.protocol.TType.STRUCT) {
+ this.info = new SessionInfo();
+ this.info.read(iprot);
} else {
org.apache.thrift.protocol.TProtocolUtil.skip(iprot, field.type);
}
@@ -3348,9 +4081,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache.
oprot.writeString(this.handle);
oprot.writeFieldEnd();
}
- if (this.status != null) {
- oprot.writeFieldBegin(STATUS_FIELD_DESC);
- oprot.writeI32(this.status.getValue());
+ if (this.info != null) {
+ oprot.writeFieldBegin(INFO_FIELD_DESC);
+ this.info.write(oprot);
oprot.writeFieldEnd();
}
oprot.writeFieldStop();
@@ -3359,7 +4092,7 @@ public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache.
@Override
public String toString() {
- StringBuilder sb = new StringBuilder("sessionEnd_args(");
+ StringBuilder sb = new StringBuilder("sessionUpdateInfo_args(");
boolean first = true;
sb.append("handle:");
@@ -3370,11 +4103,11 @@ public String toString() {
}
first = false;
if (!first) sb.append(", ");
- sb.append("status:");
- if (this.status == null) {
+ sb.append("info:");
+ if (this.info == null) {
sb.append("null");
} else {
- sb.append(this.status);
+ sb.append(this.info);
}
first = false;
sb.append(")");
@@ -3403,16 +4136,19 @@ private void readObject(java.io.ObjectInputStream in) throws java.io.IOException
}
- public static class sessionEnd_result implements org.apache.thrift.TBase, java.io.Serializable, Cloneable {
- private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("sessionEnd_result");
+ public static class sessionUpdateInfo_result implements org.apache.thrift.TBase, java.io.Serializable, Cloneable {
+ private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("sessionUpdateInfo_result");
private static final org.apache.thrift.protocol.TField E_FIELD_DESC = new org.apache.thrift.protocol.TField("e", org.apache.thrift.protocol.TType.STRUCT, (short)1);
+ private static final org.apache.thrift.protocol.TField F_FIELD_DESC = new org.apache.thrift.protocol.TField("f", org.apache.thrift.protocol.TType.STRUCT, (short)2);
public InvalidSessionHandle e; // required
+ public SafeModeException f; // required
/** The set of fields this struct contains, along with convenience methods for finding and manipulating them. */
public enum _Fields implements org.apache.thrift.TFieldIdEnum {
- E((short)1, "e");
+ E((short)1, "e"),
+ F((short)2, "f");
private static final Map byName = new HashMap();
@@ -3429,6 +4165,8 @@ public static _Fields findByThriftId(int fieldId) {
switch(fieldId) {
case 1: // E
return E;
+ case 2: // F
+ return F;
default:
return null;
}
@@ -3475,43 +4213,51 @@ public String getFieldName() {
Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class);
tmpMap.put(_Fields.E, new org.apache.thrift.meta_data.FieldMetaData("e", org.apache.thrift.TFieldRequirementType.DEFAULT,
new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRUCT)));
+ tmpMap.put(_Fields.F, new org.apache.thrift.meta_data.FieldMetaData("f", org.apache.thrift.TFieldRequirementType.DEFAULT,
+ new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRUCT)));
metaDataMap = Collections.unmodifiableMap(tmpMap);
- org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(sessionEnd_result.class, metaDataMap);
+ org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(sessionUpdateInfo_result.class, metaDataMap);
}
- public sessionEnd_result() {
+ public sessionUpdateInfo_result() {
}
- public sessionEnd_result(
- InvalidSessionHandle e)
+ public sessionUpdateInfo_result(
+ InvalidSessionHandle e,
+ SafeModeException f)
{
this();
this.e = e;
+ this.f = f;
}
/**
* Performs a deep copy on other.
*/
- public sessionEnd_result(sessionEnd_result other) {
+ public sessionUpdateInfo_result(sessionUpdateInfo_result other) {
if (other.isSetE()) {
this.e = new InvalidSessionHandle(other.e);
}
+ if (other.isSetF()) {
+ this.f = new SafeModeException(other.f);
+ }
}
- public sessionEnd_result deepCopy() {
- return new sessionEnd_result(this);
+ public sessionUpdateInfo_result deepCopy() {
+ return new sessionUpdateInfo_result(this);
}
@Override
public void clear() {
this.e = null;
+ this.f = null;
}
public InvalidSessionHandle getE() {
return this.e;
}
- public sessionEnd_result setE(InvalidSessionHandle e) {
+ public sessionUpdateInfo_result setE(InvalidSessionHandle e) {
this.e = e;
return this;
}
@@ -3531,6 +4277,30 @@ public void setEIsSet(boolean value) {
}
}
+ public SafeModeException getF() {
+ return this.f;
+ }
+
+ public sessionUpdateInfo_result setF(SafeModeException f) {
+ this.f = f;
+ return this;
+ }
+
+ public void unsetF() {
+ this.f = null;
+ }
+
+ /** Returns true if field f is set (has been assigned a value) and false otherwise */
+ public boolean isSetF() {
+ return this.f != null;
+ }
+
+ public void setFIsSet(boolean value) {
+ if (!value) {
+ this.f = null;
+ }
+ }
+
public void setFieldValue(_Fields field, Object value) {
switch (field) {
case E:
@@ -3541,6 +4311,14 @@ public void setFieldValue(_Fields field, Object value) {
}
break;
+ case F:
+ if (value == null) {
+ unsetF();
+ } else {
+ setF((SafeModeException)value);
+ }
+ break;
+
}
}
@@ -3549,6 +4327,9 @@ public Object getFieldValue(_Fields field) {
case E:
return getE();
+ case F:
+ return getF();
+
}
throw new IllegalStateException();
}
@@ -3562,6 +4343,8 @@ public boolean isSet(_Fields field) {
switch (field) {
case E:
return isSetE();
+ case F:
+ return isSetF();
}
throw new IllegalStateException();
}
@@ -3570,12 +4353,12 @@ public boolean isSet(_Fields field) {
public boolean equals(Object that) {
if (that == null)
return false;
- if (that instanceof sessionEnd_result)
- return this.equals((sessionEnd_result)that);
+ if (that instanceof sessionUpdateInfo_result)
+ return this.equals((sessionUpdateInfo_result)that);
return false;
}
- public boolean equals(sessionEnd_result that) {
+ public boolean equals(sessionUpdateInfo_result that) {
if (that == null)
return false;
@@ -3588,6 +4371,15 @@ public boolean equals(sessionEnd_result that) {
return false;
}
+ boolean this_present_f = true && this.isSetF();
+ boolean that_present_f = true && that.isSetF();
+ if (this_present_f || that_present_f) {
+ if (!(this_present_f && that_present_f))
+ return false;
+ if (!this.f.equals(that.f))
+ return false;
+ }
+
return true;
}
@@ -3596,13 +4388,13 @@ public int hashCode() {
return 0;
}
- public int compareTo(sessionEnd_result other) {
+ public int compareTo(sessionUpdateInfo_result other) {
if (!getClass().equals(other.getClass())) {
return getClass().getName().compareTo(other.getClass().getName());
}
int lastComparison = 0;
- sessionEnd_result typedOther = (sessionEnd_result)other;
+ sessionUpdateInfo_result typedOther = (sessionUpdateInfo_result)other;
lastComparison = Boolean.valueOf(isSetE()).compareTo(typedOther.isSetE());
if (lastComparison != 0) {
@@ -3614,6 +4406,16 @@ public int compareTo(sessionEnd_result other) {
return lastComparison;
}
}
+ lastComparison = Boolean.valueOf(isSetF()).compareTo(typedOther.isSetF());
+ if (lastComparison != 0) {
+ return lastComparison;
+ }
+ if (isSetF()) {
+ lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.f, typedOther.f);
+ if (lastComparison != 0) {
+ return lastComparison;
+ }
+ }
return 0;
}
@@ -3639,6 +4441,14 @@ public void read(org.apache.thrift.protocol.TProtocol iprot) throws org.apache.t
org.apache.thrift.protocol.TProtocolUtil.skip(iprot, field.type);
}
break;
+ case 2: // F
+ if (field.type == org.apache.thrift.protocol.TType.STRUCT) {
+ this.f = new SafeModeException();
+ this.f.read(iprot);
+ } else {
+ org.apache.thrift.protocol.TProtocolUtil.skip(iprot, field.type);
+ }
+ break;
default:
org.apache.thrift.protocol.TProtocolUtil.skip(iprot, field.type);
}
@@ -3657,6 +4467,10 @@ public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache.
oprot.writeFieldBegin(E_FIELD_DESC);
this.e.write(oprot);
oprot.writeFieldEnd();
+ } else if (this.isSetF()) {
+ oprot.writeFieldBegin(F_FIELD_DESC);
+ this.f.write(oprot);
+ oprot.writeFieldEnd();
}
oprot.writeFieldStop();
oprot.writeStructEnd();
@@ -3664,7 +4478,7 @@ public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache.
@Override
public String toString() {
- StringBuilder sb = new StringBuilder("sessionEnd_result(");
+ StringBuilder sb = new StringBuilder("sessionUpdateInfo_result(");
boolean first = true;
sb.append("e:");
@@ -3674,6 +4488,14 @@ public String toString() {
sb.append(this.e);
}
first = false;
+ if (!first) sb.append(", ");
+ sb.append("f:");
+ if (this.f == null) {
+ sb.append("null");
+ } else {
+ sb.append(this.f);
+ }
+ first = false;
sb.append(")");
return sb.toString();
}
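
sessionUpdateInfo_result now deep-copies f alongside e, following the file-wide convention: deepCopy() delegates to a copy constructor that copies each field only when set, sharing immutable values and allocating fresh instances for struct-typed ones. A standalone sketch of that idiom, with java.util.Date standing in for a mutable struct field (an assumption for illustration only):

    final class DeepCopySketch {
        String handle;        // immutable: safe to share between copies
        java.util.Date info;  // mutable: copied, like a Thrift struct field

        DeepCopySketch() {
        }

        // Copy constructor in the generated style: unset fields stay null.
        DeepCopySketch(DeepCopySketch other) {
            if (other.handle != null) {
                this.handle = other.handle; // shared reference is fine
            }
            if (other.info != null) {
                this.info = new java.util.Date(other.info.getTime()); // fresh copy
            }
        }

        DeepCopySketch deepCopy() {
            return new DeepCopySketch(this);
        }
    }
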
@@ -3700,16 +4522,27 @@ private void readObject(java.io.ObjectInputStream in) throws java.io.IOException
}
- public static class sessionHeartbeat_args implements org.apache.thrift.TBase, java.io.Serializable, Cloneable {
- private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("sessionHeartbeat_args");
+ public static class sessionEnd_args implements org.apache.thrift.TBase, java.io.Serializable, Cloneable {
+ private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("sessionEnd_args");
private static final org.apache.thrift.protocol.TField HANDLE_FIELD_DESC = new org.apache.thrift.protocol.TField("handle", org.apache.thrift.protocol.TType.STRING, (short)1);
+ private static final org.apache.thrift.protocol.TField STATUS_FIELD_DESC = new org.apache.thrift.protocol.TField("status", org.apache.thrift.protocol.TType.I32, (short)2);
public String handle; // required
+ /**
+ *
+ * @see SessionStatus
+ */
+ public SessionStatus status; // required
/** The set of fields this struct contains, along with convenience methods for finding and manipulating them. */
public enum _Fields implements org.apache.thrift.TFieldIdEnum {
- HANDLE((short)1, "handle");
+ HANDLE((short)1, "handle"),
+ /**
+ *
+ * @see SessionStatus
+ */
+ STATUS((short)2, "status");
private static final Map byName = new HashMap();
@@ -3726,6 +4559,8 @@ public static _Fields findByThriftId(int fieldId) {
switch(fieldId) {
case 1: // HANDLE
return HANDLE;
+ case 2: // STATUS
+ return STATUS;
default:
return null;
}
@@ -3772,43 +4607,51 @@ public String getFieldName() {
Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class);
tmpMap.put(_Fields.HANDLE, new org.apache.thrift.meta_data.FieldMetaData("handle", org.apache.thrift.TFieldRequirementType.DEFAULT,
new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING , "SessionHandle")));
+ tmpMap.put(_Fields.STATUS, new org.apache.thrift.meta_data.FieldMetaData("status", org.apache.thrift.TFieldRequirementType.DEFAULT,
+ new org.apache.thrift.meta_data.EnumMetaData(org.apache.thrift.protocol.TType.ENUM, SessionStatus.class)));
metaDataMap = Collections.unmodifiableMap(tmpMap);
- org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(sessionHeartbeat_args.class, metaDataMap);
+ org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(sessionEnd_args.class, metaDataMap);
}
- public sessionHeartbeat_args() {
+ public sessionEnd_args() {
}
- public sessionHeartbeat_args(
- String handle)
+ public sessionEnd_args(
+ String handle,
+ SessionStatus status)
{
this();
this.handle = handle;
+ this.status = status;
}
/**
* Performs a deep copy on other.
*/
- public sessionHeartbeat_args(sessionHeartbeat_args other) {
+ public sessionEnd_args(sessionEnd_args other) {
if (other.isSetHandle()) {
this.handle = other.handle;
}
+ if (other.isSetStatus()) {
+ this.status = other.status;
+ }
}
- public sessionHeartbeat_args deepCopy() {
- return new sessionHeartbeat_args(this);
+ public sessionEnd_args deepCopy() {
+ return new sessionEnd_args(this);
}
@Override
public void clear() {
this.handle = null;
+ this.status = null;
}
public String getHandle() {
return this.handle;
}
- public sessionHeartbeat_args setHandle(String handle) {
+ public sessionEnd_args setHandle(String handle) {
this.handle = handle;
return this;
}
@@ -3828,6 +4671,38 @@ public void setHandleIsSet(boolean value) {
}
}
+ /**
+ *
+ * @see SessionStatus
+ */
+ public SessionStatus getStatus() {
+ return this.status;
+ }
+
+ /**
+ *
+ * @see SessionStatus
+ */
+ public sessionEnd_args setStatus(SessionStatus status) {
+ this.status = status;
+ return this;
+ }
+
+ public void unsetStatus() {
+ this.status = null;
+ }
+
+ /** Returns true if field status is set (has been assigned a value) and false otherwise */
+ public boolean isSetStatus() {
+ return this.status != null;
+ }
+
+ public void setStatusIsSet(boolean value) {
+ if (!value) {
+ this.status = null;
+ }
+ }
+
public void setFieldValue(_Fields field, Object value) {
switch (field) {
case HANDLE:
@@ -3838,6 +4713,14 @@ public void setFieldValue(_Fields field, Object value) {
}
break;
+ case STATUS:
+ if (value == null) {
+ unsetStatus();
+ } else {
+ setStatus((SessionStatus)value);
+ }
+ break;
+
}
}
@@ -3846,6 +4729,9 @@ public Object getFieldValue(_Fields field) {
case HANDLE:
return getHandle();
+ case STATUS:
+ return getStatus();
+
}
throw new IllegalStateException();
}
@@ -3859,6 +4745,8 @@ public boolean isSet(_Fields field) {
switch (field) {
case HANDLE:
return isSetHandle();
+ case STATUS:
+ return isSetStatus();
}
throw new IllegalStateException();
}
@@ -3867,12 +4755,12 @@ public boolean isSet(_Fields field) {
public boolean equals(Object that) {
if (that == null)
return false;
- if (that instanceof sessionHeartbeat_args)
- return this.equals((sessionHeartbeat_args)that);
+ if (that instanceof sessionEnd_args)
+ return this.equals((sessionEnd_args)that);
return false;
}
- public boolean equals(sessionHeartbeat_args that) {
+ public boolean equals(sessionEnd_args that) {
if (that == null)
return false;
@@ -3885,6 +4773,15 @@ public boolean equals(sessionHeartbeat_args that) {
return false;
}
+ boolean this_present_status = true && this.isSetStatus();
+ boolean that_present_status = true && that.isSetStatus();
+ if (this_present_status || that_present_status) {
+ if (!(this_present_status && that_present_status))
+ return false;
+ if (!this.status.equals(that.status))
+ return false;
+ }
+
return true;
}
@@ -3893,13 +4790,13 @@ public int hashCode() {
return 0;
}
- public int compareTo(sessionHeartbeat_args other) {
+ public int compareTo(sessionEnd_args other) {
if (!getClass().equals(other.getClass())) {
return getClass().getName().compareTo(other.getClass().getName());
}
int lastComparison = 0;
- sessionHeartbeat_args typedOther = (sessionHeartbeat_args)other;
+ sessionEnd_args typedOther = (sessionEnd_args)other;
lastComparison = Boolean.valueOf(isSetHandle()).compareTo(typedOther.isSetHandle());
if (lastComparison != 0) {
@@ -3911,6 +4808,16 @@ public int compareTo(sessionHeartbeat_args other) {
return lastComparison;
}
}
+ lastComparison = Boolean.valueOf(isSetStatus()).compareTo(typedOther.isSetStatus());
+ if (lastComparison != 0) {
+ return lastComparison;
+ }
+ if (isSetStatus()) {
+ lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.status, typedOther.status);
+ if (lastComparison != 0) {
+ return lastComparison;
+ }
+ }
return 0;
}
@@ -3935,6 +4842,13 @@ public void read(org.apache.thrift.protocol.TProtocol iprot) throws org.apache.t
org.apache.thrift.protocol.TProtocolUtil.skip(iprot, field.type);
}
break;
+ case 2: // STATUS
+ if (field.type == org.apache.thrift.protocol.TType.I32) {
+ this.status = SessionStatus.findByValue(iprot.readI32());
+ } else {
+ org.apache.thrift.protocol.TProtocolUtil.skip(iprot, field.type);
+ }
+ break;
default:
org.apache.thrift.protocol.TProtocolUtil.skip(iprot, field.type);
}
@@ -3955,13 +4869,18 @@ public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache.
oprot.writeString(this.handle);
oprot.writeFieldEnd();
}
+ if (this.status != null) {
+ oprot.writeFieldBegin(STATUS_FIELD_DESC);
+ oprot.writeI32(this.status.getValue());
+ oprot.writeFieldEnd();
+ }
oprot.writeFieldStop();
oprot.writeStructEnd();
}
@Override
public String toString() {
- StringBuilder sb = new StringBuilder("sessionHeartbeat_args(");
+ StringBuilder sb = new StringBuilder("sessionEnd_args(");
boolean first = true;
sb.append("handle:");
@@ -3971,6 +4890,14 @@ public String toString() {
sb.append(this.handle);
}
first = false;
+ if (!first) sb.append(", ");
+ sb.append("status:");
+ if (this.status == null) {
+ sb.append("null");
+ } else {
+ sb.append(this.status);
+ }
+ first = false;
sb.append(")");
return sb.toString();
}
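
The status field added above travels as an i32: write() emits status.getValue() and read() rebuilds the enum with SessionStatus.findByValue(...), which yields null for values the local build does not know, leaving the field unset. A minimal sketch of that round trip with a stand-in enum (the constant names and values here are assumptions, not taken from the Corona .thrift source):

    enum StatusSketch {
        RUNNING(1), SUCCESSFUL(2), KILLED(3);

        private final int value; // the stable i32 wire value

        StatusSketch(int value) { this.value = value; }

        public int getValue() {
            return value;
        }

        // Same contract as the generated findByValue(): null on unknown
        // input, which the read() path then stores as an unset field.
        public static StatusSketch findByValue(int value) {
            for (StatusSketch s : values()) {
                if (s.value == value) {
                    return s;
                }
            }
            return null;
        }
    }
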
@@ -3997,16 +4924,19 @@ private void readObject(java.io.ObjectInputStream in) throws java.io.IOException
}
- public static class sessionHeartbeat_result implements org.apache.thrift.TBase, java.io.Serializable, Cloneable {
- private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("sessionHeartbeat_result");
+ public static class sessionEnd_result implements org.apache.thrift.TBase, java.io.Serializable, Cloneable {
+ private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("sessionEnd_result");
private static final org.apache.thrift.protocol.TField E_FIELD_DESC = new org.apache.thrift.protocol.TField("e", org.apache.thrift.protocol.TType.STRUCT, (short)1);
+ private static final org.apache.thrift.protocol.TField F_FIELD_DESC = new org.apache.thrift.protocol.TField("f", org.apache.thrift.protocol.TType.STRUCT, (short)2);
public InvalidSessionHandle e; // required
+ public SafeModeException f; // required
/** The set of fields this struct contains, along with convenience methods for finding and manipulating them. */
public enum _Fields implements org.apache.thrift.TFieldIdEnum {
- E((short)1, "e");
+ E((short)1, "e"),
+ F((short)2, "f");
private static final Map byName = new HashMap();
@@ -4023,6 +4953,8 @@ public static _Fields findByThriftId(int fieldId) {
switch(fieldId) {
case 1: // E
return E;
+ case 2: // F
+ return F;
default:
return null;
}
@@ -4069,43 +5001,51 @@ public String getFieldName() {
Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class);
tmpMap.put(_Fields.E, new org.apache.thrift.meta_data.FieldMetaData("e", org.apache.thrift.TFieldRequirementType.DEFAULT,
new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRUCT)));
+ tmpMap.put(_Fields.F, new org.apache.thrift.meta_data.FieldMetaData("f", org.apache.thrift.TFieldRequirementType.DEFAULT,
+ new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRUCT)));
metaDataMap = Collections.unmodifiableMap(tmpMap);
- org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(sessionHeartbeat_result.class, metaDataMap);
+ org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(sessionEnd_result.class, metaDataMap);
}
- public sessionHeartbeat_result() {
+ public sessionEnd_result() {
}
- public sessionHeartbeat_result(
- InvalidSessionHandle e)
+ public sessionEnd_result(
+ InvalidSessionHandle e,
+ SafeModeException f)
{
this();
this.e = e;
+ this.f = f;
}
/**
* Performs a deep copy on other.
*/
- public sessionHeartbeat_result(sessionHeartbeat_result other) {
+ public sessionEnd_result(sessionEnd_result other) {
if (other.isSetE()) {
this.e = new InvalidSessionHandle(other.e);
}
+ if (other.isSetF()) {
+ this.f = new SafeModeException(other.f);
+ }
}
- public sessionHeartbeat_result deepCopy() {
- return new sessionHeartbeat_result(this);
+ public sessionEnd_result deepCopy() {
+ return new sessionEnd_result(this);
}
@Override
public void clear() {
this.e = null;
+ this.f = null;
}
public InvalidSessionHandle getE() {
return this.e;
}
- public sessionHeartbeat_result setE(InvalidSessionHandle e) {
+ public sessionEnd_result setE(InvalidSessionHandle e) {
this.e = e;
return this;
}
@@ -4125,6 +5065,30 @@ public void setEIsSet(boolean value) {
}
}
+ public SafeModeException getF() {
+ return this.f;
+ }
+
+ public sessionEnd_result setF(SafeModeException f) {
+ this.f = f;
+ return this;
+ }
+
+ public void unsetF() {
+ this.f = null;
+ }
+
+ /** Returns true if field f is set (has been assigned a value) and false otherwise */
+ public boolean isSetF() {
+ return this.f != null;
+ }
+
+ public void setFIsSet(boolean value) {
+ if (!value) {
+ this.f = null;
+ }
+ }
+
public void setFieldValue(_Fields field, Object value) {
switch (field) {
case E:
@@ -4135,6 +5099,14 @@ public void setFieldValue(_Fields field, Object value) {
}
break;
+ case F:
+ if (value == null) {
+ unsetF();
+ } else {
+ setF((SafeModeException)value);
+ }
+ break;
+
}
}
@@ -4143,6 +5115,9 @@ public Object getFieldValue(_Fields field) {
case E:
return getE();
+ case F:
+ return getF();
+
}
throw new IllegalStateException();
}
@@ -4156,6 +5131,8 @@ public boolean isSet(_Fields field) {
switch (field) {
case E:
return isSetE();
+ case F:
+ return isSetF();
}
throw new IllegalStateException();
}
@@ -4164,12 +5141,12 @@ public boolean isSet(_Fields field) {
public boolean equals(Object that) {
if (that == null)
return false;
- if (that instanceof sessionHeartbeat_result)
- return this.equals((sessionHeartbeat_result)that);
+ if (that instanceof sessionEnd_result)
+ return this.equals((sessionEnd_result)that);
return false;
}
- public boolean equals(sessionHeartbeat_result that) {
+ public boolean equals(sessionEnd_result that) {
if (that == null)
return false;
@@ -4182,6 +5159,15 @@ public boolean equals(sessionHeartbeat_result that) {
return false;
}
+ boolean this_present_f = true && this.isSetF();
+ boolean that_present_f = true && that.isSetF();
+ if (this_present_f || that_present_f) {
+ if (!(this_present_f && that_present_f))
+ return false;
+ if (!this.f.equals(that.f))
+ return false;
+ }
+
return true;
}
@@ -4190,13 +5176,13 @@ public int hashCode() {
return 0;
}
- public int compareTo(sessionHeartbeat_result other) {
+ public int compareTo(sessionEnd_result other) {
if (!getClass().equals(other.getClass())) {
return getClass().getName().compareTo(other.getClass().getName());
}
int lastComparison = 0;
- sessionHeartbeat_result typedOther = (sessionHeartbeat_result)other;
+ sessionEnd_result typedOther = (sessionEnd_result)other;
lastComparison = Boolean.valueOf(isSetE()).compareTo(typedOther.isSetE());
if (lastComparison != 0) {
@@ -4208,6 +5194,16 @@ public int compareTo(sessionHeartbeat_result other) {
return lastComparison;
}
}
+ lastComparison = Boolean.valueOf(isSetF()).compareTo(typedOther.isSetF());
+ if (lastComparison != 0) {
+ return lastComparison;
+ }
+ if (isSetF()) {
+ lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.f, typedOther.f);
+ if (lastComparison != 0) {
+ return lastComparison;
+ }
+ }
return 0;
}
@@ -4233,6 +5229,14 @@ public void read(org.apache.thrift.protocol.TProtocol iprot) throws org.apache.t
org.apache.thrift.protocol.TProtocolUtil.skip(iprot, field.type);
}
break;
+ case 2: // F
+ if (field.type == org.apache.thrift.protocol.TType.STRUCT) {
+ this.f = new SafeModeException();
+ this.f.read(iprot);
+ } else {
+ org.apache.thrift.protocol.TProtocolUtil.skip(iprot, field.type);
+ }
+ break;
default:
org.apache.thrift.protocol.TProtocolUtil.skip(iprot, field.type);
}
@@ -4251,6 +5255,10 @@ public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache.
oprot.writeFieldBegin(E_FIELD_DESC);
this.e.write(oprot);
oprot.writeFieldEnd();
+ } else if (this.isSetF()) {
+ oprot.writeFieldBegin(F_FIELD_DESC);
+ this.f.write(oprot);
+ oprot.writeFieldEnd();
}
oprot.writeFieldStop();
oprot.writeStructEnd();
@@ -4258,7 +5266,7 @@ public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache.
@Override
public String toString() {
- StringBuilder sb = new StringBuilder("sessionHeartbeat_result(");
+ StringBuilder sb = new StringBuilder("sessionEnd_result(");
boolean first = true;
sb.append("e:");
@@ -4268,6 +5276,14 @@ public String toString() {
sb.append(this.e);
}
first = false;
+ if (!first) sb.append(", ");
+ sb.append("f:");
+ if (this.f == null) {
+ sb.append("null");
+ } else {
+ sb.append(this.f);
+ }
+ first = false;
sb.append(")");
return sb.toString();
}
@@ -4294,19 +5310,16 @@ private void readObject(java.io.ObjectInputStream in) throws java.io.IOException
}
- public static class requestResource_args implements org.apache.thrift.TBase<requestResource_args, requestResource_args._Fields>, java.io.Serializable, Cloneable {
- private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("requestResource_args");
+ public static class sessionHeartbeat_args implements org.apache.thrift.TBase<sessionHeartbeat_args, sessionHeartbeat_args._Fields>, java.io.Serializable, Cloneable {
+ private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("sessionHeartbeat_args");
private static final org.apache.thrift.protocol.TField HANDLE_FIELD_DESC = new org.apache.thrift.protocol.TField("handle", org.apache.thrift.protocol.TType.STRING, (short)1);
- private static final org.apache.thrift.protocol.TField REQUEST_LIST_FIELD_DESC = new org.apache.thrift.protocol.TField("requestList", org.apache.thrift.protocol.TType.LIST, (short)2);
public String handle; // required
- public List<ResourceRequest> requestList; // required
/** The set of fields this struct contains, along with convenience methods for finding and manipulating them. */
public enum _Fields implements org.apache.thrift.TFieldIdEnum {
- HANDLE((short)1, "handle"),
- REQUEST_LIST((short)2, "requestList");
+ HANDLE((short)1, "handle");
private static final Map<String, _Fields> byName = new HashMap<String, _Fields>();
@@ -4323,8 +5336,6 @@ public static _Fields findByThriftId(int fieldId) {
switch(fieldId) {
case 1: // HANDLE
return HANDLE;
- case 2: // REQUEST_LIST
- return REQUEST_LIST;
default:
return null;
}
@@ -4371,56 +5382,43 @@ public String getFieldName() {
Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class);
tmpMap.put(_Fields.HANDLE, new org.apache.thrift.meta_data.FieldMetaData("handle", org.apache.thrift.TFieldRequirementType.DEFAULT,
new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING , "SessionHandle")));
- tmpMap.put(_Fields.REQUEST_LIST, new org.apache.thrift.meta_data.FieldMetaData("requestList", org.apache.thrift.TFieldRequirementType.DEFAULT,
- new org.apache.thrift.meta_data.ListMetaData(org.apache.thrift.protocol.TType.LIST,
- new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT, ResourceRequest.class))));
metaDataMap = Collections.unmodifiableMap(tmpMap);
- org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(requestResource_args.class, metaDataMap);
+ org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(sessionHeartbeat_args.class, metaDataMap);
}
- public requestResource_args() {
+ public sessionHeartbeat_args() {
}
- public requestResource_args(
- String handle,
- List<ResourceRequest> requestList)
+ public sessionHeartbeat_args(
+ String handle)
{
this();
this.handle = handle;
- this.requestList = requestList;
}
/**
* Performs a deep copy on other.
*/
- public requestResource_args(requestResource_args other) {
+ public sessionHeartbeat_args(sessionHeartbeat_args other) {
if (other.isSetHandle()) {
this.handle = other.handle;
}
- if (other.isSetRequestList()) {
- List<ResourceRequest> __this__requestList = new ArrayList<ResourceRequest>();
- for (ResourceRequest other_element : other.requestList) {
- __this__requestList.add(new ResourceRequest(other_element));
- }
- this.requestList = __this__requestList;
- }
}
- public requestResource_args deepCopy() {
- return new requestResource_args(this);
+ public sessionHeartbeat_args deepCopy() {
+ return new sessionHeartbeat_args(this);
}
@Override
public void clear() {
this.handle = null;
- this.requestList = null;
}
public String getHandle() {
return this.handle;
}
- public requestResource_args setHandle(String handle) {
+ public sessionHeartbeat_args setHandle(String handle) {
this.handle = handle;
return this;
}
@@ -4440,45 +5438,6 @@ public void setHandleIsSet(boolean value) {
}
}
- public int getRequestListSize() {
- return (this.requestList == null) ? 0 : this.requestList.size();
- }
-
- public java.util.Iterator<ResourceRequest> getRequestListIterator() {
- return (this.requestList == null) ? null : this.requestList.iterator();
- }
-
- public void addToRequestList(ResourceRequest elem) {
- if (this.requestList == null) {
- this.requestList = new ArrayList<ResourceRequest>();
- }
- this.requestList.add(elem);
- }
-
- public List<ResourceRequest> getRequestList() {
- return this.requestList;
- }
-
- public requestResource_args setRequestList(List<ResourceRequest> requestList) {
- this.requestList = requestList;
- return this;
- }
-
- public void unsetRequestList() {
- this.requestList = null;
- }
-
- /** Returns true if field requestList is set (has been assigned a value) and false otherwise */
- public boolean isSetRequestList() {
- return this.requestList != null;
- }
-
- public void setRequestListIsSet(boolean value) {
- if (!value) {
- this.requestList = null;
- }
- }
-
public void setFieldValue(_Fields field, Object value) {
switch (field) {
case HANDLE:
@@ -4489,14 +5448,6 @@ public void setFieldValue(_Fields field, Object value) {
}
break;
- case REQUEST_LIST:
- if (value == null) {
- unsetRequestList();
- } else {
- setRequestList((List<ResourceRequest>)value);
- }
- break;
-
}
}
@@ -4505,9 +5456,6 @@ public Object getFieldValue(_Fields field) {
case HANDLE:
return getHandle();
- case REQUEST_LIST:
- return getRequestList();
-
}
throw new IllegalStateException();
}
@@ -4521,8 +5469,6 @@ public boolean isSet(_Fields field) {
switch (field) {
case HANDLE:
return isSetHandle();
- case REQUEST_LIST:
- return isSetRequestList();
}
throw new IllegalStateException();
}
@@ -4531,12 +5477,12 @@ public boolean isSet(_Fields field) {
public boolean equals(Object that) {
if (that == null)
return false;
- if (that instanceof requestResource_args)
- return this.equals((requestResource_args)that);
+ if (that instanceof sessionHeartbeat_args)
+ return this.equals((sessionHeartbeat_args)that);
return false;
}
- public boolean equals(requestResource_args that) {
+ public boolean equals(sessionHeartbeat_args that) {
if (that == null)
return false;
@@ -4549,15 +5495,6 @@ public boolean equals(requestResource_args that) {
return false;
}
- boolean this_present_requestList = true && this.isSetRequestList();
- boolean that_present_requestList = true && that.isSetRequestList();
- if (this_present_requestList || that_present_requestList) {
- if (!(this_present_requestList && that_present_requestList))
- return false;
- if (!this.requestList.equals(that.requestList))
- return false;
- }
-
return true;
}
@@ -4566,13 +5503,13 @@ public int hashCode() {
return 0;
}
- public int compareTo(requestResource_args other) {
+ public int compareTo(sessionHeartbeat_args other) {
if (!getClass().equals(other.getClass())) {
return getClass().getName().compareTo(other.getClass().getName());
}
int lastComparison = 0;
- requestResource_args typedOther = (requestResource_args)other;
+ sessionHeartbeat_args typedOther = (sessionHeartbeat_args)other;
lastComparison = Boolean.valueOf(isSetHandle()).compareTo(typedOther.isSetHandle());
if (lastComparison != 0) {
@@ -4584,16 +5521,6 @@ public int compareTo(requestResource_args other) {
return lastComparison;
}
}
- lastComparison = Boolean.valueOf(isSetRequestList()).compareTo(typedOther.isSetRequestList());
- if (lastComparison != 0) {
- return lastComparison;
- }
- if (isSetRequestList()) {
- lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.requestList, typedOther.requestList);
- if (lastComparison != 0) {
- return lastComparison;
- }
- }
return 0;
}
@@ -4618,24 +5545,6 @@ public void read(org.apache.thrift.protocol.TProtocol iprot) throws org.apache.t
org.apache.thrift.protocol.TProtocolUtil.skip(iprot, field.type);
}
break;
- case 2: // REQUEST_LIST
- if (field.type == org.apache.thrift.protocol.TType.LIST) {
- {
- org.apache.thrift.protocol.TList _list30 = iprot.readListBegin();
- this.requestList = new ArrayList<ResourceRequest>(_list30.size);
- for (int _i31 = 0; _i31 < _list30.size; ++_i31)
- {
- ResourceRequest _elem32; // required
- _elem32 = new ResourceRequest();
- _elem32.read(iprot);
- this.requestList.add(_elem32);
- }
- iprot.readListEnd();
- }
- } else {
- org.apache.thrift.protocol.TProtocolUtil.skip(iprot, field.type);
- }
- break;
default:
org.apache.thrift.protocol.TProtocolUtil.skip(iprot, field.type);
}
@@ -4656,25 +5565,13 @@ public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache.
oprot.writeString(this.handle);
oprot.writeFieldEnd();
}
- if (this.requestList != null) {
- oprot.writeFieldBegin(REQUEST_LIST_FIELD_DESC);
- {
- oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, this.requestList.size()));
- for (ResourceRequest _iter33 : this.requestList)
- {
- _iter33.write(oprot);
- }
- oprot.writeListEnd();
- }
- oprot.writeFieldEnd();
- }
oprot.writeFieldStop();
oprot.writeStructEnd();
}
@Override
public String toString() {
- StringBuilder sb = new StringBuilder("requestResource_args(");
+ StringBuilder sb = new StringBuilder("sessionHeartbeat_args(");
boolean first = true;
sb.append("handle:");
@@ -4684,14 +5581,6 @@ public String toString() {
sb.append(this.handle);
}
first = false;
- if (!first) sb.append(", ");
- sb.append("requestList:");
- if (this.requestList == null) {
- sb.append("null");
- } else {
- sb.append(this.requestList);
- }
- first = false;
sb.append(")");
return sb.toString();
}
@@ -4718,16 +5607,19 @@ private void readObject(java.io.ObjectInputStream in) throws java.io.IOException
}
- public static class requestResource_result implements org.apache.thrift.TBase<requestResource_result, requestResource_result._Fields>, java.io.Serializable, Cloneable {
- private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("requestResource_result");
+ public static class sessionHeartbeat_result implements org.apache.thrift.TBase<sessionHeartbeat_result, sessionHeartbeat_result._Fields>, java.io.Serializable, Cloneable {
+ private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("sessionHeartbeat_result");
private static final org.apache.thrift.protocol.TField E_FIELD_DESC = new org.apache.thrift.protocol.TField("e", org.apache.thrift.protocol.TType.STRUCT, (short)1);
+ private static final org.apache.thrift.protocol.TField F_FIELD_DESC = new org.apache.thrift.protocol.TField("f", org.apache.thrift.protocol.TType.STRUCT, (short)2);
public InvalidSessionHandle e; // required
+ public SafeModeException f; // required
/** The set of fields this struct contains, along with convenience methods for finding and manipulating them. */
public enum _Fields implements org.apache.thrift.TFieldIdEnum {
- E((short)1, "e");
+ E((short)1, "e"),
+ F((short)2, "f");
private static final Map<String, _Fields> byName = new HashMap<String, _Fields>();
@@ -4744,6 +5636,8 @@ public static _Fields findByThriftId(int fieldId) {
switch(fieldId) {
case 1: // E
return E;
+ case 2: // F
+ return F;
default:
return null;
}
@@ -4790,43 +5684,51 @@ public String getFieldName() {
Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class);
tmpMap.put(_Fields.E, new org.apache.thrift.meta_data.FieldMetaData("e", org.apache.thrift.TFieldRequirementType.DEFAULT,
new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRUCT)));
+ tmpMap.put(_Fields.F, new org.apache.thrift.meta_data.FieldMetaData("f", org.apache.thrift.TFieldRequirementType.DEFAULT,
+ new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRUCT)));
metaDataMap = Collections.unmodifiableMap(tmpMap);
- org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(requestResource_result.class, metaDataMap);
+ org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(sessionHeartbeat_result.class, metaDataMap);
}
- public requestResource_result() {
+ public sessionHeartbeat_result() {
}
- public requestResource_result(
- InvalidSessionHandle e)
+ public sessionHeartbeat_result(
+ InvalidSessionHandle e,
+ SafeModeException f)
{
this();
this.e = e;
+ this.f = f;
}
/**
* Performs a deep copy on other.
*/
- public requestResource_result(requestResource_result other) {
+ public sessionHeartbeat_result(sessionHeartbeat_result other) {
if (other.isSetE()) {
this.e = new InvalidSessionHandle(other.e);
}
+ if (other.isSetF()) {
+ this.f = new SafeModeException(other.f);
+ }
}
- public requestResource_result deepCopy() {
- return new requestResource_result(this);
+ public sessionHeartbeat_result deepCopy() {
+ return new sessionHeartbeat_result(this);
}
@Override
public void clear() {
this.e = null;
+ this.f = null;
}
public InvalidSessionHandle getE() {
return this.e;
}
- public requestResource_result setE(InvalidSessionHandle e) {
+ public sessionHeartbeat_result setE(InvalidSessionHandle e) {
this.e = e;
return this;
}
@@ -4846,6 +5748,30 @@ public void setEIsSet(boolean value) {
}
}
+ public SafeModeException getF() {
+ return this.f;
+ }
+
+ public sessionHeartbeat_result setF(SafeModeException f) {
+ this.f = f;
+ return this;
+ }
+
+ public void unsetF() {
+ this.f = null;
+ }
+
+ /** Returns true if field f is set (has been assigned a value) and false otherwise */
+ public boolean isSetF() {
+ return this.f != null;
+ }
+
+ public void setFIsSet(boolean value) {
+ if (!value) {
+ this.f = null;
+ }
+ }
+
public void setFieldValue(_Fields field, Object value) {
switch (field) {
case E:
@@ -4856,6 +5782,14 @@ public void setFieldValue(_Fields field, Object value) {
}
break;
+ case F:
+ if (value == null) {
+ unsetF();
+ } else {
+ setF((SafeModeException)value);
+ }
+ break;
+
}
}
@@ -4864,6 +5798,9 @@ public Object getFieldValue(_Fields field) {
case E:
return getE();
+ case F:
+ return getF();
+
}
throw new IllegalStateException();
}
@@ -4877,6 +5814,8 @@ public boolean isSet(_Fields field) {
switch (field) {
case E:
return isSetE();
+ case F:
+ return isSetF();
}
throw new IllegalStateException();
}
@@ -4885,12 +5824,12 @@ public boolean isSet(_Fields field) {
public boolean equals(Object that) {
if (that == null)
return false;
- if (that instanceof requestResource_result)
- return this.equals((requestResource_result)that);
+ if (that instanceof sessionHeartbeat_result)
+ return this.equals((sessionHeartbeat_result)that);
return false;
}
- public boolean equals(requestResource_result that) {
+ public boolean equals(sessionHeartbeat_result that) {
if (that == null)
return false;
@@ -4903,6 +5842,15 @@ public boolean equals(requestResource_result that) {
return false;
}
+ boolean this_present_f = true && this.isSetF();
+ boolean that_present_f = true && that.isSetF();
+ if (this_present_f || that_present_f) {
+ if (!(this_present_f && that_present_f))
+ return false;
+ if (!this.f.equals(that.f))
+ return false;
+ }
+
return true;
}
@@ -4911,13 +5859,13 @@ public int hashCode() {
return 0;
}
- public int compareTo(requestResource_result other) {
+ public int compareTo(sessionHeartbeat_result other) {
if (!getClass().equals(other.getClass())) {
return getClass().getName().compareTo(other.getClass().getName());
}
int lastComparison = 0;
- requestResource_result typedOther = (requestResource_result)other;
+ sessionHeartbeat_result typedOther = (sessionHeartbeat_result)other;
lastComparison = Boolean.valueOf(isSetE()).compareTo(typedOther.isSetE());
if (lastComparison != 0) {
@@ -4929,6 +5877,16 @@ public int compareTo(requestResource_result other) {
return lastComparison;
}
}
+ lastComparison = Boolean.valueOf(isSetF()).compareTo(typedOther.isSetF());
+ if (lastComparison != 0) {
+ return lastComparison;
+ }
+ if (isSetF()) {
+ lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.f, typedOther.f);
+ if (lastComparison != 0) {
+ return lastComparison;
+ }
+ }
return 0;
}
@@ -4954,6 +5912,14 @@ public void read(org.apache.thrift.protocol.TProtocol iprot) throws org.apache.t
org.apache.thrift.protocol.TProtocolUtil.skip(iprot, field.type);
}
break;
+ case 2: // F
+ if (field.type == org.apache.thrift.protocol.TType.STRUCT) {
+ this.f = new SafeModeException();
+ this.f.read(iprot);
+ } else {
+ org.apache.thrift.protocol.TProtocolUtil.skip(iprot, field.type);
+ }
+ break;
default:
org.apache.thrift.protocol.TProtocolUtil.skip(iprot, field.type);
}
@@ -4972,6 +5938,10 @@ public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache.
oprot.writeFieldBegin(E_FIELD_DESC);
this.e.write(oprot);
oprot.writeFieldEnd();
+ } else if (this.isSetF()) {
+ oprot.writeFieldBegin(F_FIELD_DESC);
+ this.f.write(oprot);
+ oprot.writeFieldEnd();
}
oprot.writeFieldStop();
oprot.writeStructEnd();
@@ -4979,7 +5949,7 @@ public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache.
@Override
public String toString() {
- StringBuilder sb = new StringBuilder("requestResource_result(");
+ StringBuilder sb = new StringBuilder("sessionHeartbeat_result(");
boolean first = true;
sb.append("e:");
@@ -4989,6 +5959,14 @@ public String toString() {
sb.append(this.e);
}
first = false;
+ if (!first) sb.append(", ");
+ sb.append("f:");
+ if (this.f == null) {
+ sb.append("null");
+ } else {
+ sb.append(this.f);
+ }
+ first = false;
sb.append(")");
return sb.toString();
}
@@ -5015,19 +5993,19 @@ private void readObject(java.io.ObjectInputStream in) throws java.io.IOException
}
- public static class releaseResource_args implements org.apache.thrift.TBase<releaseResource_args, releaseResource_args._Fields>, java.io.Serializable, Cloneable {
- private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("releaseResource_args");
+ public static class sessionHeartbeatV2_args implements org.apache.thrift.TBase<sessionHeartbeatV2_args, sessionHeartbeatV2_args._Fields>, java.io.Serializable, Cloneable {
+ private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("sessionHeartbeatV2_args");
private static final org.apache.thrift.protocol.TField HANDLE_FIELD_DESC = new org.apache.thrift.protocol.TField("handle", org.apache.thrift.protocol.TType.STRING, (short)1);
- private static final org.apache.thrift.protocol.TField ID_LIST_FIELD_DESC = new org.apache.thrift.protocol.TField("idList", org.apache.thrift.protocol.TType.LIST, (short)2);
+ private static final org.apache.thrift.protocol.TField HEARTBEAT_ARGS_FIELD_DESC = new org.apache.thrift.protocol.TField("heartbeatArgs", org.apache.thrift.protocol.TType.STRUCT, (short)2);
public String handle; // required
- public List<Integer> idList; // required
+ public HeartbeatArgs heartbeatArgs; // required
/** The set of fields this struct contains, along with convenience methods for finding and manipulating them. */
public enum _Fields implements org.apache.thrift.TFieldIdEnum {
HANDLE((short)1, "handle"),
- ID_LIST((short)2, "idList");
+ HEARTBEAT_ARGS((short)2, "heartbeatArgs");
private static final Map<String, _Fields> byName = new HashMap<String, _Fields>();
@@ -5044,8 +6022,8 @@ public static _Fields findByThriftId(int fieldId) {
switch(fieldId) {
case 1: // HANDLE
return HANDLE;
- case 2: // ID_LIST
- return ID_LIST;
+ case 2: // HEARTBEAT_ARGS
+ return HEARTBEAT_ARGS;
default:
return null;
}
@@ -5092,56 +6070,51 @@ public String getFieldName() {
Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class);
tmpMap.put(_Fields.HANDLE, new org.apache.thrift.meta_data.FieldMetaData("handle", org.apache.thrift.TFieldRequirementType.DEFAULT,
new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING , "SessionHandle")));
- tmpMap.put(_Fields.ID_LIST, new org.apache.thrift.meta_data.FieldMetaData("idList", org.apache.thrift.TFieldRequirementType.DEFAULT,
- new org.apache.thrift.meta_data.ListMetaData(org.apache.thrift.protocol.TType.LIST,
- new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.I32 , "ResourceRequestId"))));
+ tmpMap.put(_Fields.HEARTBEAT_ARGS, new org.apache.thrift.meta_data.FieldMetaData("heartbeatArgs", org.apache.thrift.TFieldRequirementType.DEFAULT,
+ new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT, HeartbeatArgs.class)));
metaDataMap = Collections.unmodifiableMap(tmpMap);
- org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(releaseResource_args.class, metaDataMap);
+ org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(sessionHeartbeatV2_args.class, metaDataMap);
}
- public releaseResource_args() {
+ public sessionHeartbeatV2_args() {
}
- public releaseResource_args(
+ public sessionHeartbeatV2_args(
String handle,
- List<Integer> idList)
+ HeartbeatArgs heartbeatArgs)
{
this();
this.handle = handle;
- this.idList = idList;
+ this.heartbeatArgs = heartbeatArgs;
}
/**
* Performs a deep copy on other.
*/
- public releaseResource_args(releaseResource_args other) {
+ public sessionHeartbeatV2_args(sessionHeartbeatV2_args other) {
if (other.isSetHandle()) {
this.handle = other.handle;
}
- if (other.isSetIdList()) {
- List<Integer> __this__idList = new ArrayList<Integer>();
- for (Integer other_element : other.idList) {
- __this__idList.add(other_element);
- }
- this.idList = __this__idList;
+ if (other.isSetHeartbeatArgs()) {
+ this.heartbeatArgs = new HeartbeatArgs(other.heartbeatArgs);
}
}
- public releaseResource_args deepCopy() {
- return new releaseResource_args(this);
+ public sessionHeartbeatV2_args deepCopy() {
+ return new sessionHeartbeatV2_args(this);
}
@Override
public void clear() {
this.handle = null;
- this.idList = null;
+ this.heartbeatArgs = null;
}
public String getHandle() {
return this.handle;
}
- public releaseResource_args setHandle(String handle) {
+ public sessionHeartbeatV2_args setHandle(String handle) {
this.handle = handle;
return this;
}
@@ -5161,42 +6134,27 @@ public void setHandleIsSet(boolean value) {
}
}
- public int getIdListSize() {
- return (this.idList == null) ? 0 : this.idList.size();
- }
-
- public java.util.Iterator<Integer> getIdListIterator() {
- return (this.idList == null) ? null : this.idList.iterator();
- }
-
- public void addToIdList(int elem) {
- if (this.idList == null) {
- this.idList = new ArrayList<Integer>();
- }
- this.idList.add(elem);
- }
-
- public List<Integer> getIdList() {
- return this.idList;
+ public HeartbeatArgs getHeartbeatArgs() {
+ return this.heartbeatArgs;
}
- public releaseResource_args setIdList(List<Integer> idList) {
- this.idList = idList;
+ public sessionHeartbeatV2_args setHeartbeatArgs(HeartbeatArgs heartbeatArgs) {
+ this.heartbeatArgs = heartbeatArgs;
return this;
}
- public void unsetIdList() {
- this.idList = null;
+ public void unsetHeartbeatArgs() {
+ this.heartbeatArgs = null;
}
- /** Returns true if field idList is set (has been assigned a value) and false otherwise */
- public boolean isSetIdList() {
- return this.idList != null;
+ /** Returns true if field heartbeatArgs is set (has been assigned a value) and false otherwise */
+ public boolean isSetHeartbeatArgs() {
+ return this.heartbeatArgs != null;
}
- public void setIdListIsSet(boolean value) {
+ public void setHeartbeatArgsIsSet(boolean value) {
if (!value) {
- this.idList = null;
+ this.heartbeatArgs = null;
}
}
@@ -5210,11 +6168,11 @@ public void setFieldValue(_Fields field, Object value) {
}
break;
- case ID_LIST:
+ case HEARTBEAT_ARGS:
if (value == null) {
- unsetIdList();
+ unsetHeartbeatArgs();
} else {
- setIdList((List<Integer>)value);
+ setHeartbeatArgs((HeartbeatArgs)value);
}
break;
@@ -5226,8 +6184,8 @@ public Object getFieldValue(_Fields field) {
case HANDLE:
return getHandle();
- case ID_LIST:
- return getIdList();
+ case HEARTBEAT_ARGS:
+ return getHeartbeatArgs();
}
throw new IllegalStateException();
@@ -5242,8 +6200,8 @@ public boolean isSet(_Fields field) {
switch (field) {
case HANDLE:
return isSetHandle();
- case ID_LIST:
- return isSetIdList();
+ case HEARTBEAT_ARGS:
+ return isSetHeartbeatArgs();
}
throw new IllegalStateException();
}
@@ -5252,12 +6210,12 @@ public boolean isSet(_Fields field) {
public boolean equals(Object that) {
if (that == null)
return false;
- if (that instanceof releaseResource_args)
- return this.equals((releaseResource_args)that);
+ if (that instanceof sessionHeartbeatV2_args)
+ return this.equals((sessionHeartbeatV2_args)that);
return false;
}
- public boolean equals(releaseResource_args that) {
+ public boolean equals(sessionHeartbeatV2_args that) {
if (that == null)
return false;
@@ -5270,12 +6228,12 @@ public boolean equals(releaseResource_args that) {
return false;
}
- boolean this_present_idList = true && this.isSetIdList();
- boolean that_present_idList = true && that.isSetIdList();
- if (this_present_idList || that_present_idList) {
- if (!(this_present_idList && that_present_idList))
+ boolean this_present_heartbeatArgs = true && this.isSetHeartbeatArgs();
+ boolean that_present_heartbeatArgs = true && that.isSetHeartbeatArgs();
+ if (this_present_heartbeatArgs || that_present_heartbeatArgs) {
+ if (!(this_present_heartbeatArgs && that_present_heartbeatArgs))
return false;
- if (!this.idList.equals(that.idList))
+ if (!this.heartbeatArgs.equals(that.heartbeatArgs))
return false;
}
@@ -5287,13 +6245,13 @@ public int hashCode() {
return 0;
}
- public int compareTo(releaseResource_args other) {
+ public int compareTo(sessionHeartbeatV2_args other) {
if (!getClass().equals(other.getClass())) {
return getClass().getName().compareTo(other.getClass().getName());
}
int lastComparison = 0;
- releaseResource_args typedOther = (releaseResource_args)other;
+ sessionHeartbeatV2_args typedOther = (sessionHeartbeatV2_args)other;
lastComparison = Boolean.valueOf(isSetHandle()).compareTo(typedOther.isSetHandle());
if (lastComparison != 0) {
@@ -5305,12 +6263,12 @@ public int compareTo(releaseResource_args other) {
return lastComparison;
}
}
- lastComparison = Boolean.valueOf(isSetIdList()).compareTo(typedOther.isSetIdList());
+ lastComparison = Boolean.valueOf(isSetHeartbeatArgs()).compareTo(typedOther.isSetHeartbeatArgs());
if (lastComparison != 0) {
return lastComparison;
}
- if (isSetIdList()) {
- lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.idList, typedOther.idList);
+ if (isSetHeartbeatArgs()) {
+ lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.heartbeatArgs, typedOther.heartbeatArgs);
if (lastComparison != 0) {
return lastComparison;
}
@@ -5339,19 +6297,10 @@ public void read(org.apache.thrift.protocol.TProtocol iprot) throws org.apache.t
org.apache.thrift.protocol.TProtocolUtil.skip(iprot, field.type);
}
break;
- case 2: // ID_LIST
- if (field.type == org.apache.thrift.protocol.TType.LIST) {
- {
- org.apache.thrift.protocol.TList _list34 = iprot.readListBegin();
- this.idList = new ArrayList<Integer>(_list34.size);
- for (int _i35 = 0; _i35 < _list34.size; ++_i35)
- {
- int _elem36; // required
- _elem36 = iprot.readI32();
- this.idList.add(_elem36);
- }
- iprot.readListEnd();
- }
+ case 2: // HEARTBEAT_ARGS
+ if (field.type == org.apache.thrift.protocol.TType.STRUCT) {
+ this.heartbeatArgs = new HeartbeatArgs();
+ this.heartbeatArgs.read(iprot);
} else {
org.apache.thrift.protocol.TProtocolUtil.skip(iprot, field.type);
}
@@ -5376,16 +6325,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache.
oprot.writeString(this.handle);
oprot.writeFieldEnd();
}
- if (this.idList != null) {
- oprot.writeFieldBegin(ID_LIST_FIELD_DESC);
- {
- oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.I32, this.idList.size()));
- for (int _iter37 : this.idList)
- {
- oprot.writeI32(_iter37);
- }
- oprot.writeListEnd();
- }
+ if (this.heartbeatArgs != null) {
+ oprot.writeFieldBegin(HEARTBEAT_ARGS_FIELD_DESC);
+ this.heartbeatArgs.write(oprot);
oprot.writeFieldEnd();
}
oprot.writeFieldStop();
@@ -5394,7 +6336,7 @@ public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache.
@Override
public String toString() {
- StringBuilder sb = new StringBuilder("releaseResource_args(");
+ StringBuilder sb = new StringBuilder("sessionHeartbeatV2_args(");
boolean first = true;
sb.append("handle:");
@@ -5405,11 +6347,11 @@ public String toString() {
}
first = false;
if (!first) sb.append(", ");
- sb.append("idList:");
- if (this.idList == null) {
+ sb.append("heartbeatArgs:");
+ if (this.heartbeatArgs == null) {
sb.append("null");
} else {
- sb.append(this.idList);
+ sb.append(this.heartbeatArgs);
}
first = false;
sb.append(")");
@@ -5438,16 +6380,19 @@ private void readObject(java.io.ObjectInputStream in) throws java.io.IOException
}
- public static class releaseResource_result implements org.apache.thrift.TBase<releaseResource_result, releaseResource_result._Fields>, java.io.Serializable, Cloneable {
- private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("releaseResource_result");
+ public static class sessionHeartbeatV2_result implements org.apache.thrift.TBase<sessionHeartbeatV2_result, sessionHeartbeatV2_result._Fields>, java.io.Serializable, Cloneable {
+ private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("sessionHeartbeatV2_result");
private static final org.apache.thrift.protocol.TField E_FIELD_DESC = new org.apache.thrift.protocol.TField("e", org.apache.thrift.protocol.TType.STRUCT, (short)1);
+ private static final org.apache.thrift.protocol.TField F_FIELD_DESC = new org.apache.thrift.protocol.TField("f", org.apache.thrift.protocol.TType.STRUCT, (short)2);
public InvalidSessionHandle e; // required
+ public SafeModeException f; // required
/** The set of fields this struct contains, along with convenience methods for finding and manipulating them. */
public enum _Fields implements org.apache.thrift.TFieldIdEnum {
- E((short)1, "e");
+ E((short)1, "e"),
+ F((short)2, "f");
private static final Map<String, _Fields> byName = new HashMap<String, _Fields>();
@@ -5464,6 +6409,8 @@ public static _Fields findByThriftId(int fieldId) {
switch(fieldId) {
case 1: // E
return E;
+ case 2: // F
+ return F;
default:
return null;
}
@@ -5510,43 +6457,51 @@ public String getFieldName() {
Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class);
tmpMap.put(_Fields.E, new org.apache.thrift.meta_data.FieldMetaData("e", org.apache.thrift.TFieldRequirementType.DEFAULT,
new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRUCT)));
+ tmpMap.put(_Fields.F, new org.apache.thrift.meta_data.FieldMetaData("f", org.apache.thrift.TFieldRequirementType.DEFAULT,
+ new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRUCT)));
metaDataMap = Collections.unmodifiableMap(tmpMap);
- org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(releaseResource_result.class, metaDataMap);
+ org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(sessionHeartbeatV2_result.class, metaDataMap);
}
- public releaseResource_result() {
+ public sessionHeartbeatV2_result() {
}
- public releaseResource_result(
- InvalidSessionHandle e)
+ public sessionHeartbeatV2_result(
+ InvalidSessionHandle e,
+ SafeModeException f)
{
this();
this.e = e;
+ this.f = f;
}
/**
* Performs a deep copy on other.
*/
- public releaseResource_result(releaseResource_result other) {
+ public sessionHeartbeatV2_result(sessionHeartbeatV2_result other) {
if (other.isSetE()) {
this.e = new InvalidSessionHandle(other.e);
}
+ if (other.isSetF()) {
+ this.f = new SafeModeException(other.f);
+ }
}
- public releaseResource_result deepCopy() {
- return new releaseResource_result(this);
+ public sessionHeartbeatV2_result deepCopy() {
+ return new sessionHeartbeatV2_result(this);
}
@Override
public void clear() {
this.e = null;
+ this.f = null;
}
public InvalidSessionHandle getE() {
return this.e;
}
- public releaseResource_result setE(InvalidSessionHandle e) {
+ public sessionHeartbeatV2_result setE(InvalidSessionHandle e) {
this.e = e;
return this;
}
@@ -5566,6 +6521,30 @@ public void setEIsSet(boolean value) {
}
}
+ public SafeModeException getF() {
+ return this.f;
+ }
+
+ public sessionHeartbeatV2_result setF(SafeModeException f) {
+ this.f = f;
+ return this;
+ }
+
+ public void unsetF() {
+ this.f = null;
+ }
+
+ /** Returns true if field f is set (has been assigned a value) and false otherwise */
+ public boolean isSetF() {
+ return this.f != null;
+ }
+
+ public void setFIsSet(boolean value) {
+ if (!value) {
+ this.f = null;
+ }
+ }
+
public void setFieldValue(_Fields field, Object value) {
switch (field) {
case E:
@@ -5576,6 +6555,14 @@ public void setFieldValue(_Fields field, Object value) {
}
break;
+ case F:
+ if (value == null) {
+ unsetF();
+ } else {
+ setF((SafeModeException)value);
+ }
+ break;
+
}
}
@@ -5584,6 +6571,9 @@ public Object getFieldValue(_Fields field) {
case E:
return getE();
+ case F:
+ return getF();
+
}
throw new IllegalStateException();
}
@@ -5597,6 +6587,8 @@ public boolean isSet(_Fields field) {
switch (field) {
case E:
return isSetE();
+ case F:
+ return isSetF();
}
throw new IllegalStateException();
}
@@ -5605,12 +6597,12 @@ public boolean isSet(_Fields field) {
public boolean equals(Object that) {
if (that == null)
return false;
- if (that instanceof releaseResource_result)
- return this.equals((releaseResource_result)that);
+ if (that instanceof sessionHeartbeatV2_result)
+ return this.equals((sessionHeartbeatV2_result)that);
return false;
}
- public boolean equals(releaseResource_result that) {
+ public boolean equals(sessionHeartbeatV2_result that) {
if (that == null)
return false;
@@ -5623,6 +6615,15 @@ public boolean equals(releaseResource_result that) {
return false;
}
+ boolean this_present_f = true && this.isSetF();
+ boolean that_present_f = true && that.isSetF();
+ if (this_present_f || that_present_f) {
+ if (!(this_present_f && that_present_f))
+ return false;
+ if (!this.f.equals(that.f))
+ return false;
+ }
+
return true;
}
@@ -5631,13 +6632,13 @@ public int hashCode() {
return 0;
}
- public int compareTo(releaseResource_result other) {
+ public int compareTo(sessionHeartbeatV2_result other) {
if (!getClass().equals(other.getClass())) {
return getClass().getName().compareTo(other.getClass().getName());
}
int lastComparison = 0;
- releaseResource_result typedOther = (releaseResource_result)other;
+ sessionHeartbeatV2_result typedOther = (sessionHeartbeatV2_result)other;
lastComparison = Boolean.valueOf(isSetE()).compareTo(typedOther.isSetE());
if (lastComparison != 0) {
@@ -5649,6 +6650,16 @@ public int compareTo(releaseResource_result other) {
return lastComparison;
}
}
+ lastComparison = Boolean.valueOf(isSetF()).compareTo(typedOther.isSetF());
+ if (lastComparison != 0) {
+ return lastComparison;
+ }
+ if (isSetF()) {
+ lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.f, typedOther.f);
+ if (lastComparison != 0) {
+ return lastComparison;
+ }
+ }
return 0;
}
@@ -5674,6 +6685,14 @@ public void read(org.apache.thrift.protocol.TProtocol iprot) throws org.apache.t
org.apache.thrift.protocol.TProtocolUtil.skip(iprot, field.type);
}
break;
+ case 2: // F
+ if (field.type == org.apache.thrift.protocol.TType.STRUCT) {
+ this.f = new SafeModeException();
+ this.f.read(iprot);
+ } else {
+ org.apache.thrift.protocol.TProtocolUtil.skip(iprot, field.type);
+ }
+ break;
default:
org.apache.thrift.protocol.TProtocolUtil.skip(iprot, field.type);
}
@@ -5692,6 +6711,10 @@ public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache.
oprot.writeFieldBegin(E_FIELD_DESC);
this.e.write(oprot);
oprot.writeFieldEnd();
+ } else if (this.isSetF()) {
+ oprot.writeFieldBegin(F_FIELD_DESC);
+ this.f.write(oprot);
+ oprot.writeFieldEnd();
}
oprot.writeFieldStop();
oprot.writeStructEnd();
@@ -5699,7 +6722,7 @@ public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache.
@Override
public String toString() {
- StringBuilder sb = new StringBuilder("releaseResource_result(");
+ StringBuilder sb = new StringBuilder("sessionHeartbeatV2_result(");
boolean first = true;
sb.append("e:");
@@ -5709,6 +6732,14 @@ public String toString() {
sb.append(this.e);
}
first = false;
+ if (!first) sb.append(", ");
+ sb.append("f:");
+ if (this.f == null) {
+ sb.append("null");
+ } else {
+ sb.append(this.f);
+ }
+ first = false;
sb.append(")");
return sb.toString();
}
@@ -5735,16 +6766,19 @@ private void readObject(java.io.ObjectInputStream in) throws java.io.IOException
}
- public static class nodeHeartbeat_args implements org.apache.thrift.TBase<nodeHeartbeat_args, nodeHeartbeat_args._Fields>, java.io.Serializable, Cloneable {
- private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("nodeHeartbeat_args");
-
- private static final org.apache.thrift.protocol.TField NODE_FIELD_DESC = new org.apache.thrift.protocol.TField("node", org.apache.thrift.protocol.TType.STRUCT, (short)1);
+ public static class requestResource_args implements org.apache.thrift.TBase<requestResource_args, requestResource_args._Fields>, java.io.Serializable, Cloneable {
+ private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("requestResource_args");
- public ClusterNodeInfo node; // required
+ private static final org.apache.thrift.protocol.TField HANDLE_FIELD_DESC = new org.apache.thrift.protocol.TField("handle", org.apache.thrift.protocol.TType.STRING, (short)1);
+ private static final org.apache.thrift.protocol.TField REQUEST_LIST_FIELD_DESC = new org.apache.thrift.protocol.TField("requestList", org.apache.thrift.protocol.TType.LIST, (short)2);
+
+ public String handle; // required
+ public List<ResourceRequest> requestList; // required
/** The set of fields this struct contains, along with convenience methods for finding and manipulating them. */
public enum _Fields implements org.apache.thrift.TFieldIdEnum {
- NODE((short)1, "node");
+ HANDLE((short)1, "handle"),
+ REQUEST_LIST((short)2, "requestList");
private static final Map<String, _Fields> byName = new HashMap<String, _Fields>();
@@ -5759,8 +6793,10 @@ public enum _Fields implements org.apache.thrift.TFieldIdEnum {
*/
public static _Fields findByThriftId(int fieldId) {
switch(fieldId) {
- case 1: // NODE
- return NODE;
+ case 1: // HANDLE
+ return HANDLE;
+ case 2: // REQUEST_LIST
+ return REQUEST_LIST;
default:
return null;
}
@@ -5805,71 +6841,131 @@ public String getFieldName() {
public static final Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap;
static {
Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class);
- tmpMap.put(_Fields.NODE, new org.apache.thrift.meta_data.FieldMetaData("node", org.apache.thrift.TFieldRequirementType.DEFAULT,
- new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT, ClusterNodeInfo.class)));
+ tmpMap.put(_Fields.HANDLE, new org.apache.thrift.meta_data.FieldMetaData("handle", org.apache.thrift.TFieldRequirementType.DEFAULT,
+ new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING , "SessionHandle")));
+ tmpMap.put(_Fields.REQUEST_LIST, new org.apache.thrift.meta_data.FieldMetaData("requestList", org.apache.thrift.TFieldRequirementType.DEFAULT,
+ new org.apache.thrift.meta_data.ListMetaData(org.apache.thrift.protocol.TType.LIST,
+ new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT, ResourceRequest.class))));
metaDataMap = Collections.unmodifiableMap(tmpMap);
- org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(nodeHeartbeat_args.class, metaDataMap);
+ org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(requestResource_args.class, metaDataMap);
}
- public nodeHeartbeat_args() {
+ public requestResource_args() {
}
- public nodeHeartbeat_args(
- ClusterNodeInfo node)
+ public requestResource_args(
+ String handle,
+ List<ResourceRequest> requestList)
{
this();
- this.node = node;
+ this.handle = handle;
+ this.requestList = requestList;
}
/**
* Performs a deep copy on other.
*/
- public nodeHeartbeat_args(nodeHeartbeat_args other) {
- if (other.isSetNode()) {
- this.node = new ClusterNodeInfo(other.node);
+ public requestResource_args(requestResource_args other) {
+ if (other.isSetHandle()) {
+ this.handle = other.handle;
+ }
+ if (other.isSetRequestList()) {
+ List<ResourceRequest> __this__requestList = new ArrayList<ResourceRequest>();
+ for (ResourceRequest other_element : other.requestList) {
+ __this__requestList.add(new ResourceRequest(other_element));
+ }
+ this.requestList = __this__requestList;
}
}
- public nodeHeartbeat_args deepCopy() {
- return new nodeHeartbeat_args(this);
+ public requestResource_args deepCopy() {
+ return new requestResource_args(this);
}
@Override
public void clear() {
- this.node = null;
+ this.handle = null;
+ this.requestList = null;
}
- public ClusterNodeInfo getNode() {
- return this.node;
+ public String getHandle() {
+ return this.handle;
}
- public nodeHeartbeat_args setNode(ClusterNodeInfo node) {
- this.node = node;
+ public requestResource_args setHandle(String handle) {
+ this.handle = handle;
return this;
}
- public void unsetNode() {
- this.node = null;
+ public void unsetHandle() {
+ this.handle = null;
}
- /** Returns true if field node is set (has been assigned a value) and false otherwise */
- public boolean isSetNode() {
- return this.node != null;
+ /** Returns true if field handle is set (has been assigned a value) and false otherwise */
+ public boolean isSetHandle() {
+ return this.handle != null;
}
- public void setNodeIsSet(boolean value) {
+ public void setHandleIsSet(boolean value) {
if (!value) {
- this.node = null;
+ this.handle = null;
+ }
+ }
+
+ public int getRequestListSize() {
+ return (this.requestList == null) ? 0 : this.requestList.size();
+ }
+
+ public java.util.Iterator<ResourceRequest> getRequestListIterator() {
+ return (this.requestList == null) ? null : this.requestList.iterator();
+ }
+
+ public void addToRequestList(ResourceRequest elem) {
+ if (this.requestList == null) {
+ this.requestList = new ArrayList<ResourceRequest>();
+ }
+ this.requestList.add(elem);
+ }
+
+ public List<ResourceRequest> getRequestList() {
+ return this.requestList;
+ }
+
+ public requestResource_args setRequestList(List<ResourceRequest> requestList) {
+ this.requestList = requestList;
+ return this;
+ }
+
+ public void unsetRequestList() {
+ this.requestList = null;
+ }
+
+ /** Returns true if field requestList is set (has been assigned a value) and false otherwise */
+ public boolean isSetRequestList() {
+ return this.requestList != null;
+ }
+
+ public void setRequestListIsSet(boolean value) {
+ if (!value) {
+ this.requestList = null;
}
}
public void setFieldValue(_Fields field, Object value) {
switch (field) {
- case NODE:
+ case HANDLE:
if (value == null) {
- unsetNode();
+ unsetHandle();
} else {
- setNode((ClusterNodeInfo)value);
+ setHandle((String)value);
+ }
+ break;
+
+ case REQUEST_LIST:
+ if (value == null) {
+ unsetRequestList();
+ } else {
+ setRequestList((List<ResourceRequest>)value);
}
break;
@@ -5878,8 +6974,11 @@ public void setFieldValue(_Fields field, Object value) {
public Object getFieldValue(_Fields field) {
switch (field) {
- case NODE:
- return getNode();
+ case HANDLE:
+ return getHandle();
+
+ case REQUEST_LIST:
+ return getRequestList();
}
throw new IllegalStateException();
@@ -5892,8 +6991,10 @@ public boolean isSet(_Fields field) {
}
switch (field) {
- case NODE:
- return isSetNode();
+ case HANDLE:
+ return isSetHandle();
+ case REQUEST_LIST:
+ return isSetRequestList();
}
throw new IllegalStateException();
}
@@ -5902,21 +7003,30 @@ public boolean isSet(_Fields field) {
public boolean equals(Object that) {
if (that == null)
return false;
- if (that instanceof nodeHeartbeat_args)
- return this.equals((nodeHeartbeat_args)that);
+ if (that instanceof requestResource_args)
+ return this.equals((requestResource_args)that);
return false;
}
- public boolean equals(nodeHeartbeat_args that) {
+ public boolean equals(requestResource_args that) {
if (that == null)
return false;
- boolean this_present_node = true && this.isSetNode();
- boolean that_present_node = true && that.isSetNode();
- if (this_present_node || that_present_node) {
- if (!(this_present_node && that_present_node))
+ boolean this_present_handle = true && this.isSetHandle();
+ boolean that_present_handle = true && that.isSetHandle();
+ if (this_present_handle || that_present_handle) {
+ if (!(this_present_handle && that_present_handle))
return false;
- if (!this.node.equals(that.node))
+ if (!this.handle.equals(that.handle))
+ return false;
+ }
+
+ boolean this_present_requestList = true && this.isSetRequestList();
+ boolean that_present_requestList = true && that.isSetRequestList();
+ if (this_present_requestList || that_present_requestList) {
+ if (!(this_present_requestList && that_present_requestList))
+ return false;
+ if (!this.requestList.equals(that.requestList))
return false;
}
@@ -5928,20 +7038,30 @@ public int hashCode() {
return 0;
}
- public int compareTo(nodeHeartbeat_args other) {
+ public int compareTo(requestResource_args other) {
if (!getClass().equals(other.getClass())) {
return getClass().getName().compareTo(other.getClass().getName());
}
int lastComparison = 0;
- nodeHeartbeat_args typedOther = (nodeHeartbeat_args)other;
+ requestResource_args typedOther = (requestResource_args)other;
- lastComparison = Boolean.valueOf(isSetNode()).compareTo(typedOther.isSetNode());
+ lastComparison = Boolean.valueOf(isSetHandle()).compareTo(typedOther.isSetHandle());
if (lastComparison != 0) {
return lastComparison;
}
- if (isSetNode()) {
- lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.node, typedOther.node);
+ if (isSetHandle()) {
+ lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.handle, typedOther.handle);
+ if (lastComparison != 0) {
+ return lastComparison;
+ }
+ }
+ lastComparison = Boolean.valueOf(isSetRequestList()).compareTo(typedOther.isSetRequestList());
+ if (lastComparison != 0) {
+ return lastComparison;
+ }
+ if (isSetRequestList()) {
+ lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.requestList, typedOther.requestList);
if (lastComparison != 0) {
return lastComparison;
}
@@ -5963,10 +7083,27 @@ public void read(org.apache.thrift.protocol.TProtocol iprot) throws org.apache.t
break;
}
switch (field.id) {
- case 1: // NODE
- if (field.type == org.apache.thrift.protocol.TType.STRUCT) {
- this.node = new ClusterNodeInfo();
- this.node.read(iprot);
+ case 1: // HANDLE
+ if (field.type == org.apache.thrift.protocol.TType.STRING) {
+ this.handle = iprot.readString();
+ } else {
+ org.apache.thrift.protocol.TProtocolUtil.skip(iprot, field.type);
+ }
+ break;
+ case 2: // REQUEST_LIST
+ if (field.type == org.apache.thrift.protocol.TType.LIST) {
+ {
+ org.apache.thrift.protocol.TList _list30 = iprot.readListBegin();
+ this.requestList = new ArrayList<ResourceRequest>(_list30.size);
+ for (int _i31 = 0; _i31 < _list30.size; ++_i31)
+ {
+ ResourceRequest _elem32; // required
+ _elem32 = new ResourceRequest();
+ _elem32.read(iprot);
+ this.requestList.add(_elem32);
+ }
+ iprot.readListEnd();
+ }
} else {
org.apache.thrift.protocol.TProtocolUtil.skip(iprot, field.type);
}
@@ -5986,9 +7123,21 @@ public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache.
validate();
oprot.writeStructBegin(STRUCT_DESC);
- if (this.node != null) {
- oprot.writeFieldBegin(NODE_FIELD_DESC);
- this.node.write(oprot);
+ if (this.handle != null) {
+ oprot.writeFieldBegin(HANDLE_FIELD_DESC);
+ oprot.writeString(this.handle);
+ oprot.writeFieldEnd();
+ }
+ if (this.requestList != null) {
+ oprot.writeFieldBegin(REQUEST_LIST_FIELD_DESC);
+ {
+ oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, this.requestList.size()));
+ for (ResourceRequest _iter33 : this.requestList)
+ {
+ _iter33.write(oprot);
+ }
+ oprot.writeListEnd();
+ }
oprot.writeFieldEnd();
}
oprot.writeFieldStop();
@@ -5997,14 +7146,22 @@ public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache.
@Override
public String toString() {
- StringBuilder sb = new StringBuilder("nodeHeartbeat_args(");
+ StringBuilder sb = new StringBuilder("requestResource_args(");
boolean first = true;
- sb.append("node:");
- if (this.node == null) {
+ sb.append("handle:");
+ if (this.handle == null) {
sb.append("null");
} else {
- sb.append(this.node);
+ sb.append(this.handle);
+ }
+ first = false;
+ if (!first) sb.append(", ");
+ sb.append("requestList:");
+ if (this.requestList == null) {
+ sb.append("null");
+ } else {
+ sb.append(this.requestList);
}
first = false;
sb.append(")");
@@ -6033,16 +7190,19 @@ private void readObject(java.io.ObjectInputStream in) throws java.io.IOException
}
- public static class nodeHeartbeat_result implements org.apache.thrift.TBase<nodeHeartbeat_result, nodeHeartbeat_result._Fields>, java.io.Serializable, Cloneable {
- private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("nodeHeartbeat_result");
+ public static class requestResource_result implements org.apache.thrift.TBase<requestResource_result, requestResource_result._Fields>, java.io.Serializable, Cloneable {
+ private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("requestResource_result");
private static final org.apache.thrift.protocol.TField E_FIELD_DESC = new org.apache.thrift.protocol.TField("e", org.apache.thrift.protocol.TType.STRUCT, (short)1);
+ private static final org.apache.thrift.protocol.TField F_FIELD_DESC = new org.apache.thrift.protocol.TField("f", org.apache.thrift.protocol.TType.STRUCT, (short)2);
- public DisallowedNode e; // required
+ public InvalidSessionHandle e; // required
+ public SafeModeException f; // required
/** The set of fields this struct contains, along with convenience methods for finding and manipulating them. */
public enum _Fields implements org.apache.thrift.TFieldIdEnum {
- E((short)1, "e");
+ E((short)1, "e"),
+ F((short)2, "f");
private static final Map<String, _Fields> byName = new HashMap<String, _Fields>();
@@ -6059,6 +7219,8 @@ public static _Fields findByThriftId(int fieldId) {
switch(fieldId) {
case 1: // E
return E;
+ case 2: // F
+ return F;
default:
return null;
}
@@ -6105,43 +7267,51 @@ public String getFieldName() {
Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class);
tmpMap.put(_Fields.E, new org.apache.thrift.meta_data.FieldMetaData("e", org.apache.thrift.TFieldRequirementType.DEFAULT,
new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRUCT)));
+ tmpMap.put(_Fields.F, new org.apache.thrift.meta_data.FieldMetaData("f", org.apache.thrift.TFieldRequirementType.DEFAULT,
+ new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRUCT)));
metaDataMap = Collections.unmodifiableMap(tmpMap);
- org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(nodeHeartbeat_result.class, metaDataMap);
+ org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(requestResource_result.class, metaDataMap);
}
- public nodeHeartbeat_result() {
+ public requestResource_result() {
}
- public nodeHeartbeat_result(
- DisallowedNode e)
+ public requestResource_result(
+ InvalidSessionHandle e,
+ SafeModeException f)
{
this();
this.e = e;
+ this.f = f;
}
/**
* Performs a deep copy on other.
*/
- public nodeHeartbeat_result(nodeHeartbeat_result other) {
+ public requestResource_result(requestResource_result other) {
if (other.isSetE()) {
- this.e = new DisallowedNode(other.e);
+ this.e = new InvalidSessionHandle(other.e);
+ }
+ if (other.isSetF()) {
+ this.f = new SafeModeException(other.f);
}
}
- public nodeHeartbeat_result deepCopy() {
- return new nodeHeartbeat_result(this);
+ public requestResource_result deepCopy() {
+ return new requestResource_result(this);
}
@Override
public void clear() {
this.e = null;
+ this.f = null;
}
- public DisallowedNode getE() {
+ public InvalidSessionHandle getE() {
return this.e;
}
- public nodeHeartbeat_result setE(DisallowedNode e) {
+ public requestResource_result setE(InvalidSessionHandle e) {
this.e = e;
return this;
}
@@ -6161,13 +7331,45 @@ public void setEIsSet(boolean value) {
}
}
+ public SafeModeException getF() {
+ return this.f;
+ }
+
+ public requestResource_result setF(SafeModeException f) {
+ this.f = f;
+ return this;
+ }
+
+ public void unsetF() {
+ this.f = null;
+ }
+
+ /** Returns true if field f is set (has been assigned a value) and false otherwise */
+ public boolean isSetF() {
+ return this.f != null;
+ }
+
+ public void setFIsSet(boolean value) {
+ if (!value) {
+ this.f = null;
+ }
+ }
+
public void setFieldValue(_Fields field, Object value) {
switch (field) {
case E:
if (value == null) {
unsetE();
} else {
- setE((DisallowedNode)value);
+ setE((InvalidSessionHandle)value);
+ }
+ break;
+
+ case F:
+ if (value == null) {
+ unsetF();
+ } else {
+ setF((SafeModeException)value);
}
break;
@@ -6179,6 +7381,9 @@ public Object getFieldValue(_Fields field) {
case E:
return getE();
+ case F:
+ return getF();
+
}
throw new IllegalStateException();
}
@@ -6192,6 +7397,8 @@ public boolean isSet(_Fields field) {
switch (field) {
case E:
return isSetE();
+ case F:
+ return isSetF();
}
throw new IllegalStateException();
}
@@ -6200,12 +7407,12 @@ public boolean isSet(_Fields field) {
public boolean equals(Object that) {
if (that == null)
return false;
- if (that instanceof nodeHeartbeat_result)
- return this.equals((nodeHeartbeat_result)that);
+ if (that instanceof requestResource_result)
+ return this.equals((requestResource_result)that);
return false;
}
- public boolean equals(nodeHeartbeat_result that) {
+ public boolean equals(requestResource_result that) {
if (that == null)
return false;
@@ -6218,6 +7425,15 @@ public boolean equals(nodeHeartbeat_result that) {
return false;
}
+ boolean this_present_f = true && this.isSetF();
+ boolean that_present_f = true && that.isSetF();
+ if (this_present_f || that_present_f) {
+ if (!(this_present_f && that_present_f))
+ return false;
+ if (!this.f.equals(that.f))
+ return false;
+ }
+
return true;
}
@@ -6226,13 +7442,13 @@ public int hashCode() {
return 0;
}
- public int compareTo(nodeHeartbeat_result other) {
+ public int compareTo(requestResource_result other) {
if (!getClass().equals(other.getClass())) {
return getClass().getName().compareTo(other.getClass().getName());
}
int lastComparison = 0;
- nodeHeartbeat_result typedOther = (nodeHeartbeat_result)other;
+ requestResource_result typedOther = (requestResource_result)other;
lastComparison = Boolean.valueOf(isSetE()).compareTo(typedOther.isSetE());
if (lastComparison != 0) {
@@ -6244,6 +7460,16 @@ public int compareTo(nodeHeartbeat_result other) {
return lastComparison;
}
}
+ lastComparison = Boolean.valueOf(isSetF()).compareTo(typedOther.isSetF());
+ if (lastComparison != 0) {
+ return lastComparison;
+ }
+ if (isSetF()) {
+ lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.f, typedOther.f);
+ if (lastComparison != 0) {
+ return lastComparison;
+ }
+ }
return 0;
}
@@ -6263,12 +7489,20 @@ public void read(org.apache.thrift.protocol.TProtocol iprot) throws org.apache.t
switch (field.id) {
case 1: // E
if (field.type == org.apache.thrift.protocol.TType.STRUCT) {
- this.e = new DisallowedNode();
+ this.e = new InvalidSessionHandle();
this.e.read(iprot);
} else {
org.apache.thrift.protocol.TProtocolUtil.skip(iprot, field.type);
}
break;
+ case 2: // F
+ if (field.type == org.apache.thrift.protocol.TType.STRUCT) {
+ this.f = new SafeModeException();
+ this.f.read(iprot);
+ } else {
+ org.apache.thrift.protocol.TProtocolUtil.skip(iprot, field.type);
+ }
+ break;
default:
org.apache.thrift.protocol.TProtocolUtil.skip(iprot, field.type);
}
@@ -6287,6 +7521,10 @@ public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache.
oprot.writeFieldBegin(E_FIELD_DESC);
this.e.write(oprot);
oprot.writeFieldEnd();
+ } else if (this.isSetF()) {
+ oprot.writeFieldBegin(F_FIELD_DESC);
+ this.f.write(oprot);
+ oprot.writeFieldEnd();
}
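+ // writeFieldStop() emits the TType.STOP marker that terminates the
+ // struct's field list on the wire.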
oprot.writeFieldStop();
oprot.writeStructEnd();
@@ -6294,7 +7532,7 @@ public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache.
@Override
public String toString() {
- StringBuilder sb = new StringBuilder("nodeHeartbeat_result(");
+ StringBuilder sb = new StringBuilder("requestResource_result(");
boolean first = true;
sb.append("e:");
@@ -6304,6 +7542,14 @@ public String toString() {
sb.append(this.e);
}
first = false;
+ if (!first) sb.append(", ");
+ sb.append("f:");
+ if (this.f == null) {
+ sb.append("null");
+ } else {
+ sb.append(this.f);
+ }
+ first = false;
sb.append(")");
return sb.toString();
}
@@ -6330,22 +7576,19 @@ private void readObject(java.io.ObjectInputStream in) throws java.io.IOException
}
- public static class nodeFeedback_args implements org.apache.thrift.TBase, java.io.Serializable, Cloneable {
- private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("nodeFeedback_args");
+ public static class releaseResource_args implements org.apache.thrift.TBase, java.io.Serializable, Cloneable {
+ private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("releaseResource_args");
private static final org.apache.thrift.protocol.TField HANDLE_FIELD_DESC = new org.apache.thrift.protocol.TField("handle", org.apache.thrift.protocol.TType.STRING, (short)1);
- private static final org.apache.thrift.protocol.TField RESOURCE_TYPES_FIELD_DESC = new org.apache.thrift.protocol.TField("resourceTypes", org.apache.thrift.protocol.TType.LIST, (short)2);
- private static final org.apache.thrift.protocol.TField STATS_FIELD_DESC = new org.apache.thrift.protocol.TField("stats", org.apache.thrift.protocol.TType.LIST, (short)3);
+ private static final org.apache.thrift.protocol.TField ID_LIST_FIELD_DESC = new org.apache.thrift.protocol.TField("idList", org.apache.thrift.protocol.TType.LIST, (short)2);
public String handle; // required
- public List resourceTypes; // required
- public List stats; // required
+ public List idList; // required
/** The set of fields this struct contains, along with convenience methods for finding and manipulating them. */
public enum _Fields implements org.apache.thrift.TFieldIdEnum {
HANDLE((short)1, "handle"),
- RESOURCE_TYPES((short)2, "resourceTypes"),
- STATS((short)3, "stats");
+ ID_LIST((short)2, "idList");
private static final Map byName = new HashMap();
@@ -6362,10 +7605,8 @@ public static _Fields findByThriftId(int fieldId) {
switch(fieldId) {
case 1: // HANDLE
return HANDLE;
- case 2: // RESOURCE_TYPES
- return RESOURCE_TYPES;
- case 3: // STATS
- return STATS;
+ case 2: // ID_LIST
+ return ID_LIST;
default:
return null;
}
@@ -6412,69 +7653,56 @@ public String getFieldName() {
Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class);
tmpMap.put(_Fields.HANDLE, new org.apache.thrift.meta_data.FieldMetaData("handle", org.apache.thrift.TFieldRequirementType.DEFAULT,
new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING , "SessionHandle")));
- tmpMap.put(_Fields.RESOURCE_TYPES, new org.apache.thrift.meta_data.FieldMetaData("resourceTypes", org.apache.thrift.TFieldRequirementType.DEFAULT,
- new org.apache.thrift.meta_data.ListMetaData(org.apache.thrift.protocol.TType.LIST,
- new org.apache.thrift.meta_data.EnumMetaData(org.apache.thrift.protocol.TType.ENUM, ResourceType.class))));
- tmpMap.put(_Fields.STATS, new org.apache.thrift.meta_data.FieldMetaData("stats", org.apache.thrift.TFieldRequirementType.DEFAULT,
+ tmpMap.put(_Fields.ID_LIST, new org.apache.thrift.meta_data.FieldMetaData("idList", org.apache.thrift.TFieldRequirementType.DEFAULT,
new org.apache.thrift.meta_data.ListMetaData(org.apache.thrift.protocol.TType.LIST,
- new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT, NodeUsageReport.class))));
+ new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.I32 , "ResourceRequestId"))));
metaDataMap = Collections.unmodifiableMap(tmpMap);
- org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(nodeFeedback_args.class, metaDataMap);
+ org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(releaseResource_args.class, metaDataMap);
}
- public nodeFeedback_args() {
+ public releaseResource_args() {
}
- public nodeFeedback_args(
+ public releaseResource_args(
String handle,
- List resourceTypes,
- List stats)
+ List idList)
{
this();
this.handle = handle;
- this.resourceTypes = resourceTypes;
- this.stats = stats;
+ this.idList = idList;
}
/**
* Performs a deep copy on other.
*/
- public nodeFeedback_args(nodeFeedback_args other) {
+ public releaseResource_args(releaseResource_args other) {
if (other.isSetHandle()) {
this.handle = other.handle;
}
- if (other.isSetResourceTypes()) {
- List __this__resourceTypes = new ArrayList();
- for (ResourceType other_element : other.resourceTypes) {
- __this__resourceTypes.add(other_element);
- }
- this.resourceTypes = __this__resourceTypes;
- }
- if (other.isSetStats()) {
- List __this__stats = new ArrayList();
- for (NodeUsageReport other_element : other.stats) {
- __this__stats.add(new NodeUsageReport(other_element));
+ if (other.isSetIdList()) {
+ List __this__idList = new ArrayList();
+ for (Integer other_element : other.idList) {
+ __this__idList.add(other_element);
}
- this.stats = __this__stats;
+ this.idList = __this__idList;
}
}
- public nodeFeedback_args deepCopy() {
- return new nodeFeedback_args(this);
+ public releaseResource_args deepCopy() {
+ return new releaseResource_args(this);
}
@Override
public void clear() {
this.handle = null;
- this.resourceTypes = null;
- this.stats = null;
+ this.idList = null;
}
public String getHandle() {
return this.handle;
}
- public nodeFeedback_args setHandle(String handle) {
+ public releaseResource_args setHandle(String handle) {
this.handle = handle;
return this;
}
@@ -6494,81 +7722,42 @@ public void setHandleIsSet(boolean value) {
}
}
- public int getResourceTypesSize() {
- return (this.resourceTypes == null) ? 0 : this.resourceTypes.size();
- }
-
- public java.util.Iterator getResourceTypesIterator() {
- return (this.resourceTypes == null) ? null : this.resourceTypes.iterator();
- }
-
- public void addToResourceTypes(ResourceType elem) {
- if (this.resourceTypes == null) {
- this.resourceTypes = new ArrayList();
- }
- this.resourceTypes.add(elem);
- }
-
- public List getResourceTypes() {
- return this.resourceTypes;
- }
-
- public nodeFeedback_args setResourceTypes(List resourceTypes) {
- this.resourceTypes = resourceTypes;
- return this;
- }
-
- public void unsetResourceTypes() {
- this.resourceTypes = null;
- }
-
- /** Returns true if field resourceTypes is set (has been assigned a value) and false otherwise */
- public boolean isSetResourceTypes() {
- return this.resourceTypes != null;
- }
-
- public void setResourceTypesIsSet(boolean value) {
- if (!value) {
- this.resourceTypes = null;
- }
- }
-
- public int getStatsSize() {
- return (this.stats == null) ? 0 : this.stats.size();
+ public int getIdListSize() {
+ return (this.idList == null) ? 0 : this.idList.size();
}
- public java.util.Iterator getStatsIterator() {
- return (this.stats == null) ? null : this.stats.iterator();
+ public java.util.Iterator getIdListIterator() {
+ return (this.idList == null) ? null : this.idList.iterator();
}
- public void addToStats(NodeUsageReport elem) {
- if (this.stats == null) {
- this.stats = new ArrayList();
+ public void addToIdList(int elem) {
+ if (this.idList == null) {
+ this.idList = new ArrayList();
}
- this.stats.add(elem);
+ this.idList.add(elem);
}
- public List getStats() {
- return this.stats;
+ public List getIdList() {
+ return this.idList;
}
- public nodeFeedback_args setStats(List stats) {
- this.stats = stats;
+ public releaseResource_args setIdList(List idList) {
+ this.idList = idList;
return this;
}
- public void unsetStats() {
- this.stats = null;
+ public void unsetIdList() {
+ this.idList = null;
}
- /** Returns true if field stats is set (has been assigned a value) and false otherwise */
- public boolean isSetStats() {
- return this.stats != null;
+ /** Returns true if field idList is set (has been assigned a value) and false otherwise */
+ public boolean isSetIdList() {
+ return this.idList != null;
}
- public void setStatsIsSet(boolean value) {
+ public void setIdListIsSet(boolean value) {
if (!value) {
- this.stats = null;
+ this.idList = null;
}
}
@@ -6582,19 +7771,11 @@ public void setFieldValue(_Fields field, Object value) {
}
break;
- case RESOURCE_TYPES:
- if (value == null) {
- unsetResourceTypes();
- } else {
- setResourceTypes((List)value);
- }
- break;
-
- case STATS:
+ case ID_LIST:
if (value == null) {
- unsetStats();
+ unsetIdList();
} else {
- setStats((List)value);
+ setIdList((List)value);
}
break;
@@ -6606,11 +7787,8 @@ public Object getFieldValue(_Fields field) {
case HANDLE:
return getHandle();
- case RESOURCE_TYPES:
- return getResourceTypes();
-
- case STATS:
- return getStats();
+ case ID_LIST:
+ return getIdList();
}
throw new IllegalStateException();
@@ -6625,10 +7803,8 @@ public boolean isSet(_Fields field) {
switch (field) {
case HANDLE:
return isSetHandle();
- case RESOURCE_TYPES:
- return isSetResourceTypes();
- case STATS:
- return isSetStats();
+ case ID_LIST:
+ return isSetIdList();
}
throw new IllegalStateException();
}
@@ -6637,12 +7813,12 @@ public boolean isSet(_Fields field) {
public boolean equals(Object that) {
if (that == null)
return false;
- if (that instanceof nodeFeedback_args)
- return this.equals((nodeFeedback_args)that);
+ if (that instanceof releaseResource_args)
+ return this.equals((releaseResource_args)that);
return false;
}
- public boolean equals(nodeFeedback_args that) {
+ public boolean equals(releaseResource_args that) {
if (that == null)
return false;
@@ -6655,21 +7831,12 @@ public boolean equals(nodeFeedback_args that) {
return false;
}
- boolean this_present_resourceTypes = true && this.isSetResourceTypes();
- boolean that_present_resourceTypes = true && that.isSetResourceTypes();
- if (this_present_resourceTypes || that_present_resourceTypes) {
- if (!(this_present_resourceTypes && that_present_resourceTypes))
+ boolean this_present_idList = true && this.isSetIdList();
+ boolean that_present_idList = true && that.isSetIdList();
+ if (this_present_idList || that_present_idList) {
+ if (!(this_present_idList && that_present_idList))
return false;
- if (!this.resourceTypes.equals(that.resourceTypes))
- return false;
- }
-
- boolean this_present_stats = true && this.isSetStats();
- boolean that_present_stats = true && that.isSetStats();
- if (this_present_stats || that_present_stats) {
- if (!(this_present_stats && that_present_stats))
- return false;
- if (!this.stats.equals(that.stats))
+ if (!this.idList.equals(that.idList))
return false;
}
@@ -6681,13 +7848,13 @@ public int hashCode() {
return 0;
}
- public int compareTo(nodeFeedback_args other) {
+ public int compareTo(releaseResource_args other) {
if (!getClass().equals(other.getClass())) {
return getClass().getName().compareTo(other.getClass().getName());
}
int lastComparison = 0;
- nodeFeedback_args typedOther = (nodeFeedback_args)other;
+ releaseResource_args typedOther = (releaseResource_args)other;
lastComparison = Boolean.valueOf(isSetHandle()).compareTo(typedOther.isSetHandle());
if (lastComparison != 0) {
@@ -6699,22 +7866,12 @@ public int compareTo(nodeFeedback_args other) {
return lastComparison;
}
}
- lastComparison = Boolean.valueOf(isSetResourceTypes()).compareTo(typedOther.isSetResourceTypes());
- if (lastComparison != 0) {
- return lastComparison;
- }
- if (isSetResourceTypes()) {
- lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.resourceTypes, typedOther.resourceTypes);
- if (lastComparison != 0) {
- return lastComparison;
- }
- }
- lastComparison = Boolean.valueOf(isSetStats()).compareTo(typedOther.isSetStats());
+ lastComparison = Boolean.valueOf(isSetIdList()).compareTo(typedOther.isSetIdList());
if (lastComparison != 0) {
return lastComparison;
}
- if (isSetStats()) {
- lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.stats, typedOther.stats);
+ if (isSetIdList()) {
+ lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.idList, typedOther.idList);
if (lastComparison != 0) {
return lastComparison;
}
@@ -6743,34 +7900,16 @@ public void read(org.apache.thrift.protocol.TProtocol iprot) throws org.apache.t
org.apache.thrift.protocol.TProtocolUtil.skip(iprot, field.type);
}
break;
- case 2: // RESOURCE_TYPES
- if (field.type == org.apache.thrift.protocol.TType.LIST) {
- {
- org.apache.thrift.protocol.TList _list38 = iprot.readListBegin();
- this.resourceTypes = new ArrayList(_list38.size);
- for (int _i39 = 0; _i39 < _list38.size; ++_i39)
- {
- ResourceType _elem40; // required
- _elem40 = ResourceType.findByValue(iprot.readI32());
- this.resourceTypes.add(_elem40);
- }
- iprot.readListEnd();
- }
- } else {
- org.apache.thrift.protocol.TProtocolUtil.skip(iprot, field.type);
- }
- break;
- case 3: // STATS
+ case 2: // ID_LIST
if (field.type == org.apache.thrift.protocol.TType.LIST) {
{
- org.apache.thrift.protocol.TList _list41 = iprot.readListBegin();
- this.stats = new ArrayList(_list41.size);
- for (int _i42 = 0; _i42 < _list41.size; ++_i42)
+ org.apache.thrift.protocol.TList _list34 = iprot.readListBegin();
+ this.idList = new ArrayList(_list34.size);
+ for (int _i35 = 0; _i35 < _list34.size; ++_i35)
{
- NodeUsageReport _elem43; // required
- _elem43 = new NodeUsageReport();
- _elem43.read(iprot);
- this.stats.add(_elem43);
+ int _elem36; // required
+ _elem36 = iprot.readI32();
+ this.idList.add(_elem36);
}
iprot.readListEnd();
}
@@ -6798,25 +7937,13 @@ public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache.
oprot.writeString(this.handle);
oprot.writeFieldEnd();
}
- if (this.resourceTypes != null) {
- oprot.writeFieldBegin(RESOURCE_TYPES_FIELD_DESC);
- {
- oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.I32, this.resourceTypes.size()));
- for (ResourceType _iter44 : this.resourceTypes)
- {
- oprot.writeI32(_iter44.getValue());
- }
- oprot.writeListEnd();
- }
- oprot.writeFieldEnd();
- }
- if (this.stats != null) {
- oprot.writeFieldBegin(STATS_FIELD_DESC);
+ if (this.idList != null) {
+ oprot.writeFieldBegin(ID_LIST_FIELD_DESC);
{
- oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, this.stats.size()));
- for (NodeUsageReport _iter45 : this.stats)
+ oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.I32, this.idList.size()));
+ for (int _iter37 : this.idList)
{
- _iter45.write(oprot);
+ oprot.writeI32(_iter37);
}
oprot.writeListEnd();
}
@@ -6828,7 +7955,7 @@ public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache.
@Override
public String toString() {
- StringBuilder sb = new StringBuilder("nodeFeedback_args(");
+ StringBuilder sb = new StringBuilder("releaseResource_args(");
boolean first = true;
sb.append("handle:");
@@ -6839,21 +7966,3493 @@ public String toString() {
}
first = false;
if (!first) sb.append(", ");
- sb.append("resourceTypes:");
- if (this.resourceTypes == null) {
+ sb.append("idList:");
+ if (this.idList == null) {
sb.append("null");
} else {
- sb.append(this.resourceTypes);
+ sb.append(this.idList);
}
first = false;
- if (!first) sb.append(", ");
- sb.append("stats:");
- if (this.stats == null) {
- sb.append("null");
- } else {
- sb.append(this.stats);
+ sb.append(")");
+ return sb.toString();
+ }
+
+ public void validate() throws org.apache.thrift.TException {
+ // check for required fields
+ }
+
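+ // The java.io serialization hooks below round-trip the struct through
+ // Thrift's TCompactProtocol rather than serializing fields directly.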
+ private void writeObject(java.io.ObjectOutputStream out) throws java.io.IOException {
+ try {
+ write(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(out)));
+ } catch (org.apache.thrift.TException te) {
+ throw new java.io.IOException(te);
+ }
+ }
+
+ private void readObject(java.io.ObjectInputStream in) throws java.io.IOException, ClassNotFoundException {
+ try {
+ read(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(in)));
+ } catch (org.apache.thrift.TException te) {
+ throw new java.io.IOException(te);
+ }
+ }
+
+ }
+
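+ // releaseResource has no return value, so its result struct carries only
+ // the two exception fields (ids 1 and 2) and no field id 0 for success.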
+ public static class releaseResource_result implements org.apache.thrift.TBase, java.io.Serializable, Cloneable {
+ private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("releaseResource_result");
+
+ private static final org.apache.thrift.protocol.TField E_FIELD_DESC = new org.apache.thrift.protocol.TField("e", org.apache.thrift.protocol.TType.STRUCT, (short)1);
+ private static final org.apache.thrift.protocol.TField F_FIELD_DESC = new org.apache.thrift.protocol.TField("f", org.apache.thrift.protocol.TType.STRUCT, (short)2);
+
+ public InvalidSessionHandle e; // required
+ public SafeModeException f; // required
+
+ /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. */
+ public enum _Fields implements org.apache.thrift.TFieldIdEnum {
+ E((short)1, "e"),
+ F((short)2, "f");
+
+ private static final Map byName = new HashMap();
+
+ static {
+ for (_Fields field : EnumSet.allOf(_Fields.class)) {
+ byName.put(field.getFieldName(), field);
+ }
+ }
+
+ /**
+ * Find the _Fields constant that matches fieldId, or null if it's not found.
+ */
+ public static _Fields findByThriftId(int fieldId) {
+ switch(fieldId) {
+ case 1: // E
+ return E;
+ case 2: // F
+ return F;
+ default:
+ return null;
+ }
+ }
+
+ /**
+ * Find the _Fields constant that matches fieldId, throwing an exception
+ * if it is not found.
+ */
+ public static _Fields findByThriftIdOrThrow(int fieldId) {
+ _Fields fields = findByThriftId(fieldId);
+ if (fields == null) throw new IllegalArgumentException("Field " + fieldId + " doesn't exist!");
+ return fields;
+ }
+
+ /**
+ * Find the _Fields constant that matches name, or null if it's not found.
+ */
+ public static _Fields findByName(String name) {
+ return byName.get(name);
+ }
+
+ private final short _thriftId;
+ private final String _fieldName;
+
+ _Fields(short thriftId, String fieldName) {
+ _thriftId = thriftId;
+ _fieldName = fieldName;
+ }
+
+ public short getThriftFieldId() {
+ return _thriftId;
+ }
+
+ public String getFieldName() {
+ return _fieldName;
+ }
+ }
+
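+ // _Fields maps Thrift field ids to names so that generic helpers
+ // (setFieldValue, getFieldValue, metaDataMap) can address fields uniformly.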
+ // isset id assignments
+
+ public static final Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap;
+ static {
+ Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class);
+ tmpMap.put(_Fields.E, new org.apache.thrift.meta_data.FieldMetaData("e", org.apache.thrift.TFieldRequirementType.DEFAULT,
+ new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRUCT)));
+ tmpMap.put(_Fields.F, new org.apache.thrift.meta_data.FieldMetaData("f", org.apache.thrift.TFieldRequirementType.DEFAULT,
+ new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRUCT)));
+ metaDataMap = Collections.unmodifiableMap(tmpMap);
+ org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(releaseResource_result.class, metaDataMap);
+ }
+
+ public releaseResource_result() {
+ }
+
+ public releaseResource_result(
+ InvalidSessionHandle e,
+ SafeModeException f)
+ {
+ this();
+ this.e = e;
+ this.f = f;
+ }
+
+ /**
+ * Performs a deep copy on other.
+ */
+ public releaseResource_result(releaseResource_result other) {
+ if (other.isSetE()) {
+ this.e = new InvalidSessionHandle(other.e);
+ }
+ if (other.isSetF()) {
+ this.f = new SafeModeException(other.f);
+ }
+ }
+
+ public releaseResource_result deepCopy() {
+ return new releaseResource_result(this);
+ }
+
+ @Override
+ public void clear() {
+ this.e = null;
+ this.f = null;
+ }
+
+ public InvalidSessionHandle getE() {
+ return this.e;
+ }
+
+ public releaseResource_result setE(InvalidSessionHandle e) {
+ this.e = e;
+ return this;
+ }
+
+ public void unsetE() {
+ this.e = null;
+ }
+
+ /** Returns true if field e is set (has been assigned a value) and false otherwise */
+ public boolean isSetE() {
+ return this.e != null;
+ }
+
+ public void setEIsSet(boolean value) {
+ if (!value) {
+ this.e = null;
+ }
+ }
+
+ public SafeModeException getF() {
+ return this.f;
+ }
+
+ public releaseResource_result setF(SafeModeException f) {
+ this.f = f;
+ return this;
+ }
+
+ public void unsetF() {
+ this.f = null;
+ }
+
+ /** Returns true if field f is set (has been assigned a value) and false otherwise */
+ public boolean isSetF() {
+ return this.f != null;
+ }
+
+ public void setFIsSet(boolean value) {
+ if (!value) {
+ this.f = null;
+ }
+ }
+
+ public void setFieldValue(_Fields field, Object value) {
+ switch (field) {
+ case E:
+ if (value == null) {
+ unsetE();
+ } else {
+ setE((InvalidSessionHandle)value);
+ }
+ break;
+
+ case F:
+ if (value == null) {
+ unsetF();
+ } else {
+ setF((SafeModeException)value);
+ }
+ break;
+
+ }
+ }
+
+ public Object getFieldValue(_Fields field) {
+ switch (field) {
+ case E:
+ return getE();
+
+ case F:
+ return getF();
+
+ }
+ throw new IllegalStateException();
+ }
+
+ /** Returns true if field corresponding to fieldID is set (has been assigned a value) and false otherwise */
+ public boolean isSet(_Fields field) {
+ if (field == null) {
+ throw new IllegalArgumentException();
+ }
+
+ switch (field) {
+ case E:
+ return isSetE();
+ case F:
+ return isSetF();
+ }
+ throw new IllegalStateException();
+ }
+
+ @Override
+ public boolean equals(Object that) {
+ if (that == null)
+ return false;
+ if (that instanceof releaseResource_result)
+ return this.equals((releaseResource_result)that);
+ return false;
+ }
+
+ public boolean equals(releaseResource_result that) {
+ if (that == null)
+ return false;
+
+ boolean this_present_e = true && this.isSetE();
+ boolean that_present_e = true && that.isSetE();
+ if (this_present_e || that_present_e) {
+ if (!(this_present_e && that_present_e))
+ return false;
+ if (!this.e.equals(that.e))
+ return false;
+ }
+
+ boolean this_present_f = true && this.isSetF();
+ boolean that_present_f = true && that.isSetF();
+ if (this_present_f || that_present_f) {
+ if (!(this_present_f && that_present_f))
+ return false;
+ if (!this.f.equals(that.f))
+ return false;
+ }
+
+ return true;
+ }
+
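+ // This generator version emits a constant hashCode(); it stays consistent
+ // with equals() but collapses all instances into one hash bucket.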
+ @Override
+ public int hashCode() {
+ return 0;
+ }
+
+ public int compareTo(releaseResource_result other) {
+ if (!getClass().equals(other.getClass())) {
+ return getClass().getName().compareTo(other.getClass().getName());
+ }
+
+ int lastComparison = 0;
+ releaseResource_result typedOther = (releaseResource_result)other;
+
+ lastComparison = Boolean.valueOf(isSetE()).compareTo(typedOther.isSetE());
+ if (lastComparison != 0) {
+ return lastComparison;
+ }
+ if (isSetE()) {
+ lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.e, typedOther.e);
+ if (lastComparison != 0) {
+ return lastComparison;
+ }
+ }
+ lastComparison = Boolean.valueOf(isSetF()).compareTo(typedOther.isSetF());
+ if (lastComparison != 0) {
+ return lastComparison;
+ }
+ if (isSetF()) {
+ lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.f, typedOther.f);
+ if (lastComparison != 0) {
+ return lastComparison;
+ }
+ }
+ return 0;
+ }
+
+ public _Fields fieldForId(int fieldId) {
+ return _Fields.findByThriftId(fieldId);
+ }
+
+ public void read(org.apache.thrift.protocol.TProtocol iprot) throws org.apache.thrift.TException {
+ org.apache.thrift.protocol.TField field;
+ iprot.readStructBegin();
+ while (true)
+ {
+ field = iprot.readFieldBegin();
+ if (field.type == org.apache.thrift.protocol.TType.STOP) {
+ break;
+ }
+ switch (field.id) {
+ case 1: // E
+ if (field.type == org.apache.thrift.protocol.TType.STRUCT) {
+ this.e = new InvalidSessionHandle();
+ this.e.read(iprot);
+ } else {
+ org.apache.thrift.protocol.TProtocolUtil.skip(iprot, field.type);
+ }
+ break;
+ case 2: // F
+ if (field.type == org.apache.thrift.protocol.TType.STRUCT) {
+ this.f = new SafeModeException();
+ this.f.read(iprot);
+ } else {
+ org.apache.thrift.protocol.TProtocolUtil.skip(iprot, field.type);
+ }
+ break;
+ default:
+ org.apache.thrift.protocol.TProtocolUtil.skip(iprot, field.type);
+ }
+ iprot.readFieldEnd();
+ }
+ iprot.readStructEnd();
+
+ // check for required fields of primitive type, which can't be checked in the validate method
+ validate();
+ }
+
+ public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache.thrift.TException {
+ oprot.writeStructBegin(STRUCT_DESC);
+
+ if (this.isSetE()) {
+ oprot.writeFieldBegin(E_FIELD_DESC);
+ this.e.write(oprot);
+ oprot.writeFieldEnd();
+ } else if (this.isSetF()) {
+ oprot.writeFieldBegin(F_FIELD_DESC);
+ this.f.write(oprot);
+ oprot.writeFieldEnd();
+ }
+ oprot.writeFieldStop();
+ oprot.writeStructEnd();
+ }
+
+ @Override
+ public String toString() {
+ StringBuilder sb = new StringBuilder("releaseResource_result(");
+ boolean first = true;
+
+ sb.append("e:");
+ if (this.e == null) {
+ sb.append("null");
+ } else {
+ sb.append(this.e);
+ }
+ first = false;
+ if (!first) sb.append(", ");
+ sb.append("f:");
+ if (this.f == null) {
+ sb.append("null");
+ } else {
+ sb.append(this.f);
+ }
+ first = false;
+ sb.append(")");
+ return sb.toString();
+ }
+
+ public void validate() throws org.apache.thrift.TException {
+ // check for required fields
+ }
+
+ private void writeObject(java.io.ObjectOutputStream out) throws java.io.IOException {
+ try {
+ write(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(out)));
+ } catch (org.apache.thrift.TException te) {
+ throw new java.io.IOException(te);
+ }
+ }
+
+ private void readObject(java.io.ObjectInputStream in) throws java.io.IOException, ClassNotFoundException {
+ try {
+ read(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(in)));
+ } catch (org.apache.thrift.TException te) {
+ throw new java.io.IOException(te);
+ }
+ }
+
+ }
+
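+ // nodeHeartbeat_args wraps the single ClusterNodeInfo argument and follows
+ // the same generated pattern as the structs above.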
+ public static class nodeHeartbeat_args implements org.apache.thrift.TBase, java.io.Serializable, Cloneable {
+ private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("nodeHeartbeat_args");
+
+ private static final org.apache.thrift.protocol.TField NODE_FIELD_DESC = new org.apache.thrift.protocol.TField("node", org.apache.thrift.protocol.TType.STRUCT, (short)1);
+
+ public ClusterNodeInfo node; // required
+
+ /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. */
+ public enum _Fields implements org.apache.thrift.TFieldIdEnum {
+ NODE((short)1, "node");
+
+ private static final Map byName = new HashMap();
+
+ static {
+ for (_Fields field : EnumSet.allOf(_Fields.class)) {
+ byName.put(field.getFieldName(), field);
+ }
+ }
+
+ /**
+ * Find the _Fields constant that matches fieldId, or null if it's not found.
+ */
+ public static _Fields findByThriftId(int fieldId) {
+ switch(fieldId) {
+ case 1: // NODE
+ return NODE;
+ default:
+ return null;
+ }
+ }
+
+ /**
+ * Find the _Fields constant that matches fieldId, throwing an exception
+ * if it is not found.
+ */
+ public static _Fields findByThriftIdOrThrow(int fieldId) {
+ _Fields fields = findByThriftId(fieldId);
+ if (fields == null) throw new IllegalArgumentException("Field " + fieldId + " doesn't exist!");
+ return fields;
+ }
+
+ /**
+ * Find the _Fields constant that matches name, or null if it's not found.
+ */
+ public static _Fields findByName(String name) {
+ return byName.get(name);
+ }
+
+ private final short _thriftId;
+ private final String _fieldName;
+
+ _Fields(short thriftId, String fieldName) {
+ _thriftId = thriftId;
+ _fieldName = fieldName;
+ }
+
+ public short getThriftFieldId() {
+ return _thriftId;
+ }
+
+ public String getFieldName() {
+ return _fieldName;
+ }
+ }
+
+ // isset id assignments
+
+ public static final Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap;
+ static {
+ Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class);
+ tmpMap.put(_Fields.NODE, new org.apache.thrift.meta_data.FieldMetaData("node", org.apache.thrift.TFieldRequirementType.DEFAULT,
+ new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT, ClusterNodeInfo.class)));
+ metaDataMap = Collections.unmodifiableMap(tmpMap);
+ org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(nodeHeartbeat_args.class, metaDataMap);
+ }
+
+ public nodeHeartbeat_args() {
+ }
+
+ public nodeHeartbeat_args(
+ ClusterNodeInfo node)
+ {
+ this();
+ this.node = node;
+ }
+
+ /**
+ * Performs a deep copy on other.
+ */
+ public nodeHeartbeat_args(nodeHeartbeat_args other) {
+ if (other.isSetNode()) {
+ this.node = new ClusterNodeInfo(other.node);
+ }
+ }
+
+ public nodeHeartbeat_args deepCopy() {
+ return new nodeHeartbeat_args(this);
+ }
+
+ @Override
+ public void clear() {
+ this.node = null;
+ }
+
+ public ClusterNodeInfo getNode() {
+ return this.node;
+ }
+
+ public nodeHeartbeat_args setNode(ClusterNodeInfo node) {
+ this.node = node;
+ return this;
+ }
+
+ public void unsetNode() {
+ this.node = null;
+ }
+
+ /** Returns true if field node is set (has been assigned a value) and false otherwise */
+ public boolean isSetNode() {
+ return this.node != null;
+ }
+
+ public void setNodeIsSet(boolean value) {
+ if (!value) {
+ this.node = null;
+ }
+ }
+
+ public void setFieldValue(_Fields field, Object value) {
+ switch (field) {
+ case NODE:
+ if (value == null) {
+ unsetNode();
+ } else {
+ setNode((ClusterNodeInfo)value);
+ }
+ break;
+
+ }
+ }
+
+ public Object getFieldValue(_Fields field) {
+ switch (field) {
+ case NODE:
+ return getNode();
+
+ }
+ throw new IllegalStateException();
+ }
+
+ /** Returns true if field corresponding to fieldID is set (has been assigned a value) and false otherwise */
+ public boolean isSet(_Fields field) {
+ if (field == null) {
+ throw new IllegalArgumentException();
+ }
+
+ switch (field) {
+ case NODE:
+ return isSetNode();
+ }
+ throw new IllegalStateException();
+ }
+
+ @Override
+ public boolean equals(Object that) {
+ if (that == null)
+ return false;
+ if (that instanceof nodeHeartbeat_args)
+ return this.equals((nodeHeartbeat_args)that);
+ return false;
+ }
+
+ public boolean equals(nodeHeartbeat_args that) {
+ if (that == null)
+ return false;
+
+ boolean this_present_node = true && this.isSetNode();
+ boolean that_present_node = true && that.isSetNode();
+ if (this_present_node || that_present_node) {
+ if (!(this_present_node && that_present_node))
+ return false;
+ if (!this.node.equals(that.node))
+ return false;
+ }
+
+ return true;
+ }
+
+ @Override
+ public int hashCode() {
+ return 0;
+ }
+
+ public int compareTo(nodeHeartbeat_args other) {
+ if (!getClass().equals(other.getClass())) {
+ return getClass().getName().compareTo(other.getClass().getName());
+ }
+
+ int lastComparison = 0;
+ nodeHeartbeat_args typedOther = (nodeHeartbeat_args)other;
+
+ lastComparison = Boolean.valueOf(isSetNode()).compareTo(typedOther.isSetNode());
+ if (lastComparison != 0) {
+ return lastComparison;
+ }
+ if (isSetNode()) {
+ lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.node, typedOther.node);
+ if (lastComparison != 0) {
+ return lastComparison;
+ }
+ }
+ return 0;
+ }
+
+ public _Fields fieldForId(int fieldId) {
+ return _Fields.findByThriftId(fieldId);
+ }
+
+ public void read(org.apache.thrift.protocol.TProtocol iprot) throws org.apache.thrift.TException {
+ org.apache.thrift.protocol.TField field;
+ iprot.readStructBegin();
+ while (true)
+ {
+ field = iprot.readFieldBegin();
+ if (field.type == org.apache.thrift.protocol.TType.STOP) {
+ break;
+ }
+ switch (field.id) {
+ case 1: // NODE
+ if (field.type == org.apache.thrift.protocol.TType.STRUCT) {
+ this.node = new ClusterNodeInfo();
+ this.node.read(iprot);
+ } else {
+ org.apache.thrift.protocol.TProtocolUtil.skip(iprot, field.type);
+ }
+ break;
+ default:
+ org.apache.thrift.protocol.TProtocolUtil.skip(iprot, field.type);
+ }
+ iprot.readFieldEnd();
+ }
+ iprot.readStructEnd();
+
+ // check for required fields of primitive type, which can't be checked in the validate method
+ validate();
+ }
+
+ public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache.thrift.TException {
+ validate();
+
+ oprot.writeStructBegin(STRUCT_DESC);
+ if (this.node != null) {
+ oprot.writeFieldBegin(NODE_FIELD_DESC);
+ this.node.write(oprot);
+ oprot.writeFieldEnd();
+ }
+ oprot.writeFieldStop();
+ oprot.writeStructEnd();
+ }
+
+ @Override
+ public String toString() {
+ StringBuilder sb = new StringBuilder("nodeHeartbeat_args(");
+ boolean first = true;
+
+ sb.append("node:");
+ if (this.node == null) {
+ sb.append("null");
+ } else {
+ sb.append(this.node);
+ }
+ first = false;
+ sb.append(")");
+ return sb.toString();
+ }
+
+ public void validate() throws org.apache.thrift.TException {
+ // check for required fields
+ }
+
+ private void writeObject(java.io.ObjectOutputStream out) throws java.io.IOException {
+ try {
+ write(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(out)));
+ } catch (org.apache.thrift.TException te) {
+ throw new java.io.IOException(te);
+ }
+ }
+
+ private void readObject(java.io.ObjectInputStream in) throws java.io.IOException, ClassNotFoundException {
+ try {
+ read(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(in)));
+ } catch (org.apache.thrift.TException te) {
+ throw new java.io.IOException(te);
+ }
+ }
+
+ }
+
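+ // Unlike the old nodeHeartbeat_result removed above, this version adds a
+ // field id 0 success value (NodeHeartbeatResponse) and a second exception
+ // field (SafeModeException) alongside DisallowedNode.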
+ public static class nodeHeartbeat_result implements org.apache.thrift.TBase, java.io.Serializable, Cloneable {
+ private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("nodeHeartbeat_result");
+
+ private static final org.apache.thrift.protocol.TField SUCCESS_FIELD_DESC = new org.apache.thrift.protocol.TField("success", org.apache.thrift.protocol.TType.STRUCT, (short)0);
+ private static final org.apache.thrift.protocol.TField E_FIELD_DESC = new org.apache.thrift.protocol.TField("e", org.apache.thrift.protocol.TType.STRUCT, (short)1);
+ private static final org.apache.thrift.protocol.TField F_FIELD_DESC = new org.apache.thrift.protocol.TField("f", org.apache.thrift.protocol.TType.STRUCT, (short)2);
+
+ public NodeHeartbeatResponse success; // required
+ public DisallowedNode e; // required
+ public SafeModeException f; // required
+
+ /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. */
+ public enum _Fields implements org.apache.thrift.TFieldIdEnum {
+ SUCCESS((short)0, "success"),
+ E((short)1, "e"),
+ F((short)2, "f");
+
+ private static final Map byName = new HashMap();
+
+ static {
+ for (_Fields field : EnumSet.allOf(_Fields.class)) {
+ byName.put(field.getFieldName(), field);
+ }
+ }
+
+ /**
+ * Find the _Fields constant that matches fieldId, or null if it's not found.
+ */
+ public static _Fields findByThriftId(int fieldId) {
+ switch(fieldId) {
+ case 0: // SUCCESS
+ return SUCCESS;
+ case 1: // E
+ return E;
+ case 2: // F
+ return F;
+ default:
+ return null;
+ }
+ }
+
+ /**
+ * Find the _Fields constant that matches fieldId, throwing an exception
+ * if it is not found.
+ */
+ public static _Fields findByThriftIdOrThrow(int fieldId) {
+ _Fields fields = findByThriftId(fieldId);
+ if (fields == null) throw new IllegalArgumentException("Field " + fieldId + " doesn't exist!");
+ return fields;
+ }
+
+ /**
+ * Find the _Fields constant that matches name, or null if it's not found.
+ */
+ public static _Fields findByName(String name) {
+ return byName.get(name);
+ }
+
+ private final short _thriftId;
+ private final String _fieldName;
+
+ _Fields(short thriftId, String fieldName) {
+ _thriftId = thriftId;
+ _fieldName = fieldName;
+ }
+
+ public short getThriftFieldId() {
+ return _thriftId;
+ }
+
+ public String getFieldName() {
+ return _fieldName;
+ }
+ }
+
+ // isset id assignments
+
+ public static final Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap;
+ static {
+ Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class);
+ tmpMap.put(_Fields.SUCCESS, new org.apache.thrift.meta_data.FieldMetaData("success", org.apache.thrift.TFieldRequirementType.DEFAULT,
+ new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT, NodeHeartbeatResponse.class)));
+ tmpMap.put(_Fields.E, new org.apache.thrift.meta_data.FieldMetaData("e", org.apache.thrift.TFieldRequirementType.DEFAULT,
+ new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRUCT)));
+ tmpMap.put(_Fields.F, new org.apache.thrift.meta_data.FieldMetaData("f", org.apache.thrift.TFieldRequirementType.DEFAULT,
+ new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRUCT)));
+ metaDataMap = Collections.unmodifiableMap(tmpMap);
+ org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(nodeHeartbeat_result.class, metaDataMap);
+ }
+
+ public nodeHeartbeat_result() {
+ }
+
+ public nodeHeartbeat_result(
+ NodeHeartbeatResponse success,
+ DisallowedNode e,
+ SafeModeException f)
+ {
+ this();
+ this.success = success;
+ this.e = e;
+ this.f = f;
+ }
+
+ /**
+ * Performs a deep copy on other.
+ */
+ public nodeHeartbeat_result(nodeHeartbeat_result other) {
+ if (other.isSetSuccess()) {
+ this.success = new NodeHeartbeatResponse(other.success);
+ }
+ if (other.isSetE()) {
+ this.e = new DisallowedNode(other.e);
+ }
+ if (other.isSetF()) {
+ this.f = new SafeModeException(other.f);
+ }
+ }
+
+ public nodeHeartbeat_result deepCopy() {
+ return new nodeHeartbeat_result(this);
+ }
+
+ @Override
+ public void clear() {
+ this.success = null;
+ this.e = null;
+ this.f = null;
+ }
+
+ public NodeHeartbeatResponse getSuccess() {
+ return this.success;
+ }
+
+ public nodeHeartbeat_result setSuccess(NodeHeartbeatResponse success) {
+ this.success = success;
+ return this;
+ }
+
+ public void unsetSuccess() {
+ this.success = null;
+ }
+
+ /** Returns true if field success is set (has been assigned a value) and false otherwise */
+ public boolean isSetSuccess() {
+ return this.success != null;
+ }
+
+ public void setSuccessIsSet(boolean value) {
+ if (!value) {
+ this.success = null;
+ }
+ }
+
+ public DisallowedNode getE() {
+ return this.e;
+ }
+
+ public nodeHeartbeat_result setE(DisallowedNode e) {
+ this.e = e;
+ return this;
+ }
+
+ public void unsetE() {
+ this.e = null;
+ }
+
+ /** Returns true if field e is set (has been assigned a value) and false otherwise */
+ public boolean isSetE() {
+ return this.e != null;
+ }
+
+ public void setEIsSet(boolean value) {
+ if (!value) {
+ this.e = null;
+ }
+ }
+
+ public SafeModeException getF() {
+ return this.f;
+ }
+
+ public nodeHeartbeat_result setF(SafeModeException f) {
+ this.f = f;
+ return this;
+ }
+
+ public void unsetF() {
+ this.f = null;
+ }
+
+ /** Returns true if field f is set (has been assigned a value) and false otherwise */
+ public boolean isSetF() {
+ return this.f != null;
+ }
+
+ public void setFIsSet(boolean value) {
+ if (!value) {
+ this.f = null;
+ }
+ }
+
+ public void setFieldValue(_Fields field, Object value) {
+ switch (field) {
+ case SUCCESS:
+ if (value == null) {
+ unsetSuccess();
+ } else {
+ setSuccess((NodeHeartbeatResponse)value);
+ }
+ break;
+
+ case E:
+ if (value == null) {
+ unsetE();
+ } else {
+ setE((DisallowedNode)value);
+ }
+ break;
+
+ case F:
+ if (value == null) {
+ unsetF();
+ } else {
+ setF((SafeModeException)value);
+ }
+ break;
+
+ }
+ }
+
+ public Object getFieldValue(_Fields field) {
+ switch (field) {
+ case SUCCESS:
+ return getSuccess();
+
+ case E:
+ return getE();
+
+ case F:
+ return getF();
+
+ }
+ throw new IllegalStateException();
+ }
+
+ /** Returns true if field corresponding to fieldID is set (has been assigned a value) and false otherwise */
+ public boolean isSet(_Fields field) {
+ if (field == null) {
+ throw new IllegalArgumentException();
+ }
+
+ switch (field) {
+ case SUCCESS:
+ return isSetSuccess();
+ case E:
+ return isSetE();
+ case F:
+ return isSetF();
+ }
+ throw new IllegalStateException();
+ }
+
+ @Override
+ public boolean equals(Object that) {
+ if (that == null)
+ return false;
+ if (that instanceof nodeHeartbeat_result)
+ return this.equals((nodeHeartbeat_result)that);
+ return false;
+ }
+
+ public boolean equals(nodeHeartbeat_result that) {
+ if (that == null)
+ return false;
+
+ boolean this_present_success = true && this.isSetSuccess();
+ boolean that_present_success = true && that.isSetSuccess();
+ if (this_present_success || that_present_success) {
+ if (!(this_present_success && that_present_success))
+ return false;
+ if (!this.success.equals(that.success))
+ return false;
+ }
+
+ boolean this_present_e = true && this.isSetE();
+ boolean that_present_e = true && that.isSetE();
+ if (this_present_e || that_present_e) {
+ if (!(this_present_e && that_present_e))
+ return false;
+ if (!this.e.equals(that.e))
+ return false;
+ }
+
+ boolean this_present_f = true && this.isSetF();
+ boolean that_present_f = true && that.isSetF();
+ if (this_present_f || that_present_f) {
+ if (!(this_present_f && that_present_f))
+ return false;
+ if (!this.f.equals(that.f))
+ return false;
+ }
+
+ return true;
+ }
+
+ @Override
+ public int hashCode() {
+ return 0;
+ }
+
+ public int compareTo(nodeHeartbeat_result other) {
+ if (!getClass().equals(other.getClass())) {
+ return getClass().getName().compareTo(other.getClass().getName());
+ }
+
+ int lastComparison = 0;
+ nodeHeartbeat_result typedOther = (nodeHeartbeat_result)other;
+
+ lastComparison = Boolean.valueOf(isSetSuccess()).compareTo(typedOther.isSetSuccess());
+ if (lastComparison != 0) {
+ return lastComparison;
+ }
+ if (isSetSuccess()) {
+ lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.success, typedOther.success);
+ if (lastComparison != 0) {
+ return lastComparison;
+ }
+ }
+ lastComparison = Boolean.valueOf(isSetE()).compareTo(typedOther.isSetE());
+ if (lastComparison != 0) {
+ return lastComparison;
+ }
+ if (isSetE()) {
+ lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.e, typedOther.e);
+ if (lastComparison != 0) {
+ return lastComparison;
+ }
+ }
+ lastComparison = Boolean.valueOf(isSetF()).compareTo(typedOther.isSetF());
+ if (lastComparison != 0) {
+ return lastComparison;
+ }
+ if (isSetF()) {
+ lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.f, typedOther.f);
+ if (lastComparison != 0) {
+ return lastComparison;
+ }
+ }
+ return 0;
+ }
+
+ public _Fields fieldForId(int fieldId) {
+ return _Fields.findByThriftId(fieldId);
+ }
+
+ public void read(org.apache.thrift.protocol.TProtocol iprot) throws org.apache.thrift.TException {
+ org.apache.thrift.protocol.TField field;
+ iprot.readStructBegin();
+ while (true)
+ {
+ field = iprot.readFieldBegin();
+ if (field.type == org.apache.thrift.protocol.TType.STOP) {
+ break;
+ }
+ switch (field.id) {
+ case 0: // SUCCESS
+ if (field.type == org.apache.thrift.protocol.TType.STRUCT) {
+ this.success = new NodeHeartbeatResponse();
+ this.success.read(iprot);
+ } else {
+ org.apache.thrift.protocol.TProtocolUtil.skip(iprot, field.type);
+ }
+ break;
+ case 1: // E
+ if (field.type == org.apache.thrift.protocol.TType.STRUCT) {
+ this.e = new DisallowedNode();
+ this.e.read(iprot);
+ } else {
+ org.apache.thrift.protocol.TProtocolUtil.skip(iprot, field.type);
+ }
+ break;
+ case 2: // F
+ if (field.type == org.apache.thrift.protocol.TType.STRUCT) {
+ this.f = new SafeModeException();
+ this.f.read(iprot);
+ } else {
+ org.apache.thrift.protocol.TProtocolUtil.skip(iprot, field.type);
+ }
+ break;
+ default:
+ org.apache.thrift.protocol.TProtocolUtil.skip(iprot, field.type);
+ }
+ iprot.readFieldEnd();
+ }
+ iprot.readStructEnd();
+
+ // check for required fields of primitive type, which can't be checked in the validate method
+ validate();
+ }
+
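+ // On the wire a result writes exactly one branch: success if set, else
+ // whichever exception field is set, else nothing (a void-like response).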
+ public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache.thrift.TException {
+ oprot.writeStructBegin(STRUCT_DESC);
+
+ if (this.isSetSuccess()) {
+ oprot.writeFieldBegin(SUCCESS_FIELD_DESC);
+ this.success.write(oprot);
+ oprot.writeFieldEnd();
+ } else if (this.isSetE()) {
+ oprot.writeFieldBegin(E_FIELD_DESC);
+ this.e.write(oprot);
+ oprot.writeFieldEnd();
+ } else if (this.isSetF()) {
+ oprot.writeFieldBegin(F_FIELD_DESC);
+ this.f.write(oprot);
+ oprot.writeFieldEnd();
+ }
+ oprot.writeFieldStop();
+ oprot.writeStructEnd();
+ }
+
+ @Override
+ public String toString() {
+ StringBuilder sb = new StringBuilder("nodeHeartbeat_result(");
+ boolean first = true;
+
+ sb.append("success:");
+ if (this.success == null) {
+ sb.append("null");
+ } else {
+ sb.append(this.success);
+ }
+ first = false;
+ if (!first) sb.append(", ");
+ sb.append("e:");
+ if (this.e == null) {
+ sb.append("null");
+ } else {
+ sb.append(this.e);
+ }
+ first = false;
+ if (!first) sb.append(", ");
+ sb.append("f:");
+ if (this.f == null) {
+ sb.append("null");
+ } else {
+ sb.append(this.f);
+ }
+ first = false;
+ sb.append(")");
+ return sb.toString();
+ }
+
+ public void validate() throws org.apache.thrift.TException {
+ // check for required fields
+ }
+
+ private void writeObject(java.io.ObjectOutputStream out) throws java.io.IOException {
+ try {
+ write(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(out)));
+ } catch (org.apache.thrift.TException te) {
+ throw new java.io.IOException(te);
+ }
+ }
+
+ private void readObject(java.io.ObjectInputStream in) throws java.io.IOException, ClassNotFoundException {
+ try {
+ read(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(in)));
+ } catch (org.apache.thrift.TException te) {
+ throw new java.io.IOException(te);
+ }
+ }
+
+ }
+
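+ // nodeFeedback_args reappears here with the same three fields (handle,
+ // resourceTypes, stats) as the removed copy above; the diff only moves it
+ // later in the generated file.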
+ public static class nodeFeedback_args implements org.apache.thrift.TBase, java.io.Serializable, Cloneable {
+ private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("nodeFeedback_args");
+
+ private static final org.apache.thrift.protocol.TField HANDLE_FIELD_DESC = new org.apache.thrift.protocol.TField("handle", org.apache.thrift.protocol.TType.STRING, (short)1);
+ private static final org.apache.thrift.protocol.TField RESOURCE_TYPES_FIELD_DESC = new org.apache.thrift.protocol.TField("resourceTypes", org.apache.thrift.protocol.TType.LIST, (short)2);
+ private static final org.apache.thrift.protocol.TField STATS_FIELD_DESC = new org.apache.thrift.protocol.TField("stats", org.apache.thrift.protocol.TType.LIST, (short)3);
+
+ public String handle; // required
+ public List resourceTypes; // required
+ public List stats; // required
+
+ /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. */
+ public enum _Fields implements org.apache.thrift.TFieldIdEnum {
+ HANDLE((short)1, "handle"),
+ RESOURCE_TYPES((short)2, "resourceTypes"),
+ STATS((short)3, "stats");
+
+ private static final Map byName = new HashMap();
+
+ static {
+ for (_Fields field : EnumSet.allOf(_Fields.class)) {
+ byName.put(field.getFieldName(), field);
+ }
+ }
+
+ /**
+ * Find the _Fields constant that matches fieldId, or null if it's not found.
+ */
+ public static _Fields findByThriftId(int fieldId) {
+ switch(fieldId) {
+ case 1: // HANDLE
+ return HANDLE;
+ case 2: // RESOURCE_TYPES
+ return RESOURCE_TYPES;
+ case 3: // STATS
+ return STATS;
+ default:
+ return null;
+ }
+ }
+
+ /**
+ * Find the _Fields constant that matches fieldId, throwing an exception
+ * if it is not found.
+ */
+ public static _Fields findByThriftIdOrThrow(int fieldId) {
+ _Fields fields = findByThriftId(fieldId);
+ if (fields == null) throw new IllegalArgumentException("Field " + fieldId + " doesn't exist!");
+ return fields;
+ }
+
+ /**
+ * Find the _Fields constant that matches name, or null if it's not found.
+ */
+ public static _Fields findByName(String name) {
+ return byName.get(name);
+ }
+
+ private final short _thriftId;
+ private final String _fieldName;
+
+ _Fields(short thriftId, String fieldName) {
+ _thriftId = thriftId;
+ _fieldName = fieldName;
+ }
+
+ public short getThriftFieldId() {
+ return _thriftId;
+ }
+
+ public String getFieldName() {
+ return _fieldName;
+ }
+ }
+
+ // isset id assignments
+
+ public static final Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap;
+ static {
+ Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class);
+ tmpMap.put(_Fields.HANDLE, new org.apache.thrift.meta_data.FieldMetaData("handle", org.apache.thrift.TFieldRequirementType.DEFAULT,
+ new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING , "SessionHandle")));
+ tmpMap.put(_Fields.RESOURCE_TYPES, new org.apache.thrift.meta_data.FieldMetaData("resourceTypes", org.apache.thrift.TFieldRequirementType.DEFAULT,
+ new org.apache.thrift.meta_data.ListMetaData(org.apache.thrift.protocol.TType.LIST,
+ new org.apache.thrift.meta_data.EnumMetaData(org.apache.thrift.protocol.TType.ENUM, ResourceType.class))));
+ tmpMap.put(_Fields.STATS, new org.apache.thrift.meta_data.FieldMetaData("stats", org.apache.thrift.TFieldRequirementType.DEFAULT,
+ new org.apache.thrift.meta_data.ListMetaData(org.apache.thrift.protocol.TType.LIST,
+ new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT, NodeUsageReport.class))));
+ metaDataMap = Collections.unmodifiableMap(tmpMap);
+ org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(nodeFeedback_args.class, metaDataMap);
+ }
+
+ public nodeFeedback_args() {
+ }
+
+ public nodeFeedback_args(
+ String handle,
+ List resourceTypes,
+ List stats)
+ {
+ this();
+ this.handle = handle;
+ this.resourceTypes = resourceTypes;
+ this.stats = stats;
+ }
+
+ /**
+ * Performs a deep copy on other.
+ */
+ public nodeFeedback_args(nodeFeedback_args other) {
+ if (other.isSetHandle()) {
+ this.handle = other.handle;
+ }
+ if (other.isSetResourceTypes()) {
+ List __this__resourceTypes = new ArrayList();
+ for (ResourceType other_element : other.resourceTypes) {
+ __this__resourceTypes.add(other_element);
+ }
+ this.resourceTypes = __this__resourceTypes;
+ }
+ if (other.isSetStats()) {
+ List __this__stats = new ArrayList();
+ for (NodeUsageReport other_element : other.stats) {
+ __this__stats.add(new NodeUsageReport(other_element));
+ }
+ this.stats = __this__stats;
+ }
+ }
+
+ public nodeFeedback_args deepCopy() {
+ return new nodeFeedback_args(this);
+ }
+
+ @Override
+ public void clear() {
+ this.handle = null;
+ this.resourceTypes = null;
+ this.stats = null;
+ }
+
+ public String getHandle() {
+ return this.handle;
+ }
+
+ public nodeFeedback_args setHandle(String handle) {
+ this.handle = handle;
+ return this;
+ }
+
+ public void unsetHandle() {
+ this.handle = null;
+ }
+
+ /** Returns true if field handle is set (has been assigned a value) and false otherwise */
+ public boolean isSetHandle() {
+ return this.handle != null;
+ }
+
+ public void setHandleIsSet(boolean value) {
+ if (!value) {
+ this.handle = null;
+ }
+ }
+
+ public int getResourceTypesSize() {
+ return (this.resourceTypes == null) ? 0 : this.resourceTypes.size();
+ }
+
+ public java.util.Iterator getResourceTypesIterator() {
+ return (this.resourceTypes == null) ? null : this.resourceTypes.iterator();
+ }
+
+ public void addToResourceTypes(ResourceType elem) {
+ if (this.resourceTypes == null) {
+ this.resourceTypes = new ArrayList();
+ }
+ this.resourceTypes.add(elem);
+ }
+
+ public List getResourceTypes() {
+ return this.resourceTypes;
+ }
+
+ public nodeFeedback_args setResourceTypes(List resourceTypes) {
+ this.resourceTypes = resourceTypes;
+ return this;
+ }
+
+ public void unsetResourceTypes() {
+ this.resourceTypes = null;
+ }
+
+ /** Returns true if field resourceTypes is set (has been assigned a value) and false otherwise */
+ public boolean isSetResourceTypes() {
+ return this.resourceTypes != null;
+ }
+
+ public void setResourceTypesIsSet(boolean value) {
+ if (!value) {
+ this.resourceTypes = null;
+ }
+ }
+
+ public int getStatsSize() {
+ return (this.stats == null) ? 0 : this.stats.size();
+ }
+
+ public java.util.Iterator getStatsIterator() {
+ return (this.stats == null) ? null : this.stats.iterator();
+ }
+
+ public void addToStats(NodeUsageReport elem) {
+ if (this.stats == null) {
+ this.stats = new ArrayList