diff --git a/.checkstyle/checkstyle.xml b/.checkstyle/checkstyle.xml index e3a1344..d50c4b1 100644 --- a/.checkstyle/checkstyle.xml +++ b/.checkstyle/checkstyle.xml @@ -140,4 +140,8 @@ + + + + \ No newline at end of file diff --git a/.checkstyle/intellij-checkstyle.xml b/.checkstyle/intellij-checkstyle.xml index bbe094e..7d3e860 100644 --- a/.checkstyle/intellij-checkstyle.xml +++ b/.checkstyle/intellij-checkstyle.xml @@ -136,4 +136,8 @@ + + + + \ No newline at end of file diff --git a/.github/ISSUE_TEMPLATES/BUG_REPORT.yml b/.github/ISSUE_TEMPLATES/BUG_REPORT.yml index 274ab77..3a3cd57 100644 --- a/.github/ISSUE_TEMPLATES/BUG_REPORT.yml +++ b/.github/ISSUE_TEMPLATES/BUG_REPORT.yml @@ -57,9 +57,9 @@ body: label: Version description: What version of our software are you running? options: - - 1.5.2 (Default) - - 1.5.1 - - older (<1.5.1) + - 2.2.0 (Default) + - 1.5.2 + - older (<1.5.2) validations: required: true - type: textarea diff --git a/.gitignore b/.gitignore index 96cd907..b0458bb 100644 --- a/.gitignore +++ b/.gitignore @@ -15,4 +15,6 @@ target/ *.iws # Visual Studio Code -.vscode/ \ No newline at end of file +.vscode/ + +mqjms.log.* diff --git a/LICENSE b/LICENSE index 0a24bd8..9df4e14 100644 --- a/LICENSE +++ b/LICENSE @@ -175,7 +175,7 @@ Apache License END OF TERMS AND CONDITIONS - Copyright 2017 IBM Corporation + Copyright 2017, 2024 IBM Corporation Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/README.md b/README.md index b45baa2..6f05c11 100644 --- a/README.md +++ b/README.md @@ -1,72 +1,81 @@ # Kafka Connect sink connector for IBM MQ + kafka-connect-mq-sink is a [Kafka Connect](http://kafka.apache.org/documentation.html#connect) sink connector for copying data from Apache Kafka into IBM MQ. The connector is supplied as source code which you can easily build into a JAR file. 
**Note**: A source connector for IBM MQ is also available on [GitHub](https://github.com/ibm-messaging/kafka-connect-mq-source). - ## Contents - - [Building the connector](#building-the-connector) - - [Running the connector](#running-the-connector) - - [Running the connector with Docker](#running-with-docker) - - [Deploying the connector to Kubernetes](#deploying-to-kubernetes) - - [Data formats](#data-formats) - - [Security](#security) - - [Configuration](#configuration) - - [Troubleshooting](#troubleshooting) - - [Support](#support) - - [Issues and contributions](#issues-and-contributions) - - [License](#license) - +- [Building the connector](#building-the-connector) +- [Running the connector](#running-the-connector) +- [Running the connector with Docker](#running-with-docker) +- [Deploying the connector to Kubernetes](#deploying-to-kubernetes) +- [Data formats](#data-formats) +- [Security](#security) +- [Configuration](#configuration) +- [Exactly-once message delivery semantics](#exactly-once-message-delivery-semantics) +- [Troubleshooting](#troubleshooting) +- [Support](#support) +- [Issues and contributions](#issues-and-contributions) +- [License](#license) ## Building the connector To build the connector, you must have the following installed: -* [git](https://git-scm.com/) -* [Maven 3.0 or later](https://maven.apache.org) -* Java 8 or later + +- [git](https://git-scm.com/) +- [Maven 3.0 or later](https://maven.apache.org) +- Java 8 or later Clone the repository with the following command: + ```shell git clone https://github.com/ibm-messaging/kafka-connect-mq-sink.git ``` Change directory into the `kafka-connect-mq-sink` directory: + ```shell cd kafka-connect-mq-sink ``` Run the unit tests: + ```shell mvn test ``` Run the integration tests (requires Docker): + ```shell mvn integration-test ``` Build the connector using Maven: + ```shell mvn clean package ``` Once built, the output is a single JAR `target/kafka-connect-mq-sink--jar-with-dependencies.jar` which 
contains all of the required dependencies. +**NOTE:** With the 2.0.0 release the base Kafka Connect library has been updated from 2.6.0 to 3.4.1. ## Running the connector For step-by-step instructions, see the following guides for running the connector: - - connecting to Apache Kafka [running locally](UsingMQwithKafkaConnect.md) - - connecting to an installation of [IBM Event Streams](https://ibm.github.io/event-streams/connecting/mq/sink) + +- connecting to Apache Kafka [running locally](UsingMQwithKafkaConnect.md) +- connecting to an installation of [IBM Event Streams](https://ibm.github.io/event-streams/connecting/mq/sink) To run the connector, you must have: -* The JAR from building the connector -* A properties file containing the configuration for the connector -* Apache Kafka 2.6.2 or later, either standalone or included as part of an offering such as IBM Event Streams -* IBM MQ v9 or later, or the IBM MQ on Cloud service + +- The JAR from building the connector +- A properties file containing the configuration for the connector +- Apache Kafka 2.0.0 or later, either standalone or included as part of an offering such as IBM Event Streams. +- IBM MQ v9 or later, or the IBM MQ on Cloud service The connector can be run in a Kafka Connect worker in either standalone (single process) or distributed mode. It's a good idea to start in standalone mode. @@ -74,11 +83,11 @@ The connector can be run in a Kafka Connect worker in either standalone (single You need two configuration files, one for the configuration that applies to all of the connectors such as the Kafka bootstrap servers, and another for the configuration specific to the MQ sink connector such as the connection information for your queue manager. For the former, the Kafka distribution includes a file called `connect-standalone.properties` that you can use as a starting point. For the latter, you can use `config/mq-sink.properties` in this repository. 
-The connector connects to MQ using either a client or a bindings connection. For a client connection, you must provide the name of the queue manager, the connection name (one or more host/port pairs) and the channel name. In addition, you can provide a user name and password if the queue manager is configured to require them for client connections. If you look at the supplied `config/mq-sink.properties`, you'll see how to specify the configuration required. For a bindings connection, you must provide provide the name of the queue manager and also run the Kafka Connect worker on the same system as the queue manager. +The connector connects to MQ using either a client or a bindings connection. For a client connection, you must provide the name of the queue manager, the connection name (one or more host/port pairs) and the channel name. In addition, you can provide a user name and password if the queue manager is configured to require them for client connections. If you look at the supplied `config/mq-sink.properties`, you'll see how to specify the configuration required. For a bindings connection, you must provide the name of the queue manager and also run the Kafka Connect worker on the same system as the queue manager. To run the connector in standalone mode from the directory into which you installed Apache Kafka, you use a command like this: -``` shell +```shell bin/connect-standalone.sh connect-standalone.properties mq-sink.properties ``` @@ -88,16 +97,19 @@ You need an instance of Kafka Connect running in distributed mode. The Kafka dis To start the MQ connector, you can use `config/mq-sink.json` in this repository after replacing all placeholders and use a command like this: -``` shell +```shell curl -X POST -H "Content-Type: application/json" http://localhost:8083/connectors \ --data "@./config/mq-sink.json" ``` - ## Running with Docker This repository includes an example Dockerfile to run Kafka Connect in distributed mode. 
It also adds in the MQ sink connector as an available connector plugin. It uses the default `connect-distributed.properties` and `connect-log4j.properties` files. +Before running the Docker commands, make sure to replace `` with the desired tag value (e.g., `v2.0.0`, `latest`, etc.) for the Docker image. + +To run the Kafka Connect with MQ sink connector using Docker, follow these steps: + 1. `mvn clean package` 1. `docker build -t kafkaconnect-with-mq-sink: .` 1. `docker run -p 8083:8083 kafkaconnect-with-mq-sink:` @@ -110,12 +122,11 @@ docker run -v $(pwd)/config:/opt/kafka/config -p 8083:8083 kafkaconnect-with-mq- To start the MQ connector, you can use `config/mq-sink.json` in this repository after replacing all placeholders and use a command like this: -``` shell +```shell curl -X POST -H "Content-Type: application/json" http://localhost:8083/connectors \ --data "@./config/mq-sink.json" ``` - ## Deploying to Kubernetes This repository includes a Kubernetes yaml file called `kafka-connect.yaml`. This will create a deployment to run Kafka Connect in distributed mode and a service to access the deployment. @@ -125,11 +136,13 @@ The deployment assumes the existence of a Secret called `connect-distributed-con ### Creating Kafka Connect configuration Secret and ConfigMap Create Secret for Kafka Connect configuration: + 1. `cp kafka/config/connect-distributed.properties connect-distributed.properties.orig` 1. `sed '/^#/d;/^[[:space:]]*$/d' < connect-distributed.properties.orig > connect-distributed.properties` 1. `kubectl -n create secret generic connect-distributed-config --from-file=connect-distributed.properties` Create ConfigMap for Kafka Connect Log4j configuration: + 1. `cp kafka/config/connect-log4j.properties connect-log4j.properties.orig` 1. `sed '/^#/d;/^[[:space:]]*$/d' < connect-log4j.properties.orig > connect-log4j.properties` 1. 
`kubectl -n create configmap connect-log4j-config --from-file=connect-log4j.properties` @@ -171,7 +184,6 @@ The following instructions assume you are running on OpenShift and have Strimzi 1. `oc apply -f kafkaconnector.yaml` to start the connector. 1. `oc get kafkaconnector` to list the connectors. You can use `oc describe` to get more details on the connector, such as its status. - ## Data formats Kafka Connect is very flexible but it's important to understand the way that it processes messages to end up with a reliable system. When the connector encounters a message that it cannot process, it stops rather than throwing the message away. Therefore, you need to make sure that the configuration you use can handle the messages the connector will process. @@ -200,12 +212,15 @@ When you set *mq.message.body.jms=true*, the MQ messages are generated as JMS me There's no single configuration that will always be right, but here are some high-level suggestions. -* Message values are treated as byte arrays, pass byte array into MQ message -``` +- Message values are treated as byte arrays, pass byte array into MQ message + +```shell value.converter=org.apache.kafka.connect.converters.ByteArrayConverter ``` -* Message values are treated as strings, pass string into MQ message -``` + +- Message values are treated as strings, pass string into MQ message + +```shell value.converter=org.apache.kafka.connect.storage.StringConverter ``` @@ -270,7 +285,6 @@ In MQ, the correlation ID is a 24-byte array. As a string, the connector represe The connector can be configured to set the Kafka topic, partition and offset as JMS message properties using the `mq.message.builder.*.property` configuration values. If configured, the topic is set as a string property, the partition as an integer property and the offset as a long property. Because these values are set using JMS message properties, they only have an effect if `mq.message.body.jms=true` is set. 
- ## Security The connector supports authentication with user name and password and also connections secured with TLS using a server-side certificate and mutual authentication with client-side certificates. You can also choose whether to use connection security parameters (MQCSP) depending on the security settings you're using in MQ. @@ -289,60 +303,64 @@ You will need to put the public part of the client's certificate in the queue ma For troubleshooting, or to better understand the handshake performed by the IBM MQ Java client application in combination with your specific JSSE provider, you can enable debugging by setting `javax.net.debug=ssl` in the JVM environment. - ## Configuration The configuration options for the Kafka Connect sink connector for IBM MQ are as follows: -| Name | Description | Type | Default | Valid values | -| --------------------------------------- | ---------------------------------------------------------------------- | ------- | -------------- | --------------------------------- | -| topics or topics.regex | List of Kafka source topics | string | | topic1[,topic2,...] | -| mq.queue.manager | The name of the MQ queue manager | string | | MQ queue manager name | -| mq.connection.mode | The connection mode - bindings or client | string | client | client, bindings | -| mq.connection.name.list | List of connection names for queue manager | string | | host(port)[,host(port),...] 
| -| mq.channel.name | The name of the server-connection channel | string | | MQ channel name | -| mq.queue | The name of the target MQ queue | string | | MQ queue name | -| mq.user.name | The user name for authenticating with the queue manager | string | | User name | -| mq.password | The password for authenticating with the queue manager | string | | Password | -| mq.user.authentication.mqcsp | Whether to use MQ connection security parameters (MQCSP) | boolean | true | | -| mq.ccdt.url | The URL for the CCDT file containing MQ connection details | string | | URL for obtaining a CCDT file | -| mq.message.builder | The class used to build the MQ message | string | | Class implementing MessageBuilder | -| mq.message.body.jms | Whether to generate the message body as a JMS message type | boolean | false | | -| mq.time.to.live | Time-to-live in milliseconds for messages sent to MQ | long | 0 (unlimited) | [0,...] | -| mq.persistent | Send persistent or non-persistent messages to MQ | boolean | true | | -| mq.ssl.cipher.suite | The name of the cipher suite for TLS (SSL) connection | string | | Blank or valid cipher suite | -| mq.ssl.peer.name | The distinguished name pattern of the TLS (SSL) peer | string | | Blank or DN pattern | -| mq.ssl.keystore.location | The path to the JKS keystore to use for SSL (TLS) connections | string | JVM keystore | Local path to a JKS file | -| mq.ssl.keystore.password | The password of the JKS keystore to use for SSL (TLS) connections | string | | | -| mq.ssl.truststore.location | The path to the JKS truststore to use for SSL (TLS) connections | string | JVM truststore | Local path to a JKS file | -| mq.ssl.truststore.password | The password of the JKS truststore to use for SSL (TLS) connections | string | | | -| mq.ssl.use.ibm.cipher.mappings | Whether to set system property to control use of IBM cipher mappings | boolean | | | -| mq.message.builder.key.header | The JMS message header to set from the Kafka record key | string | | 
JMSCorrelationID | -| mq.kafka.headers.copy.to.jms.properties | Whether to copy Kafka headers to JMS message properties | boolean | false | | -| mq.message.builder.value.converter | The class and prefix for message builder's value converter | string | | Class implementing Converter | -| mq.message.builder.topic.property | The JMS message property to set from the Kafka topic | string | | Blank or valid JMS property name | -| mq.message.builder.partition.property | The JMS message property to set from the Kafka partition | string | | Blank or valid JMS property name | -| mq.message.builder.offset.property | The JMS message property to set from the Kafka offset | string | | Blank or valid JMS property name | -| mq.reply.queue | The name of the reply-to queue | string | | MQ queue name or queue URI | -| mq.retry.backoff.ms | Wait time, in milliseconds, before retrying after retriable exceptions | long | 60000 | [0,...] | - +| Name | Description | Type | Default | Valid values | +| --------------------------------------- | --------------------------------------------------------------------------------------------------------- | ------- | -------------- | --------------------------------- | +| topics or topics.regex | List of Kafka source topics | string | | topic1[,topic2,...] | +| mq.queue.manager | The name of the MQ queue manager | string | | MQ queue manager name | +| mq.connection.mode | The connection mode - bindings or client | string | client | client, bindings | +| mq.connection.name.list | List of connection names for queue manager | string | | host(port)[,host(port),...] 
| +| mq.channel.name | The name of the server-connection channel | string | | MQ channel name | +| mq.queue | The name of the target MQ queue | string | | MQ queue name | +| mq.exactly.once.state.queue | The name of the MQ queue used to store state when running with exactly-once semantics | string | | MQ state queue name | +| mq.user.name | The user name for authenticating with the queue manager | string | | User name | +| mq.password | The password for authenticating with the queue manager | string | | Password | +| mq.user.authentication.mqcsp | Whether to use MQ connection security parameters (MQCSP) | boolean | true | | +| mq.ccdt.url | The URL for the CCDT file containing MQ connection details | string | | URL for obtaining a CCDT file | +| mq.message.builder | The class used to build the MQ message | string | | Class implementing MessageBuilder | +| mq.message.body.jms | Whether to generate the message body as a JMS message type | boolean | false | | +| mq.time.to.live | Time-to-live in milliseconds for messages sent to MQ | long | 0 (unlimited) | [0,...] 
| +| mq.persistent | Send persistent or non-persistent messages to MQ | boolean | true | | +| mq.ssl.cipher.suite | The name of the cipher suite for TLS (SSL) connection | string | | Blank or valid cipher suite | +| mq.ssl.peer.name | The distinguished name pattern of the TLS (SSL) peer | string | | Blank or DN pattern | +| mq.ssl.keystore.location | The path to the JKS keystore to use for SSL (TLS) connections | string | JVM keystore | Local path to a JKS file | +| mq.ssl.keystore.password | The password of the JKS keystore to use for SSL (TLS) connections | string | | | +| mq.ssl.truststore.location | The path to the JKS truststore to use for SSL (TLS) connections | string | JVM truststore | Local path to a JKS file | +| mq.ssl.truststore.password | The password of the JKS truststore to use for SSL (TLS) connections | string | | | +| mq.ssl.use.ibm.cipher.mappings | Whether to set system property to control use of IBM cipher mappings | boolean | | | +| mq.message.builder.key.header | The JMS message header to set from the Kafka record key | string | | JMSCorrelationID | +| mq.kafka.headers.copy.to.jms.properties | Whether to copy Kafka headers to JMS message properties | boolean | false | | +| mq.message.builder.value.converter | The class and prefix for message builder's value converter | string | | Class implementing Converter | +| mq.message.builder.topic.property | The JMS message property to set from the Kafka topic | string | | Blank or valid JMS property name | +| mq.message.builder.partition.property | The JMS message property to set from the Kafka partition | string | | Blank or valid JMS property name | +| mq.message.builder.offset.property | The JMS message property to set from the Kafka offset | string | | Blank or valid JMS property name | +| mq.reply.queue | The name of the reply-to queue | string | | MQ queue name or queue URI | +| mq.retry.backoff.ms | Wait time, in milliseconds, before retrying after retriable exceptions | long | 60000 | [0,...] 
| +| mq.message.mqmd.write | Whether to enable a custom message builder to write MQ message descriptors | boolean | false | | +| mq.message.mqmd.context | Message context to set on the destination queue. This is required when setting some message descriptors. | string | | `IDENTITY`, `ALL` | ### Using a CCDT file + Some of the connection details for MQ can be provided in a [CCDT file](https://www.ibm.com/support/knowledgecenter/en/SSFKSJ_9.1.0/com.ibm.mq.con.doc/q016730_.htm) by setting `mq.ccdt.url` in the MQ sink connector configuration file. If using a CCDT file the `mq.connection.name.list` and `mq.channel.name` configuration options are not required. ### Externalizing secrets + [KIP 297](https://cwiki.apache.org/confluence/display/KAFKA/KIP-297%3A+Externalizing+Secrets+for+Connect+Configurations) introduced a mechanism to externalize secrets to be used as configuration for Kafka connectors. #### Example: externalizing secrets with FileConfigProvider Given a file `mq-secrets.properties` with the contents: -``` + +```shell secret-key=password ``` Update the worker configuration file to specify the FileConfigProvider which is included by default: -``` +```shell # Additional properties for the worker configuration to enable use of ConfigProviders # multiple comma-separated provider types can be specified here config.providers=file @@ -351,7 +369,7 @@ config.providers.file.class=org.apache.kafka.common.config.provider.FileConfigPr Update the connector configuration file to reference `secret-key` in the file: -``` +```shell mq.password=${file:mq-secret.properties:secret-key} ``` @@ -359,27 +377,107 @@ mq.password=${file:mq-secret.properties:secret-key} To use a file for the `mq.password` in Kubernetes, you create a Secret using the file as described in [the Kubernetes docs](https://kubernetes.io/docs/concepts/configuration/secret/#using-secrets-as-files-from-a-pod). 
+## Exactly-once message delivery semantics + +The MQ sink connector provides at-least-once message delivery by default. This means that each Kafka message will be delivered to MQ, but in failure scenarios it is possible to have duplicated messages delivered to MQ. + +Version 2.0.0 of the MQ sink connector introduced exactly-once message delivery semantics. An additional MQ queue is used to store the state of message deliveries. When exactly-once delivery is enabled Kafka messages are delivered to MQ with no duplicated messages. + +### Exactly-once delivery Kafka Connect worker configuration + +To enable exactly-once delivery, the MQ sink connector must be run on Kafka Connect version 2.0.0 or later. + +**Note**: Exactly-once support for sink connectors is only available in [distributed mode](#running-in-distributed-mode); standalone Connect workers cannot provide exactly-once delivery semantics. Kafka Connect is in distributed mode when [running the connector with Docker](#running-with-docker) and when [deploying the connector to Kubernetes](#deploying-to-kubernetes). + +### Exactly-once delivery MQ sink connector configuration + +To enable exactly-once delivery, the MQ sink connector must be configured with the `mq.exactly.once.state.queue` property set to the name of a pre-configured MQ queue on the same queue manager as the sink MQ queue. + +Exactly-once delivery requires that only a single connector task can run in the Kafka Connect instance, hence the `tasks.max` property must be set to `1` to ensure that failure scenarios do not cause duplicated messages to be delivered. + +To achieve exactly-once delivery with the MQ sink connector, it is essential to configure its consumer group to ignore records in aborted transactions. You can find detailed instructions in the [Kafka documentation](https://kafka.apache.org/documentation/#connect_exactlyoncesink). Notably, this configuration does not have any additional ACL (Access Control List) requirements. 
+ +To start the MQ sink connector with exactly-once delivery, the `config/mq-sink-exactly-once.json` file in this repository can be used as a connector configuration template. + +**Note**: Exactly-once delivery requires a clear state queue on start-up otherwise the connector will behave as if it is recovering from a failure state and will attempt to get undelivered messages recorded in the out-of-date state message. Therefore, ensure that the state queue is empty each time exactly-once delivery is enabled (especially if re-enabling the exactly-once feature). + +### Exactly-once delivery MQ requirements + +The following values are recommended across MQ to facilitate the exactly-once behaviour: + +- On the channel used for Kafka Connect, `HBINT` should be set to 30 seconds to allow MQ transaction rollbacks to occur more quickly in failure scenarios. +- On the state queue, `DEFSOPT` should be set to `EXCL` to ensure the state queue share option is exclusive. + +Exactly-once delivery requires that messages are set to not expire and that all messages on the state queue are persistent (this is to ensure correct behaviour around queue manager restarts). + +### Exactly-once failure scenarios + +The MQ sink connector is designed to fail on start-up in certain cases to ensure that exactly-once delivery is not compromised. +In some of these failure scenarios, it will be necessary for an MQ administrator to remove messages from the exactly-once state queue before the MQ sink connector can start up and begin to deliver messages from the sink queue again. In these cases, the MQ sink connector will have the `FAILED` status and the Kafka Connect logs will describe any required administrative action. ## Troubleshooting +### Connector in a `FAILED` state + +If the connector experiences a non-retriable error then a ConnectException will cause the connector to go into a `FAILED` state. This will require a manual restart of the connector using the Kafka Connect REST API. 
+ ### Unable to connect to Kafka You may receive an `org.apache.kafka.common.errors.SslAuthenticationException: SSL handshake failed` error when trying to run the MQ sink connector using SSL to connect to your Kafka cluster. In the case that the error is caused by the following exception: `Caused by: java.security.cert.CertificateException: No subject alternative DNS name matching XXXXX found.`, Java may be replacing the IP address of your cluster with the corresponding hostname in your `/etc/hosts` file. For example, to push Docker images to a custom Docker repository, you may add an entry in this file which corresponds to the IP of your repository e.g. `123.456.78.90 mycluster.icp`. To fix this, you can comment out this line in your `/etc/hosts` file. ### Unsupported cipher suite -When configuring TLS connection to MQ, you may find that the queue manager rejects the cipher suite, in spite of the name looking correct. There are two different naming conventions for cipher suites (https://www.ibm.com/support/knowledgecenter/SSFKSJ_9.1.0/com.ibm.mq.dev.doc/q113220_.htm). Setting the configuration option `mq.ssl.use.ibm.cipher.mappings=false` often resolves cipher suite problems. +When configuring TLS connection to MQ, you may find that the queue manager rejects the cipher suite, in spite of the name looking correct. There are two different naming conventions for cipher suites (<https://www.ibm.com/support/knowledgecenter/SSFKSJ_9.1.0/com.ibm.mq.dev.doc/q113220_.htm>). Setting the configuration option `mq.ssl.use.ibm.cipher.mappings=false` often resolves cipher suite problems. + +### `MQRC_NOT_AUTHORIZED` exception + +When attempting to send a message to an IBM MQ queue, an MQException with code `MQRC_NOT_AUTHORIZED` (reason code `2035`) and completion code 2 is thrown. This indicates insufficient permissions on the queue and the queue manager. + +#### Resolving the problem + +1. **Review permissions**: Ensure that the user has the necessary permissions for accessing the queue and the queue manager. +2. 
**Grant authority**: If the user does not have the necessary permissions, assign required authorities to the user. +3. **Set Context**: Set `WMQ_MQMD_MESSAGE_CONTEXT` property for required properties. + + Configure the `mq.message.mqmd.context` property according to the message context. Options include: + - `ALL`, which corresponds to `WMQ_MDCTX_SET_ALL_CONTEXT` + - `IDENTITY`, mapped to `WMQ_MDCTX_SET_IDENTITY_CONTEXT` + + **Important:** If your message contains any of the following properties, you must ensure that `WMQ_MQMD_MESSAGE_CONTEXT` is set to either `WMQ_MDCTX_SET_IDENTITY_CONTEXT` or `WMQ_MDCTX_SET_ALL_CONTEXT`: + - JMS_IBM_MQMD_UserIdentifier + - JMS_IBM_MQMD_AccountingToken + - JMS_IBM_MQMD_ApplIdentityData + + Similarly, if your message includes any of the following properties, set the `WMQ_MQMD_MESSAGE_CONTEXT` field to `WMQ_MDCTX_SET_ALL_CONTEXT`: + - JMS_IBM_MQMD_PutApplType + - JMS_IBM_MQMD_PutApplName + - JMS_IBM_MQMD_PutDate + - JMS_IBM_MQMD_PutTime + - JMS_IBM_MQMD_ApplOriginData + + Other message properties do not require the `mq.message.mqmd.context` property. + +#### Additional tips + +- Verify that the lengths of all properties are correctly set within the allowed limits. +- Do not set the [`JMS_IBM_MQMD_BackoutCount`](https://www.ibm.com/docs/en/ibm-mq/9.3?topic=descriptor-backoutcount-mqlong-mqmd) property. +- Refer to the IBM MQ documentation for detailed configuration guidance: + + - [IBM MQ JMS Message Object Properties](https://www.ibm.com/docs/en/ibm-mq/9.3?topic=application-jms-message-object-properties): This documentation provides details about various properties that can be set on IBM MQ JMS message objects, including their names, types, and descriptions. + - [IBM MQ Developer Community](https://community.ibm.com/community/user/integration/home): The developer community for IBM MQ, where you can find forums, articles, and resources related to development and troubleshooting for IBM MQ. 
+ - [IBM MQ troubleshooting guide](https://www.ibm.com/docs/en/ibm-mq/9.3?topic=mq-troubleshooting-support): IBM guide for troubleshooting common issues and errors in IBM MQ. ## Support -Commercial support for this connector is available for customers with a support entitlement for [IBM Event Automation](https://www.ibm.com/products/event-automation) or [IBM Cloud Pak for Integration](https://www.ibm.com/cloud/cloud-pak-for-integration). +A commercially supported version of this connector is available for customers with a support entitlement for [IBM Event Streams](https://www.ibm.com/cloud/event-streams) or [IBM Cloud Pak for Integration](https://www.ibm.com/cloud/cloud-pak-for-integration). ## Issues and contributions + For issues relating specifically to this connector, please use the [GitHub issue tracker](https://github.com/ibm-messaging/kafka-connect-mq-sink/issues). If you do want to submit a Pull Request related to this connector, please read the [contributing guide](CONTRIBUTING.md) first to understand how to sign your commits. ## License -Copyright 2017, 2020, 2023 IBM Corporation +Copyright 2017, 2020, 2023, 2024 IBM Corporation Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. 
diff --git a/config/mq-sink-exactly-once.json b/config/mq-sink-exactly-once.json new file mode 100644 index 0000000..450e530 --- /dev/null +++ b/config/mq-sink-exactly-once.json @@ -0,0 +1,19 @@ +{ + "name": "mq-sink-exactly-once", + "config": + { + "connector.class": "com.ibm.eventstreams.connect.mqsink.MQSinkConnector", + "tasks.max": "1", + "topics": "", + + "key.converter": "org.apache.kafka.connect.storage.StringConverter", + "value.converter": "org.apache.kafka.connect.storage.StringConverter", + + "mq.queue.manager": "", + "mq.connection.name.list": "", + "mq.channel.name": "", + "mq.queue": "", + "mq.message.builder": "com.ibm.eventstreams.connect.mqsink.builders.DefaultMessageBuilder", + "mq.exactly.once.state.queue": "" + } +} diff --git a/config/mq-sink.properties b/config/mq-sink.properties index ab9450e..520be39 100644 --- a/config/mq-sink.properties +++ b/config/mq-sink.properties @@ -1,4 +1,4 @@ -# Copyright 2017, 2020 IBM Corporation +# Copyright 2017, 2020, 2023, 2024 IBM Corporation # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/pom.xml b/pom.xml index e6b0d39..ebb7fe9 100644 --- a/pom.xml +++ b/pom.xml @@ -1,6 +1,6 @@ + maven-assembly-plugin 3.1.1 @@ -280,6 +321,15 @@ + + org.apache.maven.plugins + maven-surefire-plugin + 3.1.0 + + false + 1 + + - + \ No newline at end of file diff --git a/src/assembly/package.xml b/src/assembly/package.xml index b28d245..a1e6181 100644 --- a/src/assembly/package.xml +++ b/src/assembly/package.xml @@ -1,6 +1,6 @@ failure happens the next time we call. 
+ .when(jmsWorkerSpy).send(any(SinkRecord.class)); + + assertThrows(ConnectException.class, () -> { + mqSinkTask.put(sinkRecords); + }); + + mqRestApiHelper.sendCommand(START_CHANNEL); + assertThat(browseAllMessagesFromQueue(DEFAULT_SINK_QUEUE_NAME)).isEmpty(); + assertThat(browseAllMessagesFromQueue(DEFAULT_SINK_STATE_QUEUE_NAME)).isEmpty(); + + mqSinkTask.put(sinkRecords); + + assertThat(browseAllMessagesFromQueue(DEFAULT_SINK_QUEUE_NAME)) + .extracting(message -> message.getBody(String.class)) + .containsExactly( + "Message with offset 0 ", + "Message with offset 1 ", + "Message with offset 2 ", + "Message with offset 3 ", + "Message with offset 4 ", + "Message with offset 5 ", + "Message with offset 6 ", + "Message with offset 7 ", + "Message with offset 8 ", + "Message with offset 9 "); + final List stateQueueMessages = browseAllMessagesFromQueue(DEFAULT_SINK_STATE_QUEUE_NAME); + assertThat(stateQueueMessages) + .extracting(message -> message.getBody(String.class)) + .extracting(message -> extractOffsetFromHashMapString(message, TOPIC + "-" + PARTITION)) + .isEmpty(); + } + + @Test + public void testCrashAfterMQCommitBeforeKafkaCommit() throws Exception { + final Map connectorProps = getConnectionDetails(); + + MQSinkTask mqSinkTask = getMqSinkTask(connectorProps); + + final List sinkRecords = createSinkRecordsFromOffsets(Arrays.asList( + 124L, + 125L, + 126L)); + + mqSinkTask.put(sinkRecords); + + List stateQueueMessages = browseAllMessagesFromQueue(DEFAULT_SINK_STATE_QUEUE_NAME); + assertThat(stateQueueMessages) + .extracting(message -> message.getBody(String.class)) + .extracting(message -> extractOffsetFromHashMapString(message, TOPIC + "-" + PARTITION)) + .isEmpty(); + + assertThat(browseAllMessagesFromQueue(DEFAULT_SINK_QUEUE_NAME)) + .extracting(message -> message.getBody(String.class)) + .containsExactly( + "Message with offset 124 ", + "Message with offset 125 ", + "Message with offset 126 "); + + // Closest we can simulate a connect "crash", the idea 
being that this would + // happen after MQ commit, before Kafka committed the records + mqSinkTask.stop(); + mqSinkTask = getMqSinkTask(connectorProps); + + // Put called again with the same records + a few more. + final List newSinkRecords = createSinkRecordsFromOffsets(Arrays.asList( + 127L, + 128L, + 129L)); + sinkRecords.addAll(newSinkRecords); + mqSinkTask.put(sinkRecords); + + assertThat(browseAllMessagesFromQueue(DEFAULT_SINK_QUEUE_NAME)) + .extracting(message -> message.getBody(String.class)) + .containsExactly( + "Message with offset 124 ", + "Message with offset 125 ", // Duplicate Message + "Message with offset 126 ", // Duplicate Message + "Message with offset 124 ", // Duplicate Message + "Message with offset 125 ", + "Message with offset 126 ", + "Message with offset 127 ", + "Message with offset 128 ", + "Message with offset 129 "); + stateQueueMessages = browseAllMessagesFromQueue(DEFAULT_SINK_STATE_QUEUE_NAME); + assertThat(stateQueueMessages) + .extracting(message -> message.getBody(String.class)) + .extracting(message -> extractOffsetFromHashMapString(message, TOPIC + "-" + PARTITION)) + .isEmpty(); + } +} diff --git a/src/integration/java/com/ibm/eventstreams/connect/mqsink/MQSinkTaskAuthIT.java b/src/integration/java/com/ibm/eventstreams/connect/mqsink/MQSinkTaskAuthIT.java index 93a680a..4215c77 100644 --- a/src/integration/java/com/ibm/eventstreams/connect/mqsink/MQSinkTaskAuthIT.java +++ b/src/integration/java/com/ibm/eventstreams/connect/mqsink/MQSinkTaskAuthIT.java @@ -1,5 +1,5 @@ /** - * Copyright 2022 IBM Corporation + * Copyright 2022, 2023, 2024 IBM Corporation * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. 
@@ -21,7 +21,6 @@ import java.util.HashMap; import java.util.List; import java.util.Map; -import java.util.concurrent.TimeoutException; import javax.jms.Connection; import javax.jms.Destination; @@ -34,49 +33,53 @@ import org.junit.ClassRule; import org.junit.Test; import org.testcontainers.containers.GenericContainer; -import org.testcontainers.containers.output.WaitingConsumer; +import org.testcontainers.containers.wait.strategy.Wait; +import com.github.dockerjava.api.model.ExposedPort; +import com.github.dockerjava.api.model.HostConfig; +import com.github.dockerjava.api.model.PortBinding; +import com.github.dockerjava.api.model.Ports; import com.ibm.msg.client.jms.JmsConnectionFactory; import com.ibm.msg.client.jms.JmsFactoryFactory; import com.ibm.msg.client.wmq.WMQConstants; public class MQSinkTaskAuthIT { - private static final String QMGR_NAME = "MYAUTHQMGR"; - private static final String QUEUE_NAME = "DEV.QUEUE.2"; - private static final String CHANNEL_NAME = "DEV.APP.SVRCONN"; - private static final String APP_PASSWORD = "MySuperSecretPassword"; - + public static final boolean USER_AUTHENTICATION_MQCSP = true; @ClassRule - public static GenericContainer MQ_CONTAINER = new GenericContainer<>("icr.io/ibm-messaging/mq:latest") - .withEnv("LICENSE", "accept") - .withEnv("MQ_QMGR_NAME", QMGR_NAME) - .withEnv("MQ_ENABLE_EMBEDDED_WEB_SERVER", "false") - .withEnv("MQ_APP_PASSWORD", APP_PASSWORD) - .withExposedPorts(1414); - + final public static GenericContainer MQ_CONTAINER = new GenericContainer<>(AbstractJMSContextIT.MQ_IMAGE) + .withEnv("LICENSE", "accept") + .withEnv("MQ_QMGR_NAME", AbstractJMSContextIT.QMGR_NAME) + .withEnv("MQ_APP_PASSWORD", AbstractJMSContextIT.APP_PASSWORD) + .withExposedPorts(AbstractJMSContextIT.TCP_MQ_EXPOSED_PORT, AbstractJMSContextIT.REST_API_EXPOSED_PORT) + .withCreateContainerCmdModifier(cmd -> cmd.withHostConfig( + new HostConfig().withPortBindings( + new PortBinding(Ports.Binding.bindPort(AbstractJMSContextIT.TCP_MQ_HOST_PORT), + 
new ExposedPort(AbstractJMSContextIT.TCP_MQ_EXPOSED_PORT)), + new PortBinding(Ports.Binding.bindPort(AbstractJMSContextIT.REST_API_HOST_PORT), + new ExposedPort(AbstractJMSContextIT.REST_API_EXPOSED_PORT))))) + .waitingFor(Wait.forListeningPort()); @Test public void testAuthenticatedQueueManager() throws Exception { - waitForQueueManagerStartup(); - - Map connectorProps = new HashMap<>(); - connectorProps.put("mq.queue.manager", QMGR_NAME); - connectorProps.put("mq.connection.mode", "client"); - connectorProps.put("mq.connection.name.list", "localhost(" + MQ_CONTAINER.getMappedPort(1414).toString() + ")"); - connectorProps.put("mq.channel.name", CHANNEL_NAME); - connectorProps.put("mq.queue", QUEUE_NAME); - connectorProps.put("mq.user.authentication.mqcsp", "true"); - connectorProps.put("mq.user.name", "app"); - connectorProps.put("mq.password", APP_PASSWORD); - connectorProps.put("mq.message.builder", "com.ibm.eventstreams.connect.mqsink.builders.DefaultMessageBuilder"); - - MQSinkTask newConnectTask = new MQSinkTask(); + final Map connectorProps = new HashMap<>(); + connectorProps.put("mq.queue.manager", AbstractJMSContextIT.QMGR_NAME); + connectorProps.put("mq.connection.mode", AbstractJMSContextIT.CONNECTION_MODE); + connectorProps.put("mq.connection.name.list", AbstractJMSContextIT.HOST_NAME + "(" + + MQ_CONTAINER.getMappedPort(AbstractJMSContextIT.TCP_MQ_EXPOSED_PORT).toString() + ")"); + connectorProps.put("mq.channel.name", AbstractJMSContextIT.CHANNEL_NAME); + connectorProps.put("mq.queue", AbstractJMSContextIT.DEFAULT_SINK_QUEUE_NAME); + connectorProps.put("mq.user.authentication.mqcsp", String.valueOf(USER_AUTHENTICATION_MQCSP)); + connectorProps.put("mq.user.name", AbstractJMSContextIT.APP_USERNAME); + connectorProps.put("mq.password", AbstractJMSContextIT.APP_PASSWORD); + connectorProps.put("mq.message.builder", AbstractJMSContextIT.DEFAULT_MESSAGE_BUILDER); + + final MQSinkTask newConnectTask = new MQSinkTask(); newConnectTask.start(connectorProps); - 
List records = new ArrayList<>(); - SinkRecord record = new SinkRecord("KAFKA.TOPIC", 0, + final List records = new ArrayList<>(); + final SinkRecord record = new SinkRecord(AbstractJMSContextIT.TOPIC, 0, null, null, null, "message payload", 0); @@ -86,53 +89,45 @@ public void testAuthenticatedQueueManager() throws Exception { newConnectTask.stop(); - List messages = getAllMessagesFromQueue(); + final List messages = getAllMessagesFromQueue(); assertEquals(1, messages.size()); assertEquals("message payload", messages.get(0).getBody(String.class)); } - - private void waitForQueueManagerStartup() throws TimeoutException { - WaitingConsumer logConsumer = new WaitingConsumer(); - MQ_CONTAINER.followOutput(logConsumer); - logConsumer.waitUntil(logline -> logline.getUtf8String().contains("AMQ5806I: Queued Publish/Subscribe Daemon started for queue manager")); - } - private List getAllMessagesFromQueue() throws JMSException { Connection connection = null; Session session = null; Destination destination = null; MessageConsumer consumer = null; - JmsFactoryFactory ff = JmsFactoryFactory.getInstance(WMQConstants.WMQ_PROVIDER); + final JmsFactoryFactory ff = JmsFactoryFactory.getInstance(WMQConstants.WMQ_PROVIDER); - JmsConnectionFactory cf = ff.createConnectionFactory(); - cf.setStringProperty(WMQConstants.WMQ_HOST_NAME, "localhost"); - cf.setIntProperty(WMQConstants.WMQ_PORT, MQ_CONTAINER.getMappedPort(1414)); - cf.setStringProperty(WMQConstants.WMQ_CHANNEL, CHANNEL_NAME); + final JmsConnectionFactory cf = ff.createConnectionFactory(); + cf.setStringProperty(WMQConstants.WMQ_HOST_NAME, AbstractJMSContextIT.HOST_NAME); + cf.setIntProperty(WMQConstants.WMQ_PORT, MQ_CONTAINER.getMappedPort(AbstractJMSContextIT.TCP_MQ_EXPOSED_PORT)); + cf.setStringProperty(WMQConstants.WMQ_CHANNEL, AbstractJMSContextIT.CHANNEL_NAME); cf.setIntProperty(WMQConstants.WMQ_CONNECTION_MODE, WMQConstants.WMQ_CM_CLIENT); - cf.setStringProperty(WMQConstants.WMQ_QUEUE_MANAGER, QMGR_NAME); - 
cf.setBooleanProperty(WMQConstants.USER_AUTHENTICATION_MQCSP, true); - cf.setStringProperty(WMQConstants.USERID, "app"); - cf.setStringProperty(WMQConstants.PASSWORD, APP_PASSWORD); + cf.setStringProperty(WMQConstants.WMQ_QUEUE_MANAGER, AbstractJMSContextIT.QMGR_NAME); + cf.setBooleanProperty(WMQConstants.USER_AUTHENTICATION_MQCSP, USER_AUTHENTICATION_MQCSP); + cf.setStringProperty(WMQConstants.USERID, AbstractJMSContextIT.APP_USERNAME); + cf.setStringProperty(WMQConstants.PASSWORD, AbstractJMSContextIT.APP_PASSWORD); connection = cf.createConnection(); session = connection.createSession(false, Session.AUTO_ACKNOWLEDGE); - destination = session.createQueue(QUEUE_NAME); + destination = session.createQueue(AbstractJMSContextIT.DEFAULT_SINK_QUEUE_NAME); consumer = session.createConsumer(destination); connection.start(); - List messages = new ArrayList<>(); + final List messages = new ArrayList<>(); Message message; do { message = consumer.receiveNoWait(); if (message != null) { messages.add(message); } - } - while (message != null); + } while (message != null); connection.close(); diff --git a/src/integration/java/com/ibm/eventstreams/connect/mqsink/MQSinkTaskExactlyOnceIT.java b/src/integration/java/com/ibm/eventstreams/connect/mqsink/MQSinkTaskExactlyOnceIT.java new file mode 100644 index 0000000..2f7794e --- /dev/null +++ b/src/integration/java/com/ibm/eventstreams/connect/mqsink/MQSinkTaskExactlyOnceIT.java @@ -0,0 +1,562 @@ +/** + * Copyright 2023, 2024 IBM Corporation + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+ * See the License for the specific language governing permissions and + * limitations under the License. + */ +package com.ibm.eventstreams.connect.mqsink; + +import static com.ibm.eventstreams.connect.mqsink.util.MQRestAPIHelper.START_CHANNEL; +import static com.ibm.eventstreams.connect.mqsink.util.MQRestAPIHelper.STOP_CHANNEL; +import static org.assertj.core.api.Assertions.assertThat; +import static org.junit.Assert.assertThrows; +import static org.mockito.ArgumentMatchers.any; +import static org.mockito.Mockito.doCallRealMethod; +import static org.mockito.Mockito.doThrow; +import static org.mockito.Mockito.spy; +import static org.mockito.Mockito.when; + +import java.io.IOException; +import java.security.KeyManagementException; +import java.security.NoSuchAlgorithmException; +import java.util.ArrayList; +import java.util.Arrays; +import java.util.HashMap; +import java.util.List; +import java.util.Map; + +import javax.jms.JMSConsumer; +import javax.jms.JMSException; +import javax.jms.JMSRuntimeException; +import javax.jms.Message; + +import org.apache.kafka.connect.errors.ConnectException; +import org.apache.kafka.connect.sink.SinkRecord; +import org.junit.After; +import org.junit.Test; + +import com.fasterxml.jackson.core.JsonProcessingException; +import com.fasterxml.jackson.databind.ObjectMapper; +import com.ibm.eventstreams.connect.mqsink.util.SinkRecordBuilderForTest; +import com.ibm.eventstreams.connect.mqsink.utils.Configs; + + +public class MQSinkTaskExactlyOnceIT extends AbstractJMSContextIT { + + @After + public void after() throws Exception { + clearAllMessages(DEFAULT_SINK_QUEUE_NAME); + clearAllMessages(DEFAULT_SINK_STATE_QUEUE_NAME); + } + + @Test + public void testCrashBeforeCommitToKafkaThenRollbackOccurs() throws Exception { + final Map connectorProps = getExactlyOnceConnectionDetails(); + + final MQSinkTask mqSinkTask = getMqSinkTask(connectorProps); + + final JMSWorker jmsWorkerSpy = configureJMSWorkerSpy(Configs.customConfig(connectorProps), 
mqSinkTask); + + final List sinkRecords = createSinkRecords(10); + + doCallRealMethod() + .doCallRealMethod() + .doAnswer(invocation -> { + // Send the record as expected + final SinkRecord sinkRecord = invocation.getArgument(0); + jmsWorkerSpy.send(sinkRecord); + + // But also do the STOP channel + mqRestApiHelper.sendCommand(STOP_CHANNEL); + return null; + }) + .doCallRealMethod() // --> failure happens the next time we call. + .when(jmsWorkerSpy).send(any(SinkRecord.class)); + + assertThrows(ConnectException.class, () -> { + mqSinkTask.put(sinkRecords); + }); + + mqRestApiHelper.sendCommand(START_CHANNEL); + assertThat(browseAllMessagesFromQueue(DEFAULT_SINK_QUEUE_NAME)).isEmpty(); + assertThat(browseAllMessagesFromQueue(DEFAULT_SINK_STATE_QUEUE_NAME)).isEmpty(); + + mqSinkTask.put(sinkRecords); + + assertThat(browseAllMessagesFromQueue(DEFAULT_SINK_QUEUE_NAME)) + .extracting(message -> message.getBody(String.class)) + .containsExactly( + "Message with offset 0 ", + "Message with offset 1 ", + "Message with offset 2 ", + "Message with offset 3 ", + "Message with offset 4 ", + "Message with offset 5 ", + "Message with offset 6 ", + "Message with offset 7 ", + "Message with offset 8 ", + "Message with offset 9 "); + final List stateQueueMessages = browseAllMessagesFromQueue(DEFAULT_SINK_STATE_QUEUE_NAME); + assertThat(stateQueueMessages) + .extracting(message -> message.getBody(String.class)) + .extracting(message -> extractOffsetFromHashMapString(message, TOPIC + "-" + PARTITION)) + .containsExactly("9"); + } + + @Test + public void testCrashAfterMQCommitBeforeKafkaCommit() throws Exception { + final Map connectorProps = getExactlyOnceConnectionDetails(); + + MQSinkTask mqSinkTask = getMqSinkTask(connectorProps); + + final List sinkRecords = createSinkRecordsFromOffsets(Arrays.asList( + 124L, + 125L, + 126L)); + + mqSinkTask.put(sinkRecords); + + List stateQueueMessages = browseAllMessagesFromQueue(DEFAULT_SINK_STATE_QUEUE_NAME); + assertThat(stateQueueMessages) 
+ .extracting(message -> message.getBody(String.class)) + .extracting(message -> extractOffsetFromHashMapString(message, TOPIC + "-" + PARTITION)) + .containsExactly("126"); + + assertThat(browseAllMessagesFromQueue(DEFAULT_SINK_QUEUE_NAME)) + .extracting(message -> message.getBody(String.class)) + .containsExactly( + "Message with offset 124 ", + "Message with offset 125 ", + "Message with offset 126 "); + + // Closest we can simulate a connect "crash", the idea being that this would + // happen after MQ commit, before Kafka committed the records + mqSinkTask.stop(); + mqSinkTask = getMqSinkTask(connectorProps); + + // Put called again with the same records + a few more. + final List newSinkRecords = createSinkRecordsFromOffsets(Arrays.asList( + 127L, + 128L, + 129L)); + sinkRecords.addAll(newSinkRecords); + mqSinkTask.put(sinkRecords); + + assertThat(browseAllMessagesFromQueue(DEFAULT_SINK_QUEUE_NAME)) + .extracting(message -> message.getBody(String.class)) + .containsExactly( + "Message with offset 124 ", + "Message with offset 125 ", + "Message with offset 126 ", + "Message with offset 127 ", + "Message with offset 128 ", + "Message with offset 129 "); + stateQueueMessages = browseAllMessagesFromQueue(DEFAULT_SINK_STATE_QUEUE_NAME); + assertThat(stateQueueMessages) + .extracting(message -> message.getBody(String.class)) + .extracting(message -> extractOffsetFromHashMapString(message, TOPIC + "-" + PARTITION)) + .containsExactly("129"); + } + + @Test + public void testOnlyOnceWithMultiplePartitionsAndMultipleTopics() + throws Exception { + final Map connectorProps = getExactlyOnceConnectionDetails(); + + final MQSinkTask mqSinkTask = getMqSinkTask(connectorProps); + + final JMSWorker jmsWorkerSpy = configureJMSWorkerSpy(Configs.customConfig(connectorProps), mqSinkTask); + + final List sinkRecords = new ArrayList<>(); + + final SinkRecordBuilderForTest recordBuilder = new SinkRecordBuilderForTest().keySchema(null).key(null) + .valueSchema(null); + + 
sinkRecords.addAll(Arrays.asList( + recordBuilder.topic("TOPIC-A").partition(0).offset(0L).value("TOPIC-A-0-0L").build(), + recordBuilder.topic("TOPIC-A").partition(1).offset(0L).value("TOPIC-A-1-0L").build(), + recordBuilder.topic("TOPIC-A").partition(2).offset(0L).value("TOPIC-A-2-0L").build(), + recordBuilder.topic("TOPIC-B").partition(0).offset(0L).value("TOPIC-B-0-0L").build(), + recordBuilder.topic("TOPIC-B").partition(1).offset(0L).value("TOPIC-B-1-0L").build(), + recordBuilder.topic("TOPIC-B").partition(2).offset(0L).value("TOPIC-B-2-0L").build(), + recordBuilder.topic("TOPIC-A").partition(0).offset(1L).value("TOPIC-A-0-1L").build(), + recordBuilder.topic("TOPIC-A").partition(1).offset(1L).value("TOPIC-A-1-1L").build(), + recordBuilder.topic("TOPIC-A").partition(2).offset(1L).value("TOPIC-A-2-1L").build(), + recordBuilder.topic("TOPIC-B").partition(0).offset(1L).value("TOPIC-B-0-1L").build(), + recordBuilder.topic("TOPIC-B").partition(1).offset(1L).value("TOPIC-B-1-1L").build(), + recordBuilder.topic("TOPIC-B").partition(2).offset(1L).value("TOPIC-B-2-1L").build())); + + checkIfItCrashBeforeCommitToKafkaThenRollbackOccurs(mqSinkTask, jmsWorkerSpy, sinkRecords); + // ------------------------------ + sinkRecords.clear(); + sinkRecords.addAll(Arrays.asList( + recordBuilder.topic("TOPIC-A").partition(0).offset(2L).value("TOPIC-A-0-2L").build(), + recordBuilder.topic("TOPIC-A").partition(1).offset(2L).value("TOPIC-A-1-2L").build(), + recordBuilder.topic("TOPIC-A").partition(2).offset(2L).value("TOPIC-A-2-2L").build(), + recordBuilder.topic("TOPIC-B").partition(0).offset(2L).value("TOPIC-B-0-2L").build(), + recordBuilder.topic("TOPIC-B").partition(1).offset(2L).value("TOPIC-B-1-2L").build(), + recordBuilder.topic("TOPIC-B").partition(2).offset(2L).value("TOPIC-B-2-2L").build())); + + checkIfItCrashAfterMQCommitBeforeKafkaCommit(connectorProps, mqSinkTask, jmsWorkerSpy, sinkRecords); + } + + private void checkIfItCrashBeforeCommitToKafkaThenRollbackOccurs(final 
MQSinkTask mqSinkTask, + final JMSWorker jmsWorkerSpy, + final List sinkRecords) + throws IOException, KeyManagementException, NoSuchAlgorithmException, JMSException { + doCallRealMethod() + .doCallRealMethod() + .doAnswer(invocation -> { + // Send the record as expected + final SinkRecord sinkRecord = invocation.getArgument(0); + jmsWorkerSpy.send(sinkRecord); + + // But also do the STOP channel + mqRestApiHelper.sendCommand(STOP_CHANNEL); + return null; + }) + .doCallRealMethod() // --> failure happens the next time we call. + .when(jmsWorkerSpy).send(any(SinkRecord.class)); + + assertThrows(ConnectException.class, () -> { + mqSinkTask.put(sinkRecords); + }); + + mqRestApiHelper.sendCommand(START_CHANNEL); + assertThat(browseAllMessagesFromQueue(DEFAULT_SINK_QUEUE_NAME)).isEmpty(); + assertThat(browseAllMessagesFromQueue(DEFAULT_SINK_STATE_QUEUE_NAME)).isEmpty(); + + mqSinkTask.put(sinkRecords); + + assertThat(browseAllMessagesFromQueue(DEFAULT_SINK_QUEUE_NAME)) + .extracting(message -> message.getBody(String.class)) + .containsExactly("TOPIC-A-0-0L", + "TOPIC-A-1-0L", + "TOPIC-A-2-0L", + "TOPIC-B-0-0L", + "TOPIC-B-1-0L", + "TOPIC-B-2-0L", + "TOPIC-A-0-1L", // <-- last offset record info saved to state queue + "TOPIC-A-1-1L", // <-- last offset record info saved to state queue + "TOPIC-A-2-1L", // <-- last offset record info saved to state queue + "TOPIC-B-0-1L", // <-- last offset record info saved to state queue + "TOPIC-B-1-1L", // <-- last offset record info saved to state queue + "TOPIC-B-2-1L"); // <-- last offset record info saved to state queue + final List stateQueueMessages = browseAllMessagesFromQueue(DEFAULT_SINK_STATE_QUEUE_NAME); + assertThat(stateQueueMessages) + .extracting(message -> message.getBody(String.class)) + .extracting(message -> extractHashMapFromString(message)) + .containsExactly( + new HashMap() { + { + put("TOPIC-A-0", "1"); + put("TOPIC-A-1", "1"); + put("TOPIC-A-2", "1"); + put("TOPIC-B-0", "1"); + put("TOPIC-B-1", "1"); + 
put("TOPIC-B-2", "1"); + } + }); + } + + private void checkIfItCrashAfterMQCommitBeforeKafkaCommit(final Map connectorProps, + final MQSinkTask mqSinkTask, + final JMSWorker jmsWorkerSpy, final List sinkRecords) + throws IOException, KeyManagementException, NoSuchAlgorithmException, JMSException { + final List stateQueueMessages; + doCallRealMethod() + .doCallRealMethod() + .doAnswer(invocation -> { + // Send the record as expected + final SinkRecord sinkRecord = invocation.getArgument(0); + jmsWorkerSpy.send(sinkRecord); + + // But also do the STOP channel + mqRestApiHelper.sendCommand(STOP_CHANNEL); + return null; + }) + .doCallRealMethod() // --> failure happens the next time we call. + .when(jmsWorkerSpy).send(any(SinkRecord.class)); + + assertThrows(ConnectException.class, () -> { + mqSinkTask.put(sinkRecords); + }); + mqRestApiHelper.sendCommand(START_CHANNEL); + assertThat(browseAllMessagesFromQueue(DEFAULT_SINK_STATE_QUEUE_NAME)) + .extracting(message -> message.getBody(String.class)) + .extracting(message -> extractHashMapFromString(message)) + .containsExactly( + new HashMap() { + { + put("TOPIC-A-0", "1"); + put("TOPIC-A-1", "1"); + put("TOPIC-A-2", "1"); + put("TOPIC-B-0", "1"); + put("TOPIC-B-1", "1"); + put("TOPIC-B-2", "1"); + } + }); + mqSinkTask.put(sinkRecords); + assertThat(browseAllMessagesFromQueue(DEFAULT_SINK_STATE_QUEUE_NAME)) + .extracting(message -> message.getBody(String.class)) + .extracting(message -> extractHashMapFromString(message)) + .containsExactly( + new HashMap() { + { + put("TOPIC-A-0", "2"); + put("TOPIC-A-1", "2"); + put("TOPIC-A-2", "2"); + put("TOPIC-B-0", "2"); + put("TOPIC-B-1", "2"); + put("TOPIC-B-2", "2"); + } + }); + // Closest we can simulate a connect "crash", the idea being that this would + // happen after MQ commit, before Kafka committed the records + mqSinkTask.stop(); + final MQSinkTask mqSinkTaskNew = getMqSinkTask(connectorProps); + // Put called again with the same records + a few more. 
+ final List newSinkRecords = createSinkRecordsFromOffsets(Arrays.asList( + 127L, + 128L, + 129L)); + sinkRecords.addAll(newSinkRecords); + mqSinkTaskNew.put(sinkRecords); + + assertThat(browseAllMessagesFromQueue(DEFAULT_SINK_QUEUE_NAME)) + .extracting(message -> message.getBody(String.class)) + .containsExactly( + "TOPIC-A-0-0L", + "TOPIC-A-1-0L", + "TOPIC-A-2-0L", + "TOPIC-B-0-0L", + "TOPIC-B-1-0L", + "TOPIC-B-2-0L", + "TOPIC-A-0-1L", + "TOPIC-A-1-1L", + "TOPIC-A-2-1L", + "TOPIC-B-0-1L", + "TOPIC-B-1-1L", + "TOPIC-B-2-1L", + "TOPIC-A-0-2L", + "TOPIC-A-1-2L", + "TOPIC-A-2-2L", + "TOPIC-B-0-2L", + "TOPIC-B-1-2L", + "TOPIC-B-2-2L", + "Message with offset 127 ", + "Message with offset 128 ", + "Message with offset 129 "); + stateQueueMessages = browseAllMessagesFromQueue(DEFAULT_SINK_STATE_QUEUE_NAME); + assertThat(stateQueueMessages) + .extracting(message -> message.getBody(String.class)) + .extracting(message -> extractHashMapFromString(message)) + .containsExactly( + new HashMap() { + { + put("TOPIC-A-0", "2"); + put("TOPIC-A-1", "2"); + put("TOPIC-A-2", "2"); + put("TOPIC-B-0", "2"); + put("TOPIC-B-1", "2"); + put("TOPIC-B-2", "2"); + put(TOPIC + "-" + PARTITION, "129"); + } + }); + } + + @Test + public void testFailureOfWriteLastRecordOffsetToStateQueue() throws JsonProcessingException, JMSException { + // In this test we simulate a failure of the writeLastRecordOffsetToStateQueue + // method. We do this by creating a spy of the ObjectMapper and throwing an + // exception when the method is called. We then check that the state queue is + // not updated and that the records are not committed to MQ. 
+ final Map connectorProps = getExactlyOnceConnectionDetails(); + + // ----------------------------------- + // We send 2 records to MQ so that the last committed offset is saved to the + // state queue + final MQSinkTask mqSinkTask = getMqSinkTask(connectorProps); + // put in two record to see if the put is working + final List sinkRecords = createSinkRecordsFromOffsets(Arrays.asList( + 121L, + 122L)); // This will get saved in the state queue + mqSinkTask.put(sinkRecords); + + assertThat(browseAllMessagesFromQueue(DEFAULT_SINK_STATE_QUEUE_NAME)) + .extracting(message -> message.getBody(String.class)) + .extracting(message -> extractOffsetFromHashMapString(message, TOPIC + "-" + PARTITION)) + .containsExactly("122"); + + // ----------------------------------- + // recreate JsonProcessingException when calling + // jmsWorker.writeLastRecordOffsetToStateQueue(lastCommittedOffsetMap); This + // will cause the put method to fail. We check that the state queue is not + // updated and that the records are not committed to MQ. + mqSinkTask.worker.mapper = spy(ObjectMapper.class); + final List failingSinkRecords = getSinkRecordThatThrowsJSONProcessingException(mqSinkTask); + assertThrows(ConnectException.class, () -> { + mqSinkTask.put(failingSinkRecords); + }); + + assertThat(browseAllMessagesFromQueue(DEFAULT_SINK_QUEUE_NAME)) + .extracting(message -> message.getBody(String.class)) + .containsExactly( + "Message with offset 121 ", + "Message with offset 122 "); + assertThat(browseAllMessagesFromQueue(DEFAULT_SINK_STATE_QUEUE_NAME)) + .extracting(message -> message.getBody(String.class)) + .extracting(message -> extractOffsetFromHashMapString(message, TOPIC + "-" + PARTITION)) + .containsExactly("122"); + + // ----------------------------------- + // We recreate the MQSinkTask and send 5 records to MQ, The first 2 records are + // the records from the previous failed put. The last 3 records are new records. 
+ // We check that the state queue is updated and that the records are committed + // to MQ. + + // Closest we can simulate a connect "crash", the idea being that this would + // happen after MQ commit, before Kafka committed the records + mqSinkTask.stop(); + mqSinkTask.start(connectorProps); + + // Put called again with the same records + a few more. + final List newSinkRecords = createSinkRecordsFromOffsets(Arrays.asList( + 125L, + 126L, + 127L)); // This will get saved in the state queue + failingSinkRecords.addAll(newSinkRecords); + sinkRecords.addAll(failingSinkRecords); + mqSinkTask.put(sinkRecords); + + assertThat(browseAllMessagesFromQueue(DEFAULT_SINK_QUEUE_NAME)) + .extracting(message -> message.getBody(String.class)) + .containsExactly( + "Message with offset 121 ", + "Message with offset 122 ", + "Message with offset 123 ", + "Message with offset 124 ", + "Message with offset 125 ", + "Message with offset 126 ", + "Message with offset 127 "); + assertThat(browseAllMessagesFromQueue(DEFAULT_SINK_STATE_QUEUE_NAME)) + .extracting(message -> message.getBody(String.class)) + .extracting(message -> extractOffsetFromHashMapString(message, TOPIC + "-" + PARTITION)) + .containsExactly("127"); + } + + @Test + public void testFailureOfReadFromStateQueue() throws JsonProcessingException, JMSException { + // In this test we simulate a failure of the readFromStateQueue method. We do + // this by creating a spy of the JMSConsumer and throwing an exception when the + // method is called. We then check that the state queue is not updated and that + // the records are not committed to MQ. 
+ + final Map connectorProps = getExactlyOnceConnectionDetails(); + + // ----------------------------------- + // We send 2 records to MQ so that the last committed offset is saved to the + // state queue + final MQSinkTask mqSinkTask = getMqSinkTask(connectorProps); + final List sinkRecords = createSinkRecordsFromOffsets(Arrays.asList( + 121L, + 122L)); // This will get saved in the state queue + mqSinkTask.put(sinkRecords); + + assertThat(browseAllMessagesFromQueue(DEFAULT_SINK_STATE_QUEUE_NAME)) + .extracting(message -> message.getBody(String.class)) + .extracting(message -> extractOffsetFromHashMapString(message, TOPIC + "-" + PARTITION)) + .containsExactly("122"); + + // ----------------------------------- + // We recreate the MQSinkTask and send 2 records to MQ. We check that the state + // queue is not updated and that the records are not committed to MQ. + final JMSConsumer jmsComsumerSpy = spy(mqSinkTask.worker.jmsCons); + mqSinkTask.worker.jmsCons = jmsComsumerSpy; + final List failingSinkRecords = createSinkRecordsFromOffsets(Arrays.asList( + 123L, + 124L)); + + // This recreates the exception that is thrown when the readFromStateQueue + doThrow(new JMSRuntimeException("This is a JMSException caused by a spy!!")) + .when(jmsComsumerSpy) + .receiveNoWait(); + + assertThrows(ConnectException.class, () -> { + mqSinkTask.put(failingSinkRecords); + }); + + assertThat(browseAllMessagesFromQueue(DEFAULT_SINK_QUEUE_NAME)) + .extracting(message -> message.getBody(String.class)) + .containsExactly( + "Message with offset 121 ", + "Message with offset 122 "); + assertThat(browseAllMessagesFromQueue(DEFAULT_SINK_STATE_QUEUE_NAME)) + .extracting(message -> message.getBody(String.class)) + .extracting(message -> extractOffsetFromHashMapString(message, TOPIC + "-" + PARTITION)) + .containsExactly("122"); + + // ----------------------------------- + // We recreate the MQSinkTask and send 5 records to MQ, The first 2 records are + // the records from the previous failed 
put. The last 3 records are new records. + // We check that the state queue is updated and that the records are committed + // to MQ. + + // Closest we can simulate a connect "crash", the idea being that this would + // happen after MQ commit, before Kafka committed the records + mqSinkTask.stop(); + mqSinkTask.start(connectorProps); + + // Put called again with the same records + a few more. + final List newSinkRecords = createSinkRecordsFromOffsets(Arrays.asList( + 125L, + 126L, + 127L)); // This will get saved in the state queue + failingSinkRecords.addAll(newSinkRecords); + sinkRecords.addAll(failingSinkRecords); + mqSinkTask.put(sinkRecords); + + assertThat(browseAllMessagesFromQueue(DEFAULT_SINK_QUEUE_NAME)) + .extracting(message -> message.getBody(String.class)) + .containsExactly( + "Message with offset 121 ", + "Message with offset 122 ", + "Message with offset 123 ", + "Message with offset 124 ", + "Message with offset 125 ", + "Message with offset 126 ", + "Message with offset 127 "); + assertThat(browseAllMessagesFromQueue(DEFAULT_SINK_STATE_QUEUE_NAME)) + .extracting(message -> message.getBody(String.class)) + .extracting(message -> extractOffsetFromHashMapString(message, TOPIC + "-" + PARTITION)) + .containsExactly("127"); + } + + private List getSinkRecordThatThrowsJSONProcessingException(final MQSinkTask mqSinkTask) + throws JsonProcessingException { + final List sinkRecords; + sinkRecords = createSinkRecordsFromOffsets(Arrays.asList( + 123L, + 124L)); + + final HashMap lastCommittedOffsetMapThatNeedsToThrowTheException = new HashMap() { + { + put(TOPIC + "-" + PARTITION, String.valueOf(124)); + } + }; + when(mqSinkTask.worker.mapper.writeValueAsString(lastCommittedOffsetMapThatNeedsToThrowTheException)) + .thenThrow(new JsonProcessingException("This a test exception") { + private static final long serialVersionUID = 1L; + }); + return sinkRecords; + } +} diff --git 
a/src/integration/java/com/ibm/eventstreams/connect/mqsink/MQSinkTaskExceptionHandlingIT.java b/src/integration/java/com/ibm/eventstreams/connect/mqsink/MQSinkTaskExceptionHandlingIT.java new file mode 100644 index 0000000..4dcffe6 --- /dev/null +++ b/src/integration/java/com/ibm/eventstreams/connect/mqsink/MQSinkTaskExceptionHandlingIT.java @@ -0,0 +1,257 @@ +/** + * Copyright 2023, 2024 IBM Corporation + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package com.ibm.eventstreams.connect.mqsink; + +import static com.ibm.eventstreams.connect.mqsink.util.MQRestAPIHelper.START_CHANNEL; +import static com.ibm.eventstreams.connect.mqsink.util.MQRestAPIHelper.STOP_CHANNEL; +import static org.assertj.core.api.Assertions.assertThat; +import static org.junit.Assert.assertThrows; +import static org.mockito.Mockito.doThrow; +import static org.mockito.Mockito.spy; +import static org.mockito.Mockito.times; +import static org.mockito.Mockito.verify; +import static org.mockito.Mockito.when; + +import java.io.IOException; +import java.security.KeyManagementException; +import java.security.NoSuchAlgorithmException; +import java.util.List; +import java.util.Map; + +import javax.jms.InvalidDestinationRuntimeException; +import javax.jms.JMSException; +import javax.jms.JMSRuntimeException; +import javax.jms.Message; +import javax.jms.TextMessage; + +import org.apache.kafka.connect.errors.ConnectException; +import org.apache.kafka.connect.errors.RetriableException; +import 
org.apache.kafka.connect.sink.SinkRecord; +import org.junit.After; +import org.junit.Test; + +import com.fasterxml.jackson.core.JsonProcessingException; +import com.ibm.eventstreams.connect.mqsink.utils.Configs; +import com.ibm.mq.MQException; + +public class MQSinkTaskExceptionHandlingIT extends AbstractJMSContextIT { + + @After + public void cleanup() throws Exception { + clearAllMessages(DEFAULT_SINK_QUEUE_NAME); + clearAllMessages(DEFAULT_SINK_STATE_QUEUE_NAME); + } + + @Test + public void testMQSinkTaskStartJMSException() { + final Map connectorConfigProps = getExactlyOnceConnectionDetails(); + final MQSinkTask connectTaskSpy = spy(getMqSinkTask(connectorConfigProps)); + + final JMSWorker jmsWorkerSpy = spy(JMSWorker.class); + when(connectTaskSpy.newJMSWorker()).thenReturn(jmsWorkerSpy); + doThrow(new JMSRuntimeException("This is a JMSException caused by a spy!!")) + .when(jmsWorkerSpy) + .configureProducer(); + + assertThrows(ConnectException.class, () -> { + connectTaskSpy.start(connectorConfigProps); + }); + } + + @Test + public void testMQSinkTaskStartJMSWorkerConnectionException() { + final Map connectorConfigProps = getExactlyOnceConnectionDetails(); + final MQSinkTask connectTaskSpy = spy(getMqSinkTask(connectorConfigProps)); + + final JMSWorker jmsWorkerSpy = spy(JMSWorker.class); + when(connectTaskSpy.newJMSWorker()).thenReturn(jmsWorkerSpy); + doThrow(new JMSWorkerConnectionException("This is a JMSWorkerConnectionException caused by a spy!!")) + .when(jmsWorkerSpy) + .configure(Configs.customConfig(connectorConfigProps)); + + assertThrows(ConnectException.class, () -> { + connectTaskSpy.start(connectorConfigProps); + }); + } + + @Test + public void testMQSinkTaskStartInvalidDestinationRuntimeException() { + final Map connectorConfigProps = getExactlyOnceConnectionDetails(); + final MQSinkTask connectTaskSpy = spy(getMqSinkTask(connectorConfigProps)); + + final JMSWorker jmsWorkerSpy = spy(JMSWorker.class); + 
when(connectTaskSpy.newJMSWorker()).thenReturn(jmsWorkerSpy); + doThrow(new InvalidDestinationRuntimeException( + "This is a InvalidDestinationRuntimeException caused by a spy!!")) + .when(jmsWorkerSpy) + .createConsumerForStateQueue(); + + assertThrows(ConnectException.class, () -> { + connectTaskSpy.start(connectorConfigProps); + }); + } + + @Test + public void testPutDoesThrowExceptionDueToMQConnectionError() + throws JMSException, KeyManagementException, NoSuchAlgorithmException, IOException { + + assertThat(getAllMessagesFromQueue(DEFAULT_SINK_QUEUE_NAME).size()).isEqualTo(0); + + final Map connectorConfigProps = getExactlyOnceConnectionDetails(); + + final MQSinkTask connectTaskSpy = spy(getMqSinkTask(connectorConfigProps)); + + connectTaskSpy.start(connectorConfigProps); + + final List sinkRecords = createSinkRecords(10); + + connectTaskSpy.put(sinkRecords); + + assertThat(browseAllMessagesFromQueue(DEFAULT_SINK_QUEUE_NAME).size()).isEqualTo(10); + + mqRestApiHelper.sendCommand(STOP_CHANNEL); + assertThrows(RetriableException.class, () -> { + connectTaskSpy.put(sinkRecords); + }); + + mqRestApiHelper.sendCommand(START_CHANNEL); + + assertThat(getAllMessagesFromQueue(DEFAULT_SINK_QUEUE_NAME)) + .extracting((Message message) -> ((TextMessage) message).getText()) + .containsExactly("Message with offset 0 ", + "Message with offset 1 ", + "Message with offset 2 ", + "Message with offset 3 ", + "Message with offset 4 ", + "Message with offset 5 ", + "Message with offset 6 ", + "Message with offset 7 ", + "Message with offset 8 ", + "Message with offset 9 "); + verify(connectTaskSpy.getContext(), times(1)).timeout(connectTaskSpy.retryBackoffMs); + verify(connectTaskSpy, times(1)).stop(); + } + + @Test + public void testMQSinkTaskPutJsonProcessingException() + throws JsonProcessingException, JMSRuntimeException, JMSException { + final Map connectorConfigProps = getExactlyOnceConnectionDetails(); + final MQSinkTask connectTaskSpy = 
spy(getMqSinkTask(connectorConfigProps)); + + final JMSWorker jmsWorkerSpy = spy(JMSWorker.class); + when(connectTaskSpy.newJMSWorker()).thenReturn(jmsWorkerSpy); + doThrow(new JsonProcessingException("This is a JsonProcessingException caused by a spy!!") { + private static final long serialVersionUID = 1L; + }) + .when(jmsWorkerSpy) + .readFromStateQueue(); + + connectTaskSpy.start(connectorConfigProps); + final List sinkRecords = createSinkRecords(10); + assertThrows(ConnectException.class, () -> { + connectTaskSpy.put(sinkRecords); + }); + verify(connectTaskSpy.getContext(), times(0)).timeout(connectTaskSpy.retryBackoffMs); + verify(connectTaskSpy, times(1)).stop(); + } + + @Test + public void testMQSinkTaskPutJMSWithRetriableException() + throws JsonProcessingException, JMSRuntimeException, JMSException { + final Map connectorConfigProps = getExactlyOnceConnectionDetails(); + final MQSinkTask connectTaskSpy = spy(getMqSinkTask(connectorConfigProps)); + + final JMSWorker jmsWorkerSpy = spy(JMSWorker.class); + final MQException exp = new MQException(1, 2003, getClass()); + when(connectTaskSpy.newJMSWorker()).thenReturn(jmsWorkerSpy); + doThrow(new JMSRuntimeException("This is a JMSRuntimeException caused by a spy!!", "custom error code", exp)) + .when(jmsWorkerSpy) + .readFromStateQueue(); + + connectTaskSpy.start(connectorConfigProps); + final List sinkRecords = createSinkRecords(10); + assertThrows(RetriableException.class, () -> { + connectTaskSpy.put(sinkRecords); + }); + verify(connectTaskSpy.getContext(), times(1)).timeout(connectTaskSpy.retryBackoffMs); + verify(connectTaskSpy, times(1)).stop(); + } + + @Test + public void testMQSinkTaskPutJMSException() + throws JsonProcessingException, JMSRuntimeException, JMSException { + final Map connectorConfigProps = getExactlyOnceConnectionDetails(); + final MQSinkTask connectTaskSpy = spy(getMqSinkTask(connectorConfigProps)); + + final JMSWorker jmsWorkerSpy = spy(JMSWorker.class); + 
when(connectTaskSpy.newJMSWorker()).thenReturn(jmsWorkerSpy); + doThrow(new JMSRuntimeException("This is a JMSRuntimeException caused by a spy!!", "custom error code")) + .when(jmsWorkerSpy) + .readFromStateQueue(); + + connectTaskSpy.start(connectorConfigProps); + final List sinkRecords = createSinkRecords(10); + assertThrows(ConnectException.class, () -> { + connectTaskSpy.put(sinkRecords); + }); + verify(connectTaskSpy.getContext(), times(0)).timeout(connectTaskSpy.retryBackoffMs); + verify(connectTaskSpy, times(1)).stop(); + } + + @Test + public void testMQSinkTaskPutJMSWithoutRetriableException() + throws JsonProcessingException, JMSRuntimeException, JMSException { + final Map connectorConfigProps = getExactlyOnceConnectionDetails(); + final MQSinkTask connectTaskSpy = spy(getMqSinkTask(connectorConfigProps)); + + final JMSWorker jmsWorkerSpy = spy(JMSWorker.class); + final MQException exp = new MQException(1, 1, getClass()); + when(connectTaskSpy.newJMSWorker()).thenReturn(jmsWorkerSpy); + doThrow(new JMSRuntimeException("This is a JMSRuntimeException caused by a spy!!", "custom error code", exp)) + .when(jmsWorkerSpy) + .readFromStateQueue(); + + connectTaskSpy.start(connectorConfigProps); + final List sinkRecords = createSinkRecords(10); + assertThrows(ConnectException.class, () -> { + connectTaskSpy.put(sinkRecords); + }); + verify(connectTaskSpy.getContext(), times(0)).timeout(connectTaskSpy.retryBackoffMs); + verify(connectTaskSpy, times(1)).stop(); + } + + @Test + public void testMQSinkTaskPutRetriableException() + throws JsonProcessingException, JMSRuntimeException, JMSException { + final Map connectorConfigProps = getExactlyOnceConnectionDetails(); + final MQSinkTask connectTaskSpy = spy(getMqSinkTask(connectorConfigProps)); + + final JMSWorker jmsWorkerSpy = spy(JMSWorker.class); + final MQException exp = new MQException(1, 2053, getClass()); + when(connectTaskSpy.newJMSWorker()).thenReturn(jmsWorkerSpy); + doThrow(new RetriableException("This is a 
RetriableException caused by a spy!!", exp)) + .when(jmsWorkerSpy) + .readFromStateQueue(); + + connectTaskSpy.start(connectorConfigProps); + final List sinkRecords = createSinkRecords(10); + assertThrows(RetriableException.class, () -> { + connectTaskSpy.put(sinkRecords); + }); + verify(connectTaskSpy.getContext(), times(1)).timeout(connectTaskSpy.retryBackoffMs); + verify(connectTaskSpy, times(0)).stop(); + } +} diff --git a/src/integration/java/com/ibm/eventstreams/connect/mqsink/MQSinkTaskIT.java b/src/integration/java/com/ibm/eventstreams/connect/mqsink/MQSinkTaskIT.java index d211345..f6c8ebf 100644 --- a/src/integration/java/com/ibm/eventstreams/connect/mqsink/MQSinkTaskIT.java +++ b/src/integration/java/com/ibm/eventstreams/connect/mqsink/MQSinkTaskIT.java @@ -1,3 +1,18 @@ +/** + * Copyright 2023, 2024 IBM Corporation + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ package com.ibm.eventstreams.connect.mqsink; import static org.junit.Assert.assertEquals; @@ -12,50 +27,50 @@ import javax.jms.Message; import javax.jms.TextMessage; +import org.apache.kafka.common.config.ConfigException; import org.apache.kafka.clients.consumer.OffsetAndMetadata; import org.apache.kafka.common.TopicPartition; import org.apache.kafka.connect.data.Schema; import org.apache.kafka.connect.data.SchemaBuilder; import org.apache.kafka.connect.data.Struct; -import org.apache.kafka.connect.errors.ConnectException; import org.apache.kafka.connect.sink.SinkRecord; import org.junit.Test; +import com.ibm.eventstreams.connect.mqsink.builders.MessageBuilderException; + public class MQSinkTaskIT extends AbstractJMSContextIT { private static final String TOPIC = "SINK.TOPIC.NAME"; private static final int PARTITION = 3; - private long OFFSET = 0; - + private long commonOffset = 0; - private SinkRecord generateSinkRecord(Schema valueSchema, Object value) { + private SinkRecord generateSinkRecord(final Schema valueSchema, final Object value) { return new SinkRecord(TOPIC, PARTITION, null, null, valueSchema, value, - OFFSET++); + commonOffset++); } - private Map createDefaultConnectorProperties() { - Map props = new HashMap<>(); + final Map props = new HashMap<>(); props.put("mq.queue.manager", getQmgrName()); - props.put("mq.connection.mode", "client"); + props.put("mq.connection.mode", CONNECTION_MODE); props.put("mq.connection.name.list", getConnectionName()); props.put("mq.channel.name", getChannelName()); - props.put("mq.queue", "DEV.QUEUE.1"); - props.put("mq.user.authentication.mqcsp", "false"); + props.put("mq.queue", DEFAULT_SINK_QUEUE_NAME); + props.put("mq.user.authentication.mqcsp", String.valueOf(USER_AUTHENTICATION_MQCSP)); return props; } - @Test public void verifyUnsupportedReplyQueueName() { - Map connectorConfigProps = createDefaultConnectorProperties(); - connectorConfigProps.put("mq.message.builder", 
"com.ibm.eventstreams.connect.mqsink.builders.DefaultMessageBuilder"); + final Map connectorConfigProps = createDefaultConnectorProperties(); + connectorConfigProps.put("mq.message.builder", + DEFAULT_MESSAGE_BUILDER); connectorConfigProps.put("mq.reply.queue", "queue://QM2/Q2?persistence=2&priority=5"); - MQSinkTask newConnectTask = new MQSinkTask(); - ConnectException exc = assertThrows(ConnectException.class, () -> { + final MQSinkTask newConnectTask = new MQSinkTask(); + final MessageBuilderException exc = assertThrows(MessageBuilderException.class, () -> { newConnectTask.start(connectorConfigProps); }); @@ -64,40 +79,41 @@ public void verifyUnsupportedReplyQueueName() { @Test public void verifyUnsupportedKeyHeader() { - Map connectorConfigProps = createDefaultConnectorProperties(); - connectorConfigProps.put("mq.message.builder", "com.ibm.eventstreams.connect.mqsink.builders.DefaultMessageBuilder"); + final Map connectorConfigProps = createDefaultConnectorProperties(); + connectorConfigProps.put("mq.message.builder", + DEFAULT_MESSAGE_BUILDER); connectorConfigProps.put("mq.message.builder.key.header", "hello"); - MQSinkTask newConnectTask = new MQSinkTask(); - ConnectException exc = assertThrows(ConnectException.class, () -> { + final MQSinkTask newConnectTask = new MQSinkTask(); + final ConfigException exc = assertThrows(ConfigException.class, () -> { newConnectTask.start(connectorConfigProps); }); - assertEquals("Unsupported MQ message builder key header value", exc.getMessage()); + assertEquals("Invalid value hello for configuration mq.message.builder.key.header: String must be one of: null, JMSCorrelationID", exc.getMessage()); } - @Test public void verifyStringMessages() throws JMSException { - MQSinkTask newConnectTask = new MQSinkTask(); + final MQSinkTask newConnectTask = new MQSinkTask(); // configure a sink task for string messages - Map connectorConfigProps = createDefaultConnectorProperties(); - connectorConfigProps.put("mq.message.builder", 
"com.ibm.eventstreams.connect.mqsink.builders.DefaultMessageBuilder"); + final Map connectorConfigProps = createDefaultConnectorProperties(); + connectorConfigProps.put("mq.message.builder", + DEFAULT_MESSAGE_BUILDER); // start the task so that it connects to MQ newConnectTask.start(connectorConfigProps); // create some test messages - List records = new ArrayList<>(); + final List records = new ArrayList<>(); records.add(generateSinkRecord(null, "hello")); records.add(generateSinkRecord(null, "world")); newConnectTask.put(records); // flush the messages - Map offsets = new HashMap<>(); - TopicPartition topic = new TopicPartition(TOPIC, PARTITION); - OffsetAndMetadata offset = new OffsetAndMetadata(OFFSET); + final Map offsets = new HashMap<>(); + final TopicPartition topic = new TopicPartition(TOPIC, PARTITION); + final OffsetAndMetadata offset = new OffsetAndMetadata(commonOffset); offsets.put(topic, offset); newConnectTask.flush(offsets); @@ -105,7 +121,7 @@ public void verifyStringMessages() throws JMSException { newConnectTask.stop(); // verify that the messages were successfully submitted to MQ - List messagesInMQ = getAllMessagesFromQueue("DEV.QUEUE.1"); + final List messagesInMQ = getAllMessagesFromQueue(DEFAULT_SINK_QUEUE_NAME); assertEquals(2, messagesInMQ.size()); assertEquals("hello", messagesInMQ.get(0).getBody(String.class)); assertEquals("world", messagesInMQ.get(1).getBody(String.class)); @@ -113,15 +129,16 @@ public void verifyStringMessages() throws JMSException { @Test public void verifyStringJmsMessages() throws JMSException { - MQSinkTask newConnectTask = new MQSinkTask(); + final MQSinkTask newConnectTask = new MQSinkTask(); - String topicProperty = "PutTopicNameHere"; - String partitionProperty = "PutTopicPartitionHere"; - String offsetProperty = "PutOffsetHere"; + final String topicProperty = "PutTopicNameHere"; + final String partitionProperty = "PutTopicPartitionHere"; + final String offsetProperty = "PutOffsetHere"; // configure a sink 
task for string messages - Map connectorConfigProps = createDefaultConnectorProperties(); - connectorConfigProps.put("mq.message.builder", "com.ibm.eventstreams.connect.mqsink.builders.DefaultMessageBuilder"); + final Map connectorConfigProps = createDefaultConnectorProperties(); + connectorConfigProps.put("mq.message.builder", + DEFAULT_MESSAGE_BUILDER); connectorConfigProps.put("mq.message.body.jms", "true"); connectorConfigProps.put("mq.message.builder.topic.property", topicProperty); connectorConfigProps.put("mq.message.builder.partition.property", partitionProperty); @@ -132,21 +149,21 @@ public void verifyStringJmsMessages() throws JMSException { newConnectTask.start(connectorConfigProps); // create some test messages - List records = new ArrayList<>(); + final List records = new ArrayList<>(); records.add(new SinkRecord(TOPIC, PARTITION, - Schema.STRING_SCHEMA, "key0", - null, "hello", - OFFSET++)); + Schema.STRING_SCHEMA, "key0", + null, "hello", + commonOffset++)); records.add(new SinkRecord(TOPIC, PARTITION, - Schema.STRING_SCHEMA, "key1", - null, "world", - OFFSET++)); + Schema.STRING_SCHEMA, "key1", + null, "world", + commonOffset++)); newConnectTask.put(records); // flush the messages - Map offsets = new HashMap<>(); - TopicPartition topic = new TopicPartition(TOPIC, PARTITION); - OffsetAndMetadata offset = new OffsetAndMetadata(OFFSET); + final Map offsets = new HashMap<>(); + final TopicPartition topic = new TopicPartition(TOPIC, PARTITION); + final OffsetAndMetadata offset = new OffsetAndMetadata(commonOffset); offsets.put(topic, offset); newConnectTask.flush(offsets); @@ -154,7 +171,7 @@ public void verifyStringJmsMessages() throws JMSException { newConnectTask.stop(); // verify that the messages were successfully submitted to MQ - List messagesInMQ = getAllMessagesFromQueue("DEV.QUEUE.1"); + final List messagesInMQ = getAllMessagesFromQueue(DEFAULT_SINK_QUEUE_NAME); assertEquals(2, messagesInMQ.size()); assertEquals("hello", 
messagesInMQ.get(0).getBody(String.class)); assertEquals("world", messagesInMQ.get(1).getBody(String.class)); @@ -165,43 +182,43 @@ public void verifyStringJmsMessages() throws JMSException { assertEquals(PARTITION, messagesInMQ.get(i).getIntProperty(partitionProperty)); assertEquals("key" + i, messagesInMQ.get(i).getJMSCorrelationID()); } - assertEquals(OFFSET - 2, messagesInMQ.get(0).getLongProperty(offsetProperty)); - assertEquals(OFFSET - 1, messagesInMQ.get(1).getLongProperty(offsetProperty)); + assertEquals(commonOffset - 2, messagesInMQ.get(0).getLongProperty(offsetProperty)); + assertEquals(commonOffset - 1, messagesInMQ.get(1).getLongProperty(offsetProperty)); } - @Test public void verifyJsonMessages() throws JMSException { - MQSinkTask newConnectTask = new MQSinkTask(); + final MQSinkTask newConnectTask = new MQSinkTask(); // configure a sink task for JSON messages - Map connectorConfigProps = createDefaultConnectorProperties(); - connectorConfigProps.put("mq.message.builder", "com.ibm.eventstreams.connect.mqsink.builders.JsonMessageBuilder"); + final Map connectorConfigProps = createDefaultConnectorProperties(); + connectorConfigProps.put("mq.message.builder", + "com.ibm.eventstreams.connect.mqsink.builders.JsonMessageBuilder"); connectorConfigProps.put("mq.message.builder.value.converter", "org.apache.kafka.connect.json.JsonConverter"); // start the task so that it connects to MQ newConnectTask.start(connectorConfigProps); // create some test messages - Schema fruitSchema = SchemaBuilder.struct() + final Schema fruitSchema = SchemaBuilder.struct() .name("com.ibm.eventstreams.tests.Fruit") .field("fruit", Schema.STRING_SCHEMA) .build(); - Struct apple = new Struct(fruitSchema).put("fruit", "apple"); - Struct banana = new Struct(fruitSchema).put("fruit", "banana"); - Struct pear = new Struct(fruitSchema).put("fruit", "pear"); + final Struct apple = new Struct(fruitSchema).put("fruit", "apple"); + final Struct banana = new Struct(fruitSchema).put("fruit", 
"banana"); + final Struct pear = new Struct(fruitSchema).put("fruit", "pear"); // give the test messages to the sink task - List records = new ArrayList<>(); + final List records = new ArrayList<>(); records.add(generateSinkRecord(fruitSchema, apple)); records.add(generateSinkRecord(fruitSchema, banana)); records.add(generateSinkRecord(fruitSchema, pear)); newConnectTask.put(records); // flush the messages - Map offsets = new HashMap<>(); - TopicPartition topic = new TopicPartition(TOPIC, PARTITION); - OffsetAndMetadata offset = new OffsetAndMetadata(OFFSET); + final Map offsets = new HashMap<>(); + final TopicPartition topic = new TopicPartition(TOPIC, PARTITION); + final OffsetAndMetadata offset = new OffsetAndMetadata(commonOffset); offsets.put(topic, offset); newConnectTask.flush(offsets); @@ -209,7 +226,7 @@ public void verifyJsonMessages() throws JMSException { newConnectTask.stop(); // verify that the messages were successfully submitted to MQ - List messagesInMQ = getAllMessagesFromQueue("DEV.QUEUE.1"); + final List messagesInMQ = getAllMessagesFromQueue(DEFAULT_SINK_QUEUE_NAME); assertEquals(3, messagesInMQ.size()); assertEquals("{\"fruit\":\"apple\"}", messagesInMQ.get(0).getBody(String.class)); assertEquals("{\"fruit\":\"banana\"}", messagesInMQ.get(1).getBody(String.class)); @@ -218,54 +235,60 @@ public void verifyJsonMessages() throws JMSException { @Test public void verifyStringWithDefaultBuilder() throws JMSException { - Map connectorConfigProps = createDefaultConnectorProperties(); - connectorConfigProps.put("mq.message.builder.value.converter", "org.apache.kafka.connect.json.StringConverter"); - connectorConfigProps.put("mq.message.builder", "com.ibm.eventstreams.connect.mqsink.builders.DefaultMessageBuilder"); + final Map connectorConfigProps = createDefaultConnectorProperties(); + connectorConfigProps.put("mq.message.builder.value.converter", "org.apache.kafka.connect.storage.StringConverter"); + connectorConfigProps.put("mq.message.builder", + 
DEFAULT_MESSAGE_BUILDER); verifyMessageConversion(connectorConfigProps, Schema.STRING_SCHEMA, "ABC", "ABC"); } + @Test public void verifyStringWithJsonBuilder() throws JMSException { - Map connectorConfigProps = createDefaultConnectorProperties(); - connectorConfigProps.put("mq.message.builder.value.converter", "org.apache.kafka.connect.json.StringConverter"); - connectorConfigProps.put("mq.message.builder", "com.ibm.eventstreams.connect.mqsink.builders.JsonMessageBuilder"); + final Map connectorConfigProps = createDefaultConnectorProperties(); + connectorConfigProps.put("mq.message.builder.value.converter", "org.apache.kafka.connect.storage.StringConverter"); + connectorConfigProps.put("mq.message.builder", + "com.ibm.eventstreams.connect.mqsink.builders.JsonMessageBuilder"); verifyMessageConversion(connectorConfigProps, Schema.STRING_SCHEMA, "ABC", "\"ABC\""); } + @Test public void verifyJsonWithDefaultBuilder() throws JMSException { - Map connectorConfigProps = createDefaultConnectorProperties(); + final Map connectorConfigProps = createDefaultConnectorProperties(); connectorConfigProps.put("mq.message.builder.value.converter", "org.apache.kafka.connect.json.JsonConverter"); - connectorConfigProps.put("mq.message.builder", "com.ibm.eventstreams.connect.mqsink.builders.DefaultMessageBuilder"); + connectorConfigProps.put("mq.message.builder", + DEFAULT_MESSAGE_BUILDER); verifyMessageConversion(connectorConfigProps, Schema.STRING_SCHEMA, "ABC", "ABC"); } + @Test public void verifyJsonWithJsonBuilder() throws JMSException { - Map connectorConfigProps = createDefaultConnectorProperties(); + final Map connectorConfigProps = createDefaultConnectorProperties(); connectorConfigProps.put("mq.message.builder.value.converter", "org.apache.kafka.connect.json.JsonConverter"); - connectorConfigProps.put("mq.message.builder", "com.ibm.eventstreams.connect.mqsink.builders.JsonMessageBuilder"); + connectorConfigProps.put("mq.message.builder", + 
"com.ibm.eventstreams.connect.mqsink.builders.JsonMessageBuilder"); verifyMessageConversion(connectorConfigProps, Schema.STRING_SCHEMA, "ABC", "\"ABC\""); } - - - private void verifyMessageConversion(Map connectorProps, Schema inputSchema, Object input, String expectedOutput) throws JMSException { - MQSinkTask newConnectTask = new MQSinkTask(); + private void verifyMessageConversion(final Map connectorProps, final Schema inputSchema, + final Object input, final String expectedOutput) throws JMSException { + final MQSinkTask newConnectTask = new MQSinkTask(); // start the task so that it connects to MQ newConnectTask.start(connectorProps); // send test message - List records = new ArrayList<>(); + final List records = new ArrayList<>(); records.add(generateSinkRecord(inputSchema, input)); newConnectTask.put(records); // flush the message - Map offsets = new HashMap<>(); - TopicPartition topic = new TopicPartition(TOPIC, PARTITION); - OffsetAndMetadata offset = new OffsetAndMetadata(OFFSET); + final Map offsets = new HashMap<>(); + final TopicPartition topic = new TopicPartition(TOPIC, PARTITION); + final OffsetAndMetadata offset = new OffsetAndMetadata(commonOffset); offsets.put(topic, offset); newConnectTask.flush(offsets); @@ -273,10 +296,10 @@ private void verifyMessageConversion(Map connectorProps, Schema newConnectTask.stop(); // verify that the messages was successfully submitted to MQ - List messagesInMQ = getAllMessagesFromQueue("DEV.QUEUE.1"); + final List messagesInMQ = getAllMessagesFromQueue(DEFAULT_SINK_QUEUE_NAME); assertEquals(1, messagesInMQ.size()); - TextMessage txtMessage = (TextMessage) messagesInMQ.get(0); - String output = txtMessage.getText(); + final TextMessage txtMessage = (TextMessage) messagesInMQ.get(0); + final String output = txtMessage.getText(); assertEquals(expectedOutput, output); } } diff --git a/src/integration/java/com/ibm/eventstreams/connect/mqsink/builders/DefaultMessageBuilderIT.java 
b/src/integration/java/com/ibm/eventstreams/connect/mqsink/builders/DefaultMessageBuilderIT.java index 4df19c2..aec94a7 100644 --- a/src/integration/java/com/ibm/eventstreams/connect/mqsink/builders/DefaultMessageBuilderIT.java +++ b/src/integration/java/com/ibm/eventstreams/connect/mqsink/builders/DefaultMessageBuilderIT.java @@ -1,5 +1,5 @@ /** - * Copyright 2022 IBM Corporation + * Copyright 2022, 2023, 2024 IBM Corporation * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. @@ -43,24 +43,24 @@ public void prepareMessageBuilder() { builder = new DefaultMessageBuilder(); } - private SinkRecord generateSinkRecord(Schema valueSchema, Object value) { - final String TOPIC = "TOPIC.NAME"; - final int PARTITION = 0; - final long OFFSET = 0; - final Schema KEY_SCHEMA = Schema.STRING_SCHEMA; - final String KEY = "mykey"; + private SinkRecord generateSinkRecord(final Schema valueSchema, final Object value) { + final String topic = "TOPIC.NAME"; + final int partition = 0; + final long offset = 0; + final Schema keySchema = Schema.STRING_SCHEMA; + final String key = "mykey"; - return new SinkRecord(TOPIC, PARTITION, - KEY_SCHEMA, KEY, - valueSchema, value, - OFFSET); + return new SinkRecord(topic, partition, + keySchema, key, + valueSchema, value, + offset); } - @Test public void buildEmptyMessageWithoutSchema() throws Exception { createAndVerifyEmptyMessage(null); } + @Test public void buildEmptyMessageWithSchema() throws Exception { createAndVerifyEmptyMessage(Schema.STRING_SCHEMA); @@ -70,101 +70,108 @@ public void buildEmptyMessageWithSchema() throws Exception { public void buildTextMessageWithoutSchema() throws Exception { createAndVerifyStringMessage(null, "Hello World"); } + @Test public void buildTextMessageWithSchema() throws Exception { createAndVerifyStringMessage(Schema.STRING_SCHEMA, "Hello World with a schema"); } + @Test public void buildIntMessageWithoutSchema() throws 
Exception { createAndVerifyIntegerMessage(null, 1234); } + @Test public void buildIntMessageWithSchema() throws Exception { createAndVerifyIntegerMessage(Schema.INT32_SCHEMA, 1234); } + @Test public void buildByteArrayMessageWithoutSchema() throws Exception { - String TEST_MESSAGE = "This is a test"; - createAndVerifyByteMessage(null, TEST_MESSAGE.getBytes(), TEST_MESSAGE); + final String testMessage = "This is a test"; + createAndVerifyByteMessage(null, testMessage.getBytes(), testMessage); } + @Test public void buildByteArrayMessageWithSchema() throws Exception { - String TEST_MESSAGE = "This is another test"; - createAndVerifyByteMessage(Schema.BYTES_SCHEMA, TEST_MESSAGE.getBytes(), TEST_MESSAGE); + final String testMessage = "This is another test"; + createAndVerifyByteMessage(Schema.BYTES_SCHEMA, testMessage.getBytes(), testMessage); } + @Test public void buildByteBufferMessageWithoutSchema() throws Exception { - String TEST_MESSAGE = "This is also a test!"; - byte[] payload = TEST_MESSAGE.getBytes(); - ByteBuffer value = ByteBuffer.allocate(payload.length); + final String testMessage = "This is also a test!"; + final byte[] payload = testMessage.getBytes(); + final ByteBuffer value = ByteBuffer.allocate(payload.length); value.put(payload); - createAndVerifyByteMessage(null, value, TEST_MESSAGE); + createAndVerifyByteMessage(null, value, testMessage); } + @Test public void buildByteBufferMessageWithSchema() throws Exception { - String TEST_MESSAGE = "This is a bytebuffer test"; - byte[] payload = TEST_MESSAGE.getBytes(); - ByteBuffer value = ByteBuffer.allocate(payload.length); + final String testMessage = "This is a bytebuffer test"; + final byte[] payload = testMessage.getBytes(); + final ByteBuffer value = ByteBuffer.allocate(payload.length); value.put(payload); - createAndVerifyByteMessage(Schema.BYTES_SCHEMA, value, TEST_MESSAGE); + createAndVerifyByteMessage(Schema.BYTES_SCHEMA, value, testMessage); } - + @Test public void buildMessageWithTextHeader() 
throws Exception { - final String TOPIC = "TOPIC.NAME"; - final int PARTITION = 0; - final long OFFSET = 0; - - final String TEST_HEADER_KEY = "TestHeader"; - - ConnectHeaders headers = new ConnectHeaders(); - headers.addString(TEST_HEADER_KEY, "This is a test header"); - - SinkRecord record = new SinkRecord(TOPIC, PARTITION, - Schema.STRING_SCHEMA, "mykey", - Schema.STRING_SCHEMA, "Test message", - OFFSET, - null, TimestampType.NO_TIMESTAMP_TYPE, - headers); - + final String topic = "TOPIC.NAME"; + final int partition = 0; + final long offset = 0; + + final String testHeaderKey = "TestHeader"; + + final ConnectHeaders headers = new ConnectHeaders(); + headers.addString(testHeaderKey, "This is a test header"); + + final SinkRecord record = new SinkRecord(topic, partition, + Schema.STRING_SCHEMA, "mykey", + Schema.STRING_SCHEMA, "Test message", + offset, + null, TimestampType.NO_TIMESTAMP_TYPE, + headers); + // header should not have been copied across by default - Message message = builder.fromSinkRecord(getJmsContext(), record); - assertNull(message.getStringProperty(TEST_HEADER_KEY)); - + final Message message = builder.fromSinkRecord(getJmsContext(), record); + assertNull(message.getStringProperty(testHeaderKey)); + // no message properties should be set by default assertFalse(message.getPropertyNames().hasMoreElements()); } - - private void createAndVerifyEmptyMessage(Schema valueSchema) throws Exception { - Message message = builder.fromSinkRecord(getJmsContext(), generateSinkRecord(valueSchema, null)); + private void createAndVerifyEmptyMessage(final Schema valueSchema) throws Exception { + final Message message = builder.fromSinkRecord(getJmsContext(), generateSinkRecord(valueSchema, null)); assertEquals(null, message.getBody(String.class)); } - private void createAndVerifyStringMessage(Schema valueSchema, String value) throws Exception { - Message message = builder.fromSinkRecord(getJmsContext(), generateSinkRecord(valueSchema, value)); + private void 
createAndVerifyStringMessage(final Schema valueSchema, final String value) throws Exception { + final Message message = builder.fromSinkRecord(getJmsContext(), generateSinkRecord(valueSchema, value)); assertEquals(value, message.getBody(String.class)); - TextMessage textmessage = (TextMessage) message; + final TextMessage textmessage = (TextMessage) message; assertEquals(value, textmessage.getText()); } - private void createAndVerifyIntegerMessage(Schema valueSchema, Integer value) throws Exception { - Message message = builder.fromSinkRecord(getJmsContext(), generateSinkRecord(valueSchema, value)); - Integer intValue = Integer.parseInt(message.getBody(String.class)); + private void createAndVerifyIntegerMessage(final Schema valueSchema, final Integer value) throws Exception { + final Message message = builder.fromSinkRecord(getJmsContext(), generateSinkRecord(valueSchema, value)); + final Integer intValue = Integer.parseInt(message.getBody(String.class)); assertEquals(value, intValue); } - private void createAndVerifyByteMessage(Schema valueSchema, Object value, String valueAsString) throws Exception { - Message message = builder.fromSinkRecord(getJmsContext(), generateSinkRecord(valueSchema, value)); + private void createAndVerifyByteMessage(final Schema valueSchema, final Object value, final String valueAsString) + throws Exception { + final Message message = builder.fromSinkRecord(getJmsContext(), generateSinkRecord(valueSchema, value)); - BytesMessage byteMessage = (BytesMessage) message; + final BytesMessage byteMessage = (BytesMessage) message; byteMessage.reset(); byte[] byteData = null; byteData = new byte[(int) byteMessage.getBodyLength()]; byteMessage.readBytes(byteData); - String stringMessage = new String(byteData); + final String stringMessage = new String(byteData); assertEquals(valueAsString, stringMessage); } } diff --git a/src/integration/java/com/ibm/eventstreams/connect/mqsink/builders/DefaultMessageBuilderWithHeadersIT.java 
b/src/integration/java/com/ibm/eventstreams/connect/mqsink/builders/DefaultMessageBuilderWithHeadersIT.java index 96c5118..59a66ca 100644 --- a/src/integration/java/com/ibm/eventstreams/connect/mqsink/builders/DefaultMessageBuilderWithHeadersIT.java +++ b/src/integration/java/com/ibm/eventstreams/connect/mqsink/builders/DefaultMessageBuilderWithHeadersIT.java @@ -1,5 +1,5 @@ /** - * Copyright 2023 IBM Corporation + * Copyright 2023, 2024 IBM Corporation * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. @@ -41,51 +41,51 @@ public class DefaultMessageBuilderWithHeadersIT extends AbstractJMSContextIT { public void prepareMessageBuilder() { builder = new DefaultMessageBuilder(); - Map props = new HashMap<>(); + final Map props = new HashMap<>(); props.put("mq.kafka.headers.copy.to.jms.properties", "true"); builder.configure(props); } - private SinkRecord generateSinkRecord(ConnectHeaders headers) { - final String TOPIC = "TOPIC.NAME"; - final int PARTITION = 0; - final long OFFSET = 0; - return new SinkRecord(TOPIC, PARTITION, - Schema.STRING_SCHEMA, "mykey", - Schema.STRING_SCHEMA, "Test message", - OFFSET, - null, TimestampType.NO_TIMESTAMP_TYPE, - headers); + private SinkRecord generateSinkRecord(final ConnectHeaders headers) { + final String topic = "TOPIC.NAME"; + final int partition = 0; + final long offset = 0; + return new SinkRecord(topic, partition, + Schema.STRING_SCHEMA, "mykey", + Schema.STRING_SCHEMA, "Test message", + offset, + null, TimestampType.NO_TIMESTAMP_TYPE, + headers); } @Test public void buildMessageWithNoHeaders() throws Exception { // generate MQ message - Message message = builder.fromSinkRecord(getJmsContext(), generateSinkRecord(null)); + final Message message = builder.fromSinkRecord(getJmsContext(), generateSinkRecord(null)); // verify there are no MQ message properties assertFalse(message.getPropertyNames().hasMoreElements()); } - + @Test public 
void buildMessageWithStringHeaders() throws Exception { - Map testHeaders = new HashMap<>(); - testHeaders.put("HeaderOne", "This is test header one"); - testHeaders.put("HeaderTwo", "This is test header two"); + final Map testHeaders = new HashMap<>(); + testHeaders.put("HeaderOne", "This is test header one"); + testHeaders.put("HeaderTwo", "This is test header two"); testHeaders.put("HeaderThree", "This is test header three"); - testHeaders.put("HeaderFour", "This is test header four"); + testHeaders.put("HeaderFour", "This is test header four"); // prepare Kafka headers for input message - ConnectHeaders headers = new ConnectHeaders(); - for (String key : testHeaders.keySet()) { + final ConnectHeaders headers = new ConnectHeaders(); + for (final String key : testHeaders.keySet()) { headers.addString(key, testHeaders.get(key)); } // generate MQ message - Message message = builder.fromSinkRecord(getJmsContext(), generateSinkRecord(headers)); + final Message message = builder.fromSinkRecord(getJmsContext(), generateSinkRecord(headers)); // verify MQ message properties - for (String key : testHeaders.keySet()) { + for (final String key : testHeaders.keySet()) { assertEquals(testHeaders.get(key), message.getStringProperty(key)); } } @@ -93,15 +93,15 @@ public void buildMessageWithStringHeaders() throws Exception { @Test public void buildMessageWithBooleanHeaders() throws Exception { // prepare Kafka headers for input message - ConnectHeaders headers = new ConnectHeaders(); - headers.addBoolean("TestTrue", true); + final ConnectHeaders headers = new ConnectHeaders(); + headers.addBoolean("TestTrue", true); headers.addBoolean("TestFalse", false); // generate MQ message - Message message = builder.fromSinkRecord(getJmsContext(), generateSinkRecord(headers)); + final Message message = builder.fromSinkRecord(getJmsContext(), generateSinkRecord(headers)); // verify MQ message properties - assertEquals("true", message.getStringProperty("TestTrue")); + assertEquals("true", 
message.getStringProperty("TestTrue")); assertEquals("false", message.getStringProperty("TestFalse")); assertTrue(message.getBooleanProperty("TestTrue")); assertFalse(message.getBooleanProperty("TestFalse")); @@ -110,13 +110,13 @@ public void buildMessageWithBooleanHeaders() throws Exception { @Test public void buildMessageWithIntegerHeaders() throws Exception { // prepare Kafka headers for input message - ConnectHeaders headers = new ConnectHeaders(); - headers.addInt("TestOne", 1); - headers.addInt("TestTwo", 2); + final ConnectHeaders headers = new ConnectHeaders(); + headers.addInt("TestOne", 1); + headers.addInt("TestTwo", 2); headers.addInt("TestThree", 3); // generate MQ message - Message message = builder.fromSinkRecord(getJmsContext(), generateSinkRecord(headers)); + final Message message = builder.fromSinkRecord(getJmsContext(), generateSinkRecord(headers)); // verify MQ message properties assertEquals("1", message.getStringProperty("TestOne")); @@ -130,11 +130,11 @@ public void buildMessageWithIntegerHeaders() throws Exception { @Test public void buildMessageWithDoubleHeaders() throws Exception { // prepare Kafka headers for input message - ConnectHeaders headers = new ConnectHeaders(); + final ConnectHeaders headers = new ConnectHeaders(); headers.addDouble("TestPi", 3.14159265359); // generate MQ message - Message message = builder.fromSinkRecord(getJmsContext(), generateSinkRecord(headers)); + final Message message = builder.fromSinkRecord(getJmsContext(), generateSinkRecord(headers)); // verify MQ message properties assertEquals("3.14159265359", message.getStringProperty("TestPi")); diff --git a/src/integration/java/com/ibm/eventstreams/connect/mqsink/builders/JsonMessageBuilderIT.java b/src/integration/java/com/ibm/eventstreams/connect/mqsink/builders/JsonMessageBuilderIT.java index 95d90d0..ff2258f 100644 --- a/src/integration/java/com/ibm/eventstreams/connect/mqsink/builders/JsonMessageBuilderIT.java +++ 
b/src/integration/java/com/ibm/eventstreams/connect/mqsink/builders/JsonMessageBuilderIT.java @@ -1,5 +1,5 @@ /** - * Copyright 2022 IBM Corporation + * Copyright 2022, 2023, 2024 IBM Corporation * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. @@ -44,24 +44,24 @@ public void prepareMessageBuilder() { builder = new JsonMessageBuilder(); } - private SinkRecord generateSinkRecord(Schema valueSchema, Object value) { - final String TOPIC = "TOPIC.NAME"; - final int PARTITION = 0; - final long OFFSET = 0; - final Schema KEY_SCHEMA = Schema.STRING_SCHEMA; - final String KEY = "mykey"; - - return new SinkRecord(TOPIC, PARTITION, - KEY_SCHEMA, KEY, - valueSchema, value, - OFFSET); + private SinkRecord generateSinkRecord(final Schema valueSchema, final Object value) { + final String topic = "TOPIC.NAME"; + final int partition = 0; + final long offset = 0; + final Schema keySchema = Schema.STRING_SCHEMA; + final String key = "mykey"; + + return new SinkRecord(topic, partition, + keySchema, key, + valueSchema, value, + offset); } - @Test public void buildTextMessageWithoutSchema() throws Exception { createAndVerifyStringMessage(null, "Hello World"); } + @Test public void buildTextMessageWithSchema() throws Exception { createAndVerifyStringMessage(Schema.STRING_SCHEMA, "Hello World with a schema"); @@ -69,13 +69,13 @@ public void buildTextMessageWithSchema() throws Exception { @Test public void buildStructMessage() throws Exception { - Struct testObject = generateComplexObjectAsStruct(); - Schema testSchema = testObject.schema(); + final Struct testObject = generateComplexObjectAsStruct(); + final Schema testSchema = testObject.schema(); - Message message = builder.fromSinkRecord(getJmsContext(), generateSinkRecord(testSchema, testObject)); - String contents = message.getBody(String.class); + final Message message = builder.fromSinkRecord(getJmsContext(), generateSinkRecord(testSchema, 
testObject)); + final String contents = message.getBody(String.class); - JSONObject jsonContents = new JSONObject(contents); + final JSONObject jsonContents = new JSONObject(contents); assertEquals(3, jsonContents.length()); assertEquals("this is a string", jsonContents.getString("mystring")); assertEquals(true, jsonContents.getJSONObject("myobj").getBoolean("mybool")); @@ -87,12 +87,12 @@ public void buildStructMessage() throws Exception { @Test public void buildMapMessage() throws Exception { - Object testObject = generateComplexObjectAsMap(); + final Object testObject = generateComplexObjectAsMap(); - Message message = builder.fromSinkRecord(getJmsContext(), generateSinkRecord(null, testObject)); - String contents = message.getBody(String.class); + final Message message = builder.fromSinkRecord(getJmsContext(), generateSinkRecord(null, testObject)); + final String contents = message.getBody(String.class); - JSONObject jsonContents = new JSONObject(contents); + final JSONObject jsonContents = new JSONObject(contents); assertEquals(3, jsonContents.length()); assertEquals("this is a string", jsonContents.getString("mystring")); assertEquals(true, jsonContents.getJSONObject("myobj").getBoolean("mybool")); @@ -102,9 +102,8 @@ public void buildMapMessage() throws Exception { assertEquals("first", jsonContents.getJSONArray("myarray").getString(0)); } - private Struct generateComplexObjectAsStruct() { - Schema innerSchema = SchemaBuilder.struct() + final Schema innerSchema = SchemaBuilder.struct() .name("com.ibm.eventstreams.tests.Inner") .field("mybool", Schema.BOOLEAN_SCHEMA) .field("myint", Schema.INT32_SCHEMA) @@ -112,45 +111,45 @@ private Struct generateComplexObjectAsStruct() { .field("mybytes", Schema.BYTES_SCHEMA) .build(); - Schema complexSchema = SchemaBuilder.struct() + final Schema complexSchema = SchemaBuilder.struct() .name("com.ibm.eventstreams.tests.Complex") .field("mystring", Schema.STRING_SCHEMA) .field("myobj", innerSchema) .field("myarray", 
SchemaBuilder.array(Schema.STRING_SCHEMA)) .build(); - List innerary = new ArrayList<>(); + final List innerary = new ArrayList<>(); innerary.add("first"); innerary.add("second"); innerary.add("third"); innerary.add("fourth"); - Struct obj = new Struct(complexSchema) + final Struct obj = new Struct(complexSchema) .put("mystring", "this is a string") .put("myobj", new Struct(innerSchema) - .put("mybool", true) - .put("myint", 12345) - .put("myfloat", 12.4f) - .put("mybytes", "Hello".getBytes())) + .put("mybool", true) + .put("myint", 12345) + .put("myfloat", 12.4f) + .put("mybytes", "Hello".getBytes())) .put("myarray", innerary); return obj; } private Map generateComplexObjectAsMap() { - Map obj = new HashMap<>(); + final Map obj = new HashMap<>(); obj.put("mystring", "this is a string"); - Map innerobj = new HashMap<>(); + final Map innerobj = new HashMap<>(); innerobj.put("mybool", true); innerobj.put("myint", 12345); innerobj.put("myfloat", 12.4f); innerobj.put("mybytes", "Hello".getBytes()); obj.put("myobj", innerobj); - List innerary = new ArrayList<>(); + final List innerary = new ArrayList<>(); innerary.add("first"); innerary.add("second"); innerary.add("third"); @@ -160,12 +159,11 @@ private Map generateComplexObjectAsMap() { return obj; } - - private void createAndVerifyStringMessage(Schema valueSchema, String value) throws Exception { - Message message = builder.fromSinkRecord(getJmsContext(), generateSinkRecord(valueSchema, value)); + private void createAndVerifyStringMessage(final Schema valueSchema, final String value) throws Exception { + final Message message = builder.fromSinkRecord(getJmsContext(), generateSinkRecord(valueSchema, value)); assertEquals("\"" + value + "\"", message.getBody(String.class)); - TextMessage textmessage = (TextMessage) message; + final TextMessage textmessage = (TextMessage) message; assertEquals("\"" + value + "\"", textmessage.getText()); } } diff --git 
a/src/integration/java/com/ibm/eventstreams/connect/mqsink/builders/KeyHeaderIT.java b/src/integration/java/com/ibm/eventstreams/connect/mqsink/builders/KeyHeaderIT.java index 466a489..03a96c3 100644 --- a/src/integration/java/com/ibm/eventstreams/connect/mqsink/builders/KeyHeaderIT.java +++ b/src/integration/java/com/ibm/eventstreams/connect/mqsink/builders/KeyHeaderIT.java @@ -1,5 +1,5 @@ /** - * Copyright 2022 IBM Corporation + * Copyright 2022, 2023, 2024 IBM Corporation * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. @@ -25,7 +25,6 @@ import javax.jms.Message; import org.apache.kafka.connect.data.Schema; -import org.apache.kafka.connect.errors.ConnectException; import org.apache.kafka.connect.sink.SinkRecord; import org.junit.Test; @@ -33,27 +32,26 @@ public class KeyHeaderIT extends AbstractJMSContextIT { - private SinkRecord generateSinkRecord(Schema keySchema, Object keyValue) { - final String TOPIC = "TOPIC.NAME"; - final int PARTITION = 0; - final long OFFSET = 0; - final Schema VALUE_SCHEMA = Schema.STRING_SCHEMA; - final String VALUE = "message payload"; - - return new SinkRecord(TOPIC, PARTITION, - keySchema, keyValue, - VALUE_SCHEMA, VALUE, - OFFSET); + private SinkRecord generateSinkRecord(final Schema keySchema, final Object keyValue) { + final String topic = "TOPIC.NAME"; + final int partition = 0; + final long offset = 0; + final Schema valueSchema = Schema.STRING_SCHEMA; + final String value = "message payload"; + + return new SinkRecord(topic, partition, + keySchema, keyValue, + valueSchema, value, + offset); } - @Test public void verifyUnsupportedKeyHeader() throws Exception { - Map props = new HashMap<>(); + final Map props = new HashMap<>(); props.put("mq.message.builder.key.header", "unsupported"); - DefaultMessageBuilder builder = new DefaultMessageBuilder(); - ConnectException exc = assertThrows(ConnectException.class, () -> { + final 
DefaultMessageBuilder builder = new DefaultMessageBuilder(); + final MessageBuilderException exc = assertThrows(MessageBuilderException.class, () -> { builder.configure(props); }); assertEquals("Unsupported MQ message builder key header value", exc.getMessage()); @@ -81,9 +79,9 @@ public void buildByteArrayKeyHeaderWithSchema() throws Exception { @Test public void buildByteBufferKeyHeaderWithoutSchema() throws Exception { - String key = "this-is-my-key"; - byte[] payload = key.getBytes(); - ByteBuffer value = ByteBuffer.allocate(payload.length); + final String key = "this-is-my-key"; + final byte[] payload = key.getBytes(); + final ByteBuffer value = ByteBuffer.allocate(payload.length); value.put(payload); createAndVerifyBytesKeyHeader(null, value, key); @@ -91,34 +89,34 @@ public void buildByteBufferKeyHeaderWithoutSchema() throws Exception { @Test public void buildByteBufferKeyHeaderWithSchema() throws Exception { - String key = "this-is-a-key"; - byte[] payload = key.getBytes(); - ByteBuffer value = ByteBuffer.allocate(payload.length); + final String key = "this-is-a-key"; + final byte[] payload = key.getBytes(); + final ByteBuffer value = ByteBuffer.allocate(payload.length); value.put(payload); createAndVerifyBytesKeyHeader(Schema.BYTES_SCHEMA, value, key); } - - private void createAndVerifyStringKeyHeader(Schema schema, String key) throws Exception { - Map props = new HashMap<>(); + private void createAndVerifyStringKeyHeader(final Schema schema, final String key) throws Exception { + final Map props = new HashMap<>(); props.put("mq.message.builder.key.header", "JMSCorrelationID"); - DefaultMessageBuilder builder = new DefaultMessageBuilder(); + final DefaultMessageBuilder builder = new DefaultMessageBuilder(); builder.configure(props); - Message message = builder.fromSinkRecord(getJmsContext(), generateSinkRecord(schema, key)); + final Message message = builder.fromSinkRecord(getJmsContext(), generateSinkRecord(schema, key)); assertEquals(key, 
message.getJMSCorrelationID()); } - private void createAndVerifyBytesKeyHeader(Schema schema, Object key, String keyAsString) throws Exception { - Map props = new HashMap<>(); + private void createAndVerifyBytesKeyHeader(final Schema schema, final Object key, final String keyAsString) + throws Exception { + final Map props = new HashMap<>(); props.put("mq.message.builder.key.header", "JMSCorrelationID"); - DefaultMessageBuilder builder = new DefaultMessageBuilder(); + final DefaultMessageBuilder builder = new DefaultMessageBuilder(); builder.configure(props); - Message message = builder.fromSinkRecord(getJmsContext(), generateSinkRecord(schema, key)); + final Message message = builder.fromSinkRecord(getJmsContext(), generateSinkRecord(schema, key)); assertEquals(keyAsString, new String(message.getJMSCorrelationIDAsBytes())); } } diff --git a/src/integration/java/com/ibm/eventstreams/connect/mqsink/builders/DestinationBuilderIT.java b/src/integration/java/com/ibm/eventstreams/connect/mqsink/builders/MessagePropertyIT.java similarity index 50% rename from src/integration/java/com/ibm/eventstreams/connect/mqsink/builders/DestinationBuilderIT.java rename to src/integration/java/com/ibm/eventstreams/connect/mqsink/builders/MessagePropertyIT.java index 0dd81b1..020e2d3 100644 --- a/src/integration/java/com/ibm/eventstreams/connect/mqsink/builders/DestinationBuilderIT.java +++ b/src/integration/java/com/ibm/eventstreams/connect/mqsink/builders/MessagePropertyIT.java @@ -1,5 +1,5 @@ /** - * Copyright 2022 IBM Corporation + * Copyright 2022, 2023, 2024 IBM Corporation * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. 
@@ -28,82 +28,78 @@ import com.ibm.eventstreams.connect.mqsink.AbstractJMSContextIT; import com.ibm.mq.jms.MQQueue; -public class DestinationBuilderIT extends AbstractJMSContextIT { - +public class MessagePropertyIT extends AbstractJMSContextIT { @Test public void verifyReplyQueueProperty() throws Exception { - String replyQueue = "queue://QM1/REPLY.Q"; + final String replyQueue = "queue://QM1/REPLY.Q"; - Map props = new HashMap<>(); + final Map props = new HashMap<>(); props.put("mq.reply.queue", replyQueue); - DefaultMessageBuilder builder = new DefaultMessageBuilder(); + final DefaultMessageBuilder builder = new DefaultMessageBuilder(); builder.configure(props); - SinkRecord record = new SinkRecord("topic", 0, null, null, null, "msg", 0); + final SinkRecord record = new SinkRecord("topic", 0, null, null, null, "msg", 0); - Message message = builder.fromSinkRecord(getJmsContext(), record); + final Message message = builder.fromSinkRecord(getJmsContext(), record); assertEquals("msg", message.getBody(String.class)); - MQQueue destination = (MQQueue) message.getJMSReplyTo(); + final MQQueue destination = (MQQueue) message.getJMSReplyTo(); assertEquals(replyQueue, destination.getQueueName()); } - @Test public void verifyTopicNameProperty() throws Exception { - String topicProperty = "PutTopicNameHere"; - String TOPIC = "MY.TOPIC"; + final String topicProperty = "PutTopicNameHere"; + final String topic = "MY.TOPIC"; - Map props = new HashMap<>(); + final Map props = new HashMap<>(); props.put("mq.message.builder.topic.property", topicProperty); - DefaultMessageBuilder builder = new DefaultMessageBuilder(); + final DefaultMessageBuilder builder = new DefaultMessageBuilder(); builder.configure(props); - SinkRecord record = new SinkRecord(TOPIC, 0, null, null, null, "message", 0); + final SinkRecord record = new SinkRecord(topic, 0, null, null, null, "message", 0); - Message message = builder.fromSinkRecord(getJmsContext(), record); + final Message message = 
builder.fromSinkRecord(getJmsContext(), record); assertEquals("message", message.getBody(String.class)); - assertEquals(TOPIC, message.getStringProperty(topicProperty)); + assertEquals(topic, message.getStringProperty(topicProperty)); } - @Test public void verifyTopicPartitionProperty() throws Exception { - String topicProperty = "PutTopicPartitionHere"; - int PARTITION = 4; + final String topicProperty = "PutTopicPartitionHere"; + final int partition = 4; - Map props = new HashMap<>(); + final Map props = new HashMap<>(); props.put("mq.message.builder.partition.property", topicProperty); - DefaultMessageBuilder builder = new DefaultMessageBuilder(); + final DefaultMessageBuilder builder = new DefaultMessageBuilder(); builder.configure(props); - SinkRecord record = new SinkRecord("topic", PARTITION, null, null, null, "message", 0); + final SinkRecord record = new SinkRecord("topic", partition, null, null, null, "message", 0); - Message message = builder.fromSinkRecord(getJmsContext(), record); + final Message message = builder.fromSinkRecord(getJmsContext(), record); assertEquals("message", message.getBody(String.class)); - assertEquals(PARTITION, message.getIntProperty(topicProperty)); + assertEquals(partition, message.getIntProperty(topicProperty)); } - @Test public void verifyMessageOffsetProperty() throws Exception { - String topicProperty = "PutOffsetHere"; - long OFFSET = 91; + final String topicProperty = "PutOffsetHere"; + final long offset = 91; - Map props = new HashMap<>(); + final Map props = new HashMap<>(); props.put("mq.message.builder.offset.property", topicProperty); - DefaultMessageBuilder builder = new DefaultMessageBuilder(); + final DefaultMessageBuilder builder = new DefaultMessageBuilder(); builder.configure(props); - SinkRecord record = new SinkRecord("topic", 0, null, null, null, "message", OFFSET); + final SinkRecord record = new SinkRecord("topic", 0, null, null, null, "message", offset); - Message message = 
builder.fromSinkRecord(getJmsContext(), record); + final Message message = builder.fromSinkRecord(getJmsContext(), record); assertEquals("message", message.getBody(String.class)); - assertEquals(OFFSET, message.getLongProperty(topicProperty)); + assertEquals(offset, message.getLongProperty(topicProperty)); } } diff --git a/src/integration/java/com/ibm/eventstreams/connect/mqsink/util/JsonRestApi.java b/src/integration/java/com/ibm/eventstreams/connect/mqsink/util/JsonRestApi.java new file mode 100644 index 0000000..7a1b1d5 --- /dev/null +++ b/src/integration/java/com/ibm/eventstreams/connect/mqsink/util/JsonRestApi.java @@ -0,0 +1,107 @@ +/** + * Copyright 2022, 2023, 2024 IBM Corporation + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package com.ibm.eventstreams.connect.mqsink.util; + +import org.json.JSONException; +import org.json.JSONObject; + +import java.io.BufferedReader; +import java.io.IOException; +import java.io.InputStream; +import java.io.InputStreamReader; +import java.io.OutputStream; +import java.io.Reader; +import java.net.URL; +import java.nio.charset.Charset; +import java.security.KeyManagementException; +import java.security.NoSuchAlgorithmException; +import java.security.cert.X509Certificate; +import java.util.Base64; + +import javax.net.ssl.HostnameVerifier; +import javax.net.ssl.HttpsURLConnection; +import javax.net.ssl.SSLContext; +import javax.net.ssl.SSLSession; +import javax.net.ssl.SSLSocketFactory; +import javax.net.ssl.TrustManager; +import javax.net.ssl.X509TrustManager; + +public class JsonRestApi { + + public static JSONObject jsonPost(final String url, final String username, final String password, + final String payload) throws IOException, KeyManagementException, NoSuchAlgorithmException, JSONException { + final URL urlObj = new URL(url); + final HttpsURLConnection urlConnection = (HttpsURLConnection) urlObj.openConnection(); + urlConnection.setHostnameVerifier(new IgnoreCertVerifier()); + urlConnection.setSSLSocketFactory(getTrustAllCertsFactory()); + urlConnection.setRequestProperty("Authorization", getAuthHeader(username, password)); + urlConnection.setRequestProperty("Content-Type", "application/json"); + urlConnection.setRequestProperty("ibm-mq-rest-csrf-token", "junit"); + urlConnection.setDoOutput(true); + + try (OutputStream os = urlConnection.getOutputStream()) { + final byte[] input = payload.getBytes("utf-8"); + os.write(input, 0, input.length); + } + + try (InputStream input = urlConnection.getInputStream()) { + final BufferedReader re = new BufferedReader(new InputStreamReader(input, Charset.forName("utf-8"))); + return new JSONObject(read(re)); + } + } + + private static String read(final Reader re) throws IOException { + final StringBuilder 
str = new StringBuilder(); + int ch; + do { + ch = re.read(); + str.append((char) ch); + } while (ch != -1); + return str.toString(); + } + + private static String getAuthHeader(final String username, final String password) { + final String userpass = username + ":" + password; + final String basicAuth = "Basic " + new String(Base64.getEncoder().encode(userpass.getBytes())); + return basicAuth; + } + + private static class IgnoreCertVerifier implements HostnameVerifier { + @Override + public boolean verify(final String host, final SSLSession session) { + return true; + } + } + + private static SSLSocketFactory getTrustAllCertsFactory() throws NoSuchAlgorithmException, KeyManagementException { + final TrustManager[] trustAllCerts = new TrustManager[] { + new X509TrustManager() { + public X509Certificate[] getAcceptedIssuers() { + return null; + } + + public void checkClientTrusted(final X509Certificate[] certs, final String authType) { + } + + public void checkServerTrusted(final X509Certificate[] certs, final String authType) { + } + } + }; + final SSLContext sc = SSLContext.getInstance("SSL"); + sc.init(null, trustAllCerts, new java.security.SecureRandom()); + return sc.getSocketFactory(); + } +} diff --git a/src/integration/java/com/ibm/eventstreams/connect/mqsink/util/MQRestAPIHelper.java b/src/integration/java/com/ibm/eventstreams/connect/mqsink/util/MQRestAPIHelper.java new file mode 100644 index 0000000..1e13383 --- /dev/null +++ b/src/integration/java/com/ibm/eventstreams/connect/mqsink/util/MQRestAPIHelper.java @@ -0,0 +1,103 @@ +/** + * Copyright 2022, 2023, 2024 IBM Corporation + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package com.ibm.eventstreams.connect.mqsink.util; + +import org.json.JSONException; +import org.json.JSONObject; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +import java.io.IOException; +import java.security.KeyManagementException; +import java.security.NoSuchAlgorithmException; + +public class MQRestAPIHelper { + private String qmgrname; + private int portnum; + private String password; + + public MQRestAPIHelper(final String qmgrname, final int portnum, final String password) { + this.qmgrname = qmgrname; + this.portnum = portnum; + this.password = password; + } + + public final Logger log = LoggerFactory.getLogger(MQRestAPIHelper.class); + + public static final String STOP_CHANNEL = "{" + + " \"type\": \"runCommand\"," + + " \"parameters\": {" + + " \"command\": \"STOP CHANNEL('DEV.APP.SVRCONN') MODE(QUIESCE)\"" + + " }" + + "}"; + + public static final String START_CHANNEL = "{" + + " \"type\": \"runCommand\"," + + " \"parameters\": {" + + " \"command\": \"START CHANNEL('DEV.APP.SVRCONN')\"" + + " }" + + "}"; + + public int sendCommand(final String request) throws IOException, KeyManagementException, NoSuchAlgorithmException { + try { + + final String url = "https://localhost:" + portnum + "/ibmmq/rest/v2/admin/action/qmgr/" + qmgrname + + "/mqsc"; + final JSONObject commandResult = JsonRestApi.jsonPost(url, "admin", password, request); + + log.debug("result = " + commandResult); + + final int completionCode = commandResult.getInt("overallCompletionCode"); + final int reasonCode = 
commandResult.getInt("overallReasonCode"); + + if (completionCode == 2 && reasonCode == 3008) { + return 0; + } else if (completionCode == 0 && reasonCode == 0) { + return commandResult.getJSONArray("commandResponse").length(); + } else { + return -1; + } + } catch (final JSONException e) { + throw new RuntimeException(e); + } + } + + public static class MQRestAPIHelperBuilder { + private String qmgrname; + private int portnum; + private String password; + + public MQRestAPIHelperBuilder qmgrname(final String qmgrname) { + this.qmgrname = qmgrname; + return this; + } + + public MQRestAPIHelperBuilder portnum(final int portnum) { + this.portnum = portnum; + return this; + } + + public MQRestAPIHelperBuilder password(final String password) { + this.password = password; + return this; + } + + public MQRestAPIHelper build() { + return new MQRestAPIHelper(qmgrname, portnum, password); + } + + } +} diff --git a/src/integration/java/com/ibm/eventstreams/connect/mqsink/util/MessageDescriptorBuilder.java b/src/integration/java/com/ibm/eventstreams/connect/mqsink/util/MessageDescriptorBuilder.java new file mode 100644 index 0000000..1651099 --- /dev/null +++ b/src/integration/java/com/ibm/eventstreams/connect/mqsink/util/MessageDescriptorBuilder.java @@ -0,0 +1,51 @@ +/** + * Copyright 2024 IBM Corporation + *

+ * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + *

+ * http://www.apache.org/licenses/LICENSE-2.0 + *

+ * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package com.ibm.eventstreams.connect.mqsink.util; + +import javax.jms.JMSContext; +import javax.jms.JMSException; +import javax.jms.Message; + +import org.apache.kafka.connect.errors.ConnectException; +import org.apache.kafka.connect.sink.SinkRecord; + +import com.ibm.eventstreams.connect.mqsink.builders.DefaultMessageBuilder; +import com.ibm.msg.client.jms.JmsConstants; + +public class MessageDescriptorBuilder extends DefaultMessageBuilder { + + @Override + public Message getJMSMessage(JMSContext jmsCtxt, SinkRecord record) { + + Message message = super.getJMSMessage(jmsCtxt, record); + + // add MQMD values + // JMS_IBM_MQMD_MsgId - byte[] + // JMS_IBM_MQMD_ApplIdentityData - string + // JMS_IBM_MQMD_PutApplName - string + // https://www.ibm.com/docs/en/ibm-mq/9.3?topic=application-jms-message-object-properties + try { + message.setObjectProperty(JmsConstants.JMS_IBM_MQMD_MSGID, "ThisIsMyId".getBytes()); + message.setStringProperty(JmsConstants.JMS_IBM_MQMD_APPLIDENTITYDATA, "ThisIsMyApplicationData"); + message.setStringProperty(JmsConstants.JMS_IBM_MQMD_PUTAPPLNAME, "ThisIsMyPutApplicationName"); + + } catch (JMSException e) { + throw new ConnectException("Failed to write property", e); + } + + return message; + } +} diff --git a/src/integration/java/com/ibm/eventstreams/connect/mqsink/util/SinkRecordBuilderForTest.java b/src/integration/java/com/ibm/eventstreams/connect/mqsink/util/SinkRecordBuilderForTest.java new file mode 100644 index 0000000..8c048d2 --- /dev/null +++ b/src/integration/java/com/ibm/eventstreams/connect/mqsink/util/SinkRecordBuilderForTest.java @@ -0,0 +1,78 @@ +/** + * Copyright 2023, 2024 IBM Corporation + * + * 
Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package com.ibm.eventstreams.connect.mqsink.util; + +import org.apache.kafka.connect.data.Schema; +import org.apache.kafka.connect.sink.SinkRecord; + +public class SinkRecordBuilderForTest { + private String topic; + private Integer partition; + private Schema keySchema; + private Object key; + private Schema valueSchema; + private Object value; + private Long offset; + + public SinkRecordBuilderForTest() { + } + + public SinkRecordBuilderForTest topic(final String topic) { + this.topic = topic; + return this; + } + + public SinkRecordBuilderForTest partition(final Integer partition) { + this.partition = partition; + return this; + } + + public SinkRecordBuilderForTest keySchema(final Schema keySchema) { + this.keySchema = keySchema; + return this; + } + + public SinkRecordBuilderForTest key(final Object key) { + this.key = key; + return this; + } + + public SinkRecordBuilderForTest valueSchema(final Schema valueSchema) { + this.valueSchema = valueSchema; + return this; + } + + public SinkRecordBuilderForTest value(final Object value) { + this.value = value; + return this; + } + + public SinkRecordBuilderForTest offset(final Long offset) { + this.offset = offset; + return this; + } + + public SinkRecord build() { + return new SinkRecord(this.topic, this.partition, this.keySchema, this.key, this.valueSchema, this.value, + this.offset); + } + + public String toString() { + return 
"SinkRecord.SinkRecordBuilder(topic=" + this.topic + ", partition=" + this.partition + ", keySchema=" + + this.keySchema + ", key=" + this.key + ", valueSchema=" + this.valueSchema + ", value=" + this.value + + ")"; + } +} diff --git a/src/main/java/com/ibm/eventstreams/connect/mqsink/ExceptionProcessor.java b/src/main/java/com/ibm/eventstreams/connect/mqsink/ExceptionProcessor.java new file mode 100644 index 0000000..14e255e --- /dev/null +++ b/src/main/java/com/ibm/eventstreams/connect/mqsink/ExceptionProcessor.java @@ -0,0 +1,98 @@ +/** + * Copyright 2023, 2024 IBM Corporation + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package com.ibm.eventstreams.connect.mqsink; + +import javax.jms.JMSException; + +import org.apache.kafka.connect.errors.ConnectException; +import org.apache.kafka.connect.errors.RetriableException; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +import com.ibm.mq.MQException; +import com.ibm.mq.constants.MQConstants; + +public class ExceptionProcessor { + + private static final Logger log = LoggerFactory.getLogger(ExceptionProcessor.class); + + protected static int getReason(final Throwable exc) { + int reason = -1; + + // Try to extract the MQ reason code to see if it's a retriable exception + Throwable t = exc.getCause(); + while (t != null) { + if (t instanceof MQException) { + final MQException mqe = (MQException) t; + log.error("MQ error: CompCode {}, Reason {} {}", mqe.getCompCode(), mqe.getReason(), + MQConstants.lookupReasonCode(mqe.getReason())); + reason = mqe.getReason(); + break; + } else if (t instanceof JMSException) { + final JMSException jmse = (JMSException) t; + log.error("JMS exception: error code {}", jmse.getErrorCode()); + } + + t = t.getCause(); // Moves t up the stack trace until it is null. 
+ } + return reason; + } + + public static boolean isClosable(final Throwable exc) { + final int reason = getReason(exc); + if (reason == MQConstants.MQRC_Q_FULL || reason == MQConstants.MQRC_PUT_INHIBITED) { + log.info("A queue has the GET operation intentionally inhibited, wait for next poll."); + return false; + } + log.info(" All MQ connections will be closed."); + return true; + } + + public static boolean isRetriable(final Throwable exc) { + final int reason = getReason(exc); + switch (reason) { + // These reason codes indicate that the connection can be just retried later + // will probably recover + case MQConstants.MQRC_BACKED_OUT: + case MQConstants.MQRC_CHANNEL_NOT_AVAILABLE: + case MQConstants.MQRC_CONNECTION_BROKEN: + case MQConstants.MQRC_HOST_NOT_AVAILABLE: + case MQConstants.MQRC_NOT_AUTHORIZED: + case MQConstants.MQRC_Q_MGR_NOT_AVAILABLE: + case MQConstants.MQRC_Q_MGR_QUIESCING: + case MQConstants.MQRC_Q_MGR_STOPPING: + case MQConstants.MQRC_UNEXPECTED_ERROR: + case MQConstants.MQRC_Q_FULL: + case MQConstants.MQRC_PUT_INHIBITED: + log.info("JMS exception is retriable, wait for next poll."); + return true; + } + log.info("JMS exception is not retriable, the connector is in a failed state."); + return false; + } + + /** + * Handles exceptions from MQ. Some JMS exceptions are treated as retriable + * meaning that the connector can keep running and just trying again is likely + * to fix things. + */ + public static ConnectException handleException(final Throwable exc) { + if (isRetriable(exc)) { + return new RetriableException(exc); + } + return new ConnectException(exc); + } +} diff --git a/src/main/java/com/ibm/eventstreams/connect/mqsink/JMSWorker.java b/src/main/java/com/ibm/eventstreams/connect/mqsink/JMSWorker.java new file mode 100644 index 0000000..da588ab --- /dev/null +++ b/src/main/java/com/ibm/eventstreams/connect/mqsink/JMSWorker.java @@ -0,0 +1,395 @@ +/** + * Copyright 2017, 2020, 2023, 2024 IBM Corporation + *

+ * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + *

+ * http://www.apache.org/licenses/LICENSE-2.0 + *

+ * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package com.ibm.eventstreams.connect.mqsink; + +import java.net.MalformedURLException; +import java.util.HashMap; +import java.util.Locale; +import java.util.Map; +import java.util.Optional; + +import javax.jms.DeliveryMode; +import javax.jms.InvalidDestinationRuntimeException; +import javax.jms.JMSConsumer; +import javax.jms.JMSContext; +import javax.jms.JMSException; +import javax.jms.JMSProducer; +import javax.jms.JMSRuntimeException; +import javax.jms.Message; +import javax.jms.TextMessage; + +import org.apache.kafka.common.config.AbstractConfig; +import org.apache.kafka.connect.errors.ConnectException; +import org.apache.kafka.connect.errors.RetriableException; +import org.apache.kafka.connect.sink.SinkRecord; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +import com.fasterxml.jackson.core.JsonProcessingException; +import com.fasterxml.jackson.core.type.TypeReference; +import com.fasterxml.jackson.databind.ObjectMapper; +import com.ibm.eventstreams.connect.mqsink.builders.MessageBuilder; +import com.ibm.eventstreams.connect.mqsink.builders.MessageBuilderFactory; +import com.ibm.mq.jms.MQConnectionFactory; +import com.ibm.mq.jms.MQQueue; +import com.ibm.msg.client.wmq.WMQConstants; + +/** + * Writes messages to MQ using JMS. Uses a transacted session, adding messages + * to the current + * transaction until told to commit. Automatically reconnects as needed. 
+ */ +public class JMSWorker { + private static final Logger log = LoggerFactory.getLogger(JMSWorker.class); + + // JMS factory and context + private MQConnectionFactory mqConnFactory; + private JMSContext jmsCtxt; + protected JMSProducer jmsProd; + protected JMSConsumer jmsCons; + + // MQ objects + private MQQueue queue; + protected MQQueue stateQueue; + + // State + private boolean connected = false; // Whether connected to MQ + private long reconnectDelayMillis = RECONNECT_DELAY_MILLIS_MIN; // Delay between repeated reconnect attempts + + // Constants + private int deliveryMode = Message.DEFAULT_DELIVERY_MODE; + private long timeToLive = Message.DEFAULT_TIME_TO_LIVE; + + final private static long RECONNECT_DELAY_MILLIS_MIN = 64L; + final private static long RECONNECT_DELAY_MILLIS_MAX = 8192L; + + protected ObjectMapper mapper; + + private boolean isExactlyOnceMode = false; + + private MQConnectionHelper mqConnectionHelper; + + private MessageBuilder messageBuilder; + + public JMSWorker() { + mapper = new ObjectMapper(); + } + + /** + * Configure this class. 
+ * + * @param config + * @throws ConnectException + */ + public void configure(final AbstractConfig config) throws ConnectException { + log.trace("[{}] Entry {}.configure, props={}", Thread.currentThread().getId(), + this.getClass().getName()); + + isExactlyOnceMode = MQSinkConnector.configSupportsExactlyOnce(config); + mqConnectionHelper = new MQConnectionHelper(config); + + if (mqConnectionHelper.getUseIBMCipherMappings() != null) { + System.setProperty("com.ibm.mq.cfg.useIBMCipherMappings", mqConnectionHelper.getUseIBMCipherMappings()); + } + + try { + mqConnFactory = mqConnectionHelper.createMQConnFactory(); + queue = configureQueue(mqConnectionHelper.getQueueName(), mqConnectionHelper.isMessageBodyJms()); + final Boolean mqmdWriteEnabled = config.getBoolean(MQSinkConfig.CONFIG_NAME_MQ_MQMD_WRITE_ENABLED); + queue.setBooleanProperty(WMQConstants.WMQ_MQMD_WRITE_ENABLED, mqmdWriteEnabled); + + if (mqmdWriteEnabled) { + String mqmdMessageContext = config.getString(MQSinkConfig.CONFIG_NAME_MQ_MQMD_MESSAGE_CONTEXT); + if (mqmdMessageContext != null) { + mqmdMessageContext = mqmdMessageContext.toLowerCase(Locale.ENGLISH); + } + if ("identity".equals(mqmdMessageContext)) { + queue.setIntProperty(WMQConstants.WMQ_MQMD_MESSAGE_CONTEXT, + WMQConstants.WMQ_MDCTX_SET_IDENTITY_CONTEXT); + } else if ("all".equals(mqmdMessageContext)) { + queue.setIntProperty(WMQConstants.WMQ_MQMD_MESSAGE_CONTEXT, WMQConstants.WMQ_MDCTX_SET_ALL_CONTEXT); + } + } + if (isExactlyOnceMode) { + stateQueue = configureQueue(mqConnectionHelper.getStateQueueName(), true); + } + + this.timeToLive = mqConnectionHelper.getTimeToLive(); + + this.deliveryMode = mqConnectionHelper.isPersistent() ? 
DeliveryMode.PERSISTENT : DeliveryMode.NON_PERSISTENT; + + this.messageBuilder = MessageBuilderFactory.getMessageBuilder(config); + } catch (JMSException | JMSRuntimeException | MalformedURLException jmse) { + log.error("JMS exception {}", jmse); + throw new JMSWorkerConnectionException("JMS connection failed", jmse); + } + + log.trace("[{}] Exit {}.configure", Thread.currentThread().getId(), this.getClass().getName()); + } + + /** Connects to MQ. */ + public void connect() { + log.trace("[{}] Entry {}.connect", Thread.currentThread().getId(), this.getClass().getName()); + + createJMSContext(); + + createConsumerForStateQueue(); + + configureProducer(); + + connected = true; + log.info("Connection to MQ established"); + log.trace("[{}] Exit {}.connect", Thread.currentThread().getId(), this.getClass().getName()); + } + + /** + * Internal method to connect to MQ. + * + * @throws RetriableException + * Operation failed, but connector should continue to + * retry. + * @throws ConnectException + * Operation failed and connector should stop. 
+ */ + private void maybeReconnect() throws ConnectException, RetriableException { + log.trace("[{}] Entry {}.maybeReconnect", Thread.currentThread().getId(), this.getClass().getName()); + + if (connected) { + log.trace("[{}] Exit {}.maybeReconnect", Thread.currentThread().getId(), this.getClass().getName()); + return; + } + + try { + connect(); + reconnectDelayMillis = RECONNECT_DELAY_MILLIS_MIN; + } catch (final JMSRuntimeException jmse) { + // Delay slightly so that repeated reconnect loops don't run too fast + log.info("Connection to MQ could not be established"); + try { + Thread.sleep(reconnectDelayMillis); + } catch (final InterruptedException ie) { + } + + if (reconnectDelayMillis < RECONNECT_DELAY_MILLIS_MAX) { + reconnectDelayMillis = reconnectDelayMillis * 2; + } + + log.error("JMS exception {}", jmse); + log.trace("[{}] Exit {}.maybeReconnect, retval=JMSRuntimeException", Thread.currentThread().getId(), + this.getClass().getName()); + throw jmse; + } + + log.trace("[{}] Exit {}.maybeReconnect, retval=true", Thread.currentThread().getId(), + this.getClass().getName()); + } + + /** + * Sends a message to MQ. Adds the message to the current transaction. + * Reconnects to MQ if required. + * + * @param r + * The message and schema to send + * + * @throws RetriableException + * Operation failed, but connector should continue to + * retry. + * @throws ConnectException + * Operation failed and connector should stop. + */ + public void send(final SinkRecord r) throws ConnectException, RetriableException { + log.trace("[{}] Entry {}.send", Thread.currentThread().getId(), this.getClass().getName()); + + maybeReconnect(); + + sendSinkRecordToMQ(queue, r); + + log.trace("[{}] Exit {}.send", Thread.currentThread().getId(), this.getClass().getName()); + } + + /** Closes the connection. 
*/ + public void close() { + log.trace("[{}] Entry {}.close", Thread.currentThread().getId(), this.getClass().getName()); + + try { + connected = false; + + if (jmsCtxt != null) { + jmsCtxt.close(); + } + } catch (final JMSRuntimeException jmse) { + log.error("", jmse); + } finally { + jmsCtxt = null; + log.debug("Connection to MQ closed"); + } + + log.trace("[{}] Exit {}.close", Thread.currentThread().getId(), this.getClass().getName()); + } + + /** + * Read a message from the state queue. + * + * @return the message + * @throws JsonProcessingException + * @throws JMSRuntimeException + * @throws JMSException + */ + public Optional> readFromStateQueue() + throws JsonProcessingException, JMSRuntimeException, JMSException { + maybeReconnect(); + if (jmsCons == null) { + return Optional.empty(); + } + try { + final TextMessage message = (TextMessage) jmsCons.receiveNoWait(); + if (message == null) { + return Optional.empty(); + } + final HashMap stateMap = mapper.readValue(message.getText(), + new TypeReference>() { + }); + return Optional.of(stateMap); + } catch (final JsonProcessingException jpe) { + log.error("An error occurred while processing (parsing) JSON content from state queue: {}", + jpe.getMessage()); + throw jpe; + } catch (JMSException | JMSRuntimeException jmse) { + log.error("An error occurred while reading the state queue: {}", jmse.getMessage()); + throw jmse; + } + } + + /** + * Create a queue object. If mbj is true, then create a queue that supports + * JMS message body. + * + * @param queueName + * the name of the queue + * @param isJms + * whether the queue supports JMS message body + * @return the queue object + */ + private MQQueue configureQueue(final String queueName, final Boolean isJms) + throws JMSException { + final MQQueue queue = new MQQueue(queueName); + queue.setMessageBodyStyle(isJms ? WMQConstants.WMQ_MESSAGE_BODY_JMS : WMQConstants.WMQ_MESSAGE_BODY_MQ); + return queue; + } + + /** + * Send the last message to the MQ queue. 
+ * + * @param lastCommittedOffsetMap + * @throws Exception + * + * @throws RetriableException + * Operation failed, but connector should + * continue to + * retry. + * @throws ConnectException + * Operation failed and connector should stop. + * @throws JMSException + * @throws JsonProcessingException + */ + public void writeLastRecordOffsetToStateQueue(final Map lastCommittedOffsetMap) + throws JsonProcessingException, JMSRuntimeException, JMSException { + log.trace("[{}] Entry {}.writeLastRecordOffsetToStateQueue", Thread.currentThread().getId(), + this.getClass().getName()); + + maybeReconnect(); + + if (lastCommittedOffsetMap == null) { + log.error("Last committed offset map is null"); + log.trace("[{}] Exit {}.writeLastRecordOffsetToStateQueue", Thread.currentThread().getId(), + this.getClass().getName()); + return; + } + + final TextMessage message = jmsCtxt.createTextMessage(); + try { + message.setText(mapper.writeValueAsString(lastCommittedOffsetMap)); + jmsProd.send(stateQueue, message); + } catch (final JsonProcessingException jpe) { + log.error("An error occurred while writing to the state queue, Json Processing Exception {}", jpe); + throw jpe; + } catch (JMSRuntimeException | JMSException jmse) { + log.error("An error occurred while writing to the state queue, JMS Exception {}", jmse); + throw jmse; + } + log.trace("[{}] Exit {}.writeLastRecordOffsetToStateQueue", Thread.currentThread().getId(), + this.getClass().getName()); + } + + /** + * Commits the current transaction. + * + * @throws RetriableException + * Operation failed, but connector should continue to + * retry. + * @throws ConnectException + * Operation failed and connector should stop. 
+ */ + public void commit() { + log.trace("[{}] Entry {}.commit", Thread.currentThread().getId(), this.getClass().getName()); + + maybeReconnect(); + + jmsCtxt.commit(); + log.trace("[{}] Exit {}.commit", Thread.currentThread().getId(), this.getClass().getName()); + } + + /** + * Builds the JMS message and sends it to MQ. + * + * @param queue + * The MQ queue to send the message to + * @param record + * The message and schema to send + * @throws JMSException + */ + private void sendSinkRecordToMQ(final MQQueue queue, final SinkRecord record) { + maybeReconnect(); + final Message m = messageBuilder.fromSinkRecord(jmsCtxt, record); + jmsProd.send(queue, m); + } + + protected void createJMSContext() { + if (mqConnectionHelper.getUserName() != null) { + jmsCtxt = mqConnFactory.createContext(mqConnectionHelper.getUserName(), mqConnectionHelper.getPassword().value(), + JMSContext.SESSION_TRANSACTED); + } else { + jmsCtxt = mqConnFactory.createContext(JMSContext.SESSION_TRANSACTED); + } + } + + protected void configureProducer() { + jmsProd = jmsCtxt.createProducer(); + jmsProd.setDeliveryMode(deliveryMode); + jmsProd.setTimeToLive(timeToLive); + } + + protected void createConsumerForStateQueue() { + if (stateQueue != null) { + try { + jmsCons = jmsCtxt.createConsumer(stateQueue); + } catch (final InvalidDestinationRuntimeException e) { + log.error("An invalid state queue is specified.", e); + throw e; + } + } + } +} diff --git a/src/main/java/com/ibm/eventstreams/connect/mqsink/JMSWorkerConnectionException.java b/src/main/java/com/ibm/eventstreams/connect/mqsink/JMSWorkerConnectionException.java new file mode 100644 index 0000000..1566b09 --- /dev/null +++ b/src/main/java/com/ibm/eventstreams/connect/mqsink/JMSWorkerConnectionException.java @@ -0,0 +1,28 @@ +/** + * Copyright 2023, 2024 IBM Corporation + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package com.ibm.eventstreams.connect.mqsink; + +public class JMSWorkerConnectionException extends RuntimeException { + + public JMSWorkerConnectionException(final String message) { + super(message); + } + + public JMSWorkerConnectionException(final String message, final Throwable exc) { + super(message, exc); + } + +} diff --git a/src/main/java/com/ibm/eventstreams/connect/mqsink/JMSWriter.java b/src/main/java/com/ibm/eventstreams/connect/mqsink/JMSWriter.java deleted file mode 100644 index 1aff03c..0000000 --- a/src/main/java/com/ibm/eventstreams/connect/mqsink/JMSWriter.java +++ /dev/null @@ -1,448 +0,0 @@ -/** - * Copyright 2017, 2020 IBM Corporation - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package com.ibm.eventstreams.connect.mqsink; - -import com.ibm.mq.MQException; -import com.ibm.mq.constants.MQConstants; -import com.ibm.mq.jms.MQConnectionFactory; -import com.ibm.mq.jms.MQQueue; -import com.ibm.eventstreams.connect.mqsink.builders.MessageBuilder; -import com.ibm.msg.client.wmq.WMQConstants; - -import java.io.FileInputStream; -import java.io.IOException; -import java.io.InputStream; -import java.net.MalformedURLException; -import java.net.URL; -import java.security.GeneralSecurityException; -import java.security.KeyStore; -import java.security.SecureRandom; -import java.util.Map; - -import javax.jms.DeliveryMode; -import javax.jms.JMSContext; -import javax.jms.JMSException; -import javax.jms.JMSProducer; -import javax.jms.JMSRuntimeException; -import javax.jms.Message; -import javax.net.ssl.KeyManager; -import javax.net.ssl.KeyManagerFactory; -import javax.net.ssl.SSLContext; -import javax.net.ssl.TrustManager; -import javax.net.ssl.TrustManagerFactory; - -import org.apache.kafka.connect.errors.ConnectException; -import org.apache.kafka.connect.errors.RetriableException; -import org.apache.kafka.connect.sink.SinkRecord; - -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - - -/** - * Writes messages to MQ using JMS. Uses a transacted session, adding messages to the current - * transaction until told to commit. Automatically reconnects as needed. 
- */ -public class JMSWriter { - private static final Logger log = LoggerFactory.getLogger(JMSWriter.class); - - // Configs - private String userName; - private String password; - - // JMS factory and context - private MQConnectionFactory mqConnFactory; - private JMSContext jmsCtxt; - private JMSProducer jmsProd; - private MQQueue queue; - private int deliveryMode = Message.DEFAULT_DELIVERY_MODE; - private long timeToLive = Message.DEFAULT_TIME_TO_LIVE; - - private MessageBuilder builder; - - private boolean connected = false; // Whether connected to MQ - private boolean inflight = false; // Whether messages in-flight in current transaction - private long reconnectDelayMillis = RECONNECT_DELAY_MILLIS_MIN; // Delay between repeated reconnect attempts - - final private static long RECONNECT_DELAY_MILLIS_MIN = 64L; - final private static long RECONNECT_DELAY_MILLIS_MAX = 8192L; - - public JMSWriter() { - } - - /** - * Configure this class. - * - * @param props initial configuration - * - * @throws ConnectException Operation failed and connector should stop. 
- */ - public void configure(final Map props) { - log.trace("[{}] Entry {}.configure, props={}", Thread.currentThread().getId(), this.getClass().getName(), props); - - final String queueManager = props.get(MQSinkConnector.CONFIG_NAME_MQ_QUEUE_MANAGER); - final String connectionMode = props.get(MQSinkConnector.CONFIG_NAME_MQ_CONNECTION_MODE); - final String connectionNameList = props.get(MQSinkConnector.CONFIG_NAME_MQ_CONNECTION_NAME_LIST); - final String channelName = props.get(MQSinkConnector.CONFIG_NAME_MQ_CHANNEL_NAME); - final String queueName = props.get(MQSinkConnector.CONFIG_NAME_MQ_QUEUE); - final String userName = props.get(MQSinkConnector.CONFIG_NAME_MQ_USER_NAME); - final String password = props.get(MQSinkConnector.CONFIG_NAME_MQ_PASSWORD); - final String ccdtUrl = props.get(MQSinkConnector.CONFIG_NAME_MQ_CCDT_URL); - final String builderClass = props.get(MQSinkConnector.CONFIG_NAME_MQ_MESSAGE_BUILDER); - final String mbj = props.get(MQSinkConnector.CONFIG_NAME_MQ_MESSAGE_BODY_JMS); - final String timeToLive = props.get(MQSinkConnector.CONFIG_NAME_MQ_TIME_TO_LIVE); - final String persistent = props.get(MQSinkConnector.CONFIG_NAME_MQ_PERSISTENT); - final String sslCipherSuite = props.get(MQSinkConnector.CONFIG_NAME_MQ_SSL_CIPHER_SUITE); - final String sslPeerName = props.get(MQSinkConnector.CONFIG_NAME_MQ_SSL_PEER_NAME); - final String sslKeystoreLocation = props.get(MQSinkConnector.CONFIG_NAME_MQ_SSL_KEYSTORE_LOCATION); - final String sslKeystorePassword = props.get(MQSinkConnector.CONFIG_NAME_MQ_SSL_KEYSTORE_PASSWORD); - final String sslTruststoreLocation = props.get(MQSinkConnector.CONFIG_NAME_MQ_SSL_TRUSTSTORE_LOCATION); - final String sslTruststorePassword = props.get(MQSinkConnector.CONFIG_NAME_MQ_SSL_TRUSTSTORE_PASSWORD); - final String useMQCSP = props.get(MQSinkConnector.CONFIG_NAME_MQ_USER_AUTHENTICATION_MQCSP); - final String useIBMCipherMappings = props.get(MQSinkConnector.CONFIG_NAME_MQ_SSL_USE_IBM_CIPHER_MAPPINGS); - - if 
(useIBMCipherMappings != null) { - System.setProperty("com.ibm.mq.cfg.useIBMCipherMappings", useIBMCipherMappings); - } - - int transportType = WMQConstants.WMQ_CM_CLIENT; - if (connectionMode != null) { - if (connectionMode.equals(MQSinkConnector.CONFIG_VALUE_MQ_CONNECTION_MODE_CLIENT)) { - transportType = WMQConstants.WMQ_CM_CLIENT; - } else if (connectionMode.equals(MQSinkConnector.CONFIG_VALUE_MQ_CONNECTION_MODE_BINDINGS)) { - transportType = WMQConstants.WMQ_CM_BINDINGS; - } else { - log.error("Unsupported MQ connection mode {}", connectionMode); - throw new ConnectException("Unsupported MQ connection mode"); - } - } - - try { - mqConnFactory = new MQConnectionFactory(); - mqConnFactory.setTransportType(transportType); - mqConnFactory.setQueueManager(queueManager); - mqConnFactory.setBooleanProperty(WMQConstants.USER_AUTHENTICATION_MQCSP, true); - if (useMQCSP != null) { - mqConnFactory.setBooleanProperty(WMQConstants.USER_AUTHENTICATION_MQCSP, Boolean.parseBoolean(useMQCSP)); - } - - if (transportType == WMQConstants.WMQ_CM_CLIENT) { - if (ccdtUrl != null) { - final URL ccdtUrlObject; - try { - ccdtUrlObject = new URL(ccdtUrl); - } catch (final MalformedURLException e) { - log.error("MalformedURLException exception {}", e); - throw new ConnectException("CCDT file url invalid", e); - } - mqConnFactory.setCCDTURL(ccdtUrlObject); - } else { - mqConnFactory.setConnectionNameList(connectionNameList); - mqConnFactory.setChannel(channelName); - } - - if (sslCipherSuite != null) { - mqConnFactory.setSSLCipherSuite(sslCipherSuite); - if (sslPeerName != null) { - mqConnFactory.setSSLPeerName(sslPeerName); - } - } - - if (sslKeystoreLocation != null || sslTruststoreLocation != null) { - final SSLContext sslContext = buildSslContext(sslKeystoreLocation, sslKeystorePassword, sslTruststoreLocation, sslTruststorePassword); - mqConnFactory.setSSLSocketFactory(sslContext.getSocketFactory()); - } - } - - queue = new MQQueue(queueName); - - this.userName = userName; - 
this.password = password; - - queue.setMessageBodyStyle(WMQConstants.WMQ_MESSAGE_BODY_MQ); - if (mbj != null) { - if (Boolean.parseBoolean(mbj)) { - queue.setMessageBodyStyle(WMQConstants.WMQ_MESSAGE_BODY_JMS); - } - } - - if (timeToLive != null) { - this.timeToLive = Long.parseLong(timeToLive); - } - if (persistent != null) { - this.deliveryMode = Boolean.parseBoolean(persistent) ? DeliveryMode.PERSISTENT : DeliveryMode.NON_PERSISTENT; - } - } catch (JMSException | JMSRuntimeException jmse) { - log.error("JMS exception {}", jmse); - throw new ConnectException(jmse); - } - - try { - final Class c = Class.forName(builderClass).asSubclass(MessageBuilder.class); - builder = c.newInstance(); - builder.configure(props); - } catch (ClassNotFoundException | ClassCastException | IllegalAccessException | InstantiationException | NullPointerException exc) { - log.error("Could not instantiate message builder {}", builderClass); - throw new ConnectException("Could not instantiate message builder", exc); - } - - log.trace("[{}] Exit {}.configure", Thread.currentThread().getId(), this.getClass().getName()); - } - - /** - * Connects to MQ. - */ - public void connect() { - log.trace("[{}] Entry {}.connect", Thread.currentThread().getId(), this.getClass().getName()); - - try { - if (userName != null) { - jmsCtxt = mqConnFactory.createContext(userName, password, JMSContext.SESSION_TRANSACTED); - } else { - jmsCtxt = mqConnFactory.createContext(JMSContext.SESSION_TRANSACTED); - } - - jmsProd = jmsCtxt.createProducer(); - jmsProd.setDeliveryMode(deliveryMode); - jmsProd.setTimeToLive(timeToLive); - connected = true; - - log.info("Connection to MQ established"); - } catch (final JMSRuntimeException jmse) { - log.info("Connection to MQ could not be established"); - log.error("JMS exception {}", jmse); - handleException(jmse); - } - - log.trace("[{}] Exit {}.connect", Thread.currentThread().getId(), this.getClass().getName()); - } - - /** - * Sends a message to MQ. 
Adds the message to the current transaction. Reconnects to MQ if required. - * - * @param r The message and schema to send - * - * @throws RetriableException Operation failed, but connector should continue to retry. - * @throws ConnectException Operation failed and connector should stop. - */ - public void send(final SinkRecord r) throws ConnectException, RetriableException { - log.trace("[{}] Entry {}.send", Thread.currentThread().getId(), this.getClass().getName()); - - connectInternal(); - - try { - final Message m = builder.fromSinkRecord(jmsCtxt, r); - inflight = true; - jmsProd.send(queue, m); - } catch (final JMSRuntimeException jmse) { - log.error("JMS exception {}", jmse); - throw handleException(jmse); - } - - log.trace("[{}] Exit {}.send", Thread.currentThread().getId(), this.getClass().getName()); - } - - - /** - * Commits the current transaction. - * - * @throws RetriableException Operation failed, but connector should continue to retry. - * @throws ConnectException Operation failed and connector should stop. - */ - public void commit() throws ConnectException, RetriableException { - log.trace("[{}] Entry {}.commit", Thread.currentThread().getId(), this.getClass().getName()); - - connectInternal(); - try { - if (inflight) { - inflight = false; - } - - jmsCtxt.commit(); - } catch (final JMSRuntimeException jmse) { - log.error("JMS exception {}", jmse); - throw handleException(jmse); - } - - log.trace("[{}] Exit {}.commit", Thread.currentThread().getId(), this.getClass().getName()); - } - - /** - * Closes the connection. 
- */ - public void close() { - log.trace("[{}] Entry {}.close", Thread.currentThread().getId(), this.getClass().getName()); - - try { - inflight = false; - connected = false; - - if (jmsCtxt != null) { - jmsCtxt.close(); - } - } catch (final JMSRuntimeException jmse) { - jmse.printStackTrace(); - } finally { - jmsCtxt = null; - log.debug("Connection to MQ closed"); - } - - log.trace("[{}] Exit {}.close", Thread.currentThread().getId(), this.getClass().getName()); - } - - /** - * Internal method to connect to MQ. - * - * @throws RetriableException Operation failed, but connector should continue to retry. - * @throws ConnectException Operation failed and connector should stop. - */ - private void connectInternal() throws ConnectException, RetriableException { - log.trace("[{}] Entry {}.connectInternal", Thread.currentThread().getId(), this.getClass().getName()); - - if (connected) { - return; - } - - try { - if (userName != null) { - jmsCtxt = mqConnFactory.createContext(userName, password, JMSContext.SESSION_TRANSACTED); - } else { - jmsCtxt = mqConnFactory.createContext(JMSContext.SESSION_TRANSACTED); - } - - jmsProd = jmsCtxt.createProducer(); - jmsProd.setDeliveryMode(deliveryMode); - jmsProd.setTimeToLive(timeToLive); - reconnectDelayMillis = RECONNECT_DELAY_MILLIS_MIN; - connected = true; - } catch (final JMSRuntimeException jmse) { - // Delay slightly so that repeated reconnect loops don't run too fast - try { - Thread.sleep(reconnectDelayMillis); - } catch (final InterruptedException e) { - e.printStackTrace(); - } - - if (reconnectDelayMillis < RECONNECT_DELAY_MILLIS_MAX) { - reconnectDelayMillis = reconnectDelayMillis * 2; - } - - log.error("JMS exception {}", jmse); - throw handleException(jmse); - } - - log.trace("[{}] Exit {}.connectInternal", Thread.currentThread().getId(), this.getClass().getName()); - } - - /** - * Handles exceptions from MQ. 
Some JMS exceptions are treated as retriable meaning that the - * connector can keep running and just trying again is likely to fix things. - */ - private ConnectException handleException(final Throwable exc) { - boolean isRetriable = false; - boolean mustClose = true; - int reason = -1; - - // Try to extract the MQ reason code to see if it's a retriable exception - Throwable t = exc.getCause(); - while (t != null) { - if (t instanceof MQException) { - final MQException mqe = (MQException) t; - log.error("MQ error: CompCode {}, Reason {}", mqe.getCompCode(), mqe.getReason()); - reason = mqe.getReason(); - break; - } - t = t.getCause(); - } - - switch (reason) { - // These reason codes indicate that the connection needs to be closed, but just retrying later - // will probably recover - case MQConstants.MQRC_BACKED_OUT: - case MQConstants.MQRC_CHANNEL_NOT_AVAILABLE: - case MQConstants.MQRC_CONNECTION_BROKEN: - case MQConstants.MQRC_HOST_NOT_AVAILABLE: - case MQConstants.MQRC_NOT_AUTHORIZED: - case MQConstants.MQRC_Q_MGR_NOT_AVAILABLE: - case MQConstants.MQRC_Q_MGR_QUIESCING: - case MQConstants.MQRC_Q_MGR_STOPPING: - case MQConstants.MQRC_UNEXPECTED_ERROR: - isRetriable = true; - break; - - // These reason codes indicates that the connect is still OK, but just retrying later - // will probably recover - possibly with administrative action on the queue manager - case MQConstants.MQRC_Q_FULL: - case MQConstants.MQRC_PUT_INHIBITED: - isRetriable = true; - mustClose = false; - break; - } - - if (mustClose) { - close(); - } - - if (isRetriable) { - return new RetriableException(exc); - } - - return new ConnectException(exc); - } - - private SSLContext buildSslContext(final String sslKeystoreLocation, final String sslKeystorePassword, final String sslTruststoreLocation, final String sslTruststorePassword) { - log.trace("[{}] Entry {}.buildSslContext", Thread.currentThread().getId(), this.getClass().getName()); - - try { - KeyManager[] keyManagers = null; - TrustManager[] 
trustManagers = null; - - if (sslKeystoreLocation != null) { - final KeyManagerFactory kmf = KeyManagerFactory.getInstance(KeyManagerFactory.getDefaultAlgorithm()); - kmf.init(loadKeyStore(sslKeystoreLocation, sslKeystorePassword), sslKeystorePassword.toCharArray()); - keyManagers = kmf.getKeyManagers(); - } - - if (sslTruststoreLocation != null) { - final TrustManagerFactory tmf = TrustManagerFactory.getInstance(TrustManagerFactory.getDefaultAlgorithm()); - tmf.init(loadKeyStore(sslTruststoreLocation, sslTruststorePassword)); - trustManagers = tmf.getTrustManagers(); - } - - final SSLContext sslContext = SSLContext.getInstance("TLS"); - sslContext.init(keyManagers, trustManagers, new SecureRandom()); - - log.trace("[{}] Exit {}.buildSslContext, retval={}", Thread.currentThread().getId(), this.getClass().getName(), sslContext); - return sslContext; - } catch (final GeneralSecurityException e) { - throw new ConnectException("Error creating SSLContext", e); - } - } - - private KeyStore loadKeyStore(final String location, final String password) throws GeneralSecurityException { - log.trace("[{}] Entry {}.loadKeyStore", Thread.currentThread().getId(), this.getClass().getName()); - - try (final InputStream ksStr = new FileInputStream(location)) { - final KeyStore ks = KeyStore.getInstance("JKS"); - ks.load(ksStr, password.toCharArray()); - - log.trace("[{}] Exit {}.loadKeyStore, retval={}", Thread.currentThread().getId(), this.getClass().getName(), ks); - return ks; - } catch (final IOException e) { - throw new ConnectException("Error reading keystore " + location, e); - } - } -} \ No newline at end of file diff --git a/src/main/java/com/ibm/eventstreams/connect/mqsink/MQConnectionHelper.java b/src/main/java/com/ibm/eventstreams/connect/mqsink/MQConnectionHelper.java new file mode 100644 index 0000000..cf0b8e0 --- /dev/null +++ b/src/main/java/com/ibm/eventstreams/connect/mqsink/MQConnectionHelper.java @@ -0,0 +1,137 @@ +/** + * Copyright 2023, 2024 IBM Corporation + * 
+ * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package com.ibm.eventstreams.connect.mqsink; + +import java.net.MalformedURLException; +import java.net.URL; + +import javax.jms.JMSException; +import javax.net.ssl.SSLContext; + +import org.apache.kafka.common.config.AbstractConfig; +import org.apache.kafka.common.config.types.Password; +import org.apache.kafka.connect.errors.ConnectException; + +import com.ibm.mq.jms.MQConnectionFactory; +import com.ibm.msg.client.wmq.WMQConstants; + +public class MQConnectionHelper { + private AbstractConfig config; + + public String getQueueManagerName() { + return config.getString(MQSinkConfig.CONFIG_NAME_MQ_QUEUE_MANAGER); + } + + public String getQueueName() { + return config.getString(MQSinkConfig.CONFIG_NAME_MQ_QUEUE); + } + + public String getStateQueueName() { + return config.getString(MQSinkConfig.CONFIG_NAME_MQ_EXACTLY_ONCE_STATE_QUEUE); + } + + public String getUserName() { + return config.getString(MQSinkConfig.CONFIG_NAME_MQ_USER_NAME); + } + + public Password getPassword() { + return config.getPassword(MQSinkConfig.CONFIG_NAME_MQ_PASSWORD); + } + + public boolean isMessageBodyJms() { + return config.getBoolean(MQSinkConfig.CONFIG_NAME_MQ_MESSAGE_BODY_JMS); + } + + public Long getTimeToLive() { + return config.getLong(MQSinkConfig.CONFIG_NAME_MQ_TIME_TO_LIVE); + } + + public Boolean isPersistent() { + return config.getBoolean(MQSinkConfig.CONFIG_NAME_MQ_PERSISTENT); + } + + public String 
getUseIBMCipherMappings() { + return config.getString(MQSinkConfig.CONFIG_NAME_MQ_SSL_USE_IBM_CIPHER_MAPPINGS); + } + + public int getTransportType() { + return getTransportType(config.getString(MQSinkConfig.CONFIG_NAME_MQ_CONNECTION_MODE)); + } + + public MQConnectionHelper(final AbstractConfig config) { + this.config = config; + } + + /** + * Get the transport type from the connection mode. + * + * @param connectionMode + * the connection mode + * @return the transport type + * @throws ConnectException + * if the connection mode is not supported + */ + public static int getTransportType(final String connectionMode) { + int transportType = WMQConstants.WMQ_CM_CLIENT; + if (connectionMode != null) { + if (connectionMode.equals(MQSinkConfig.CONFIG_VALUE_MQ_CONNECTION_MODE_CLIENT)) { + transportType = WMQConstants.WMQ_CM_CLIENT; + } else if (connectionMode.equals(MQSinkConfig.CONFIG_VALUE_MQ_CONNECTION_MODE_BINDINGS)) { + transportType = WMQConstants.WMQ_CM_BINDINGS; + } + } + return transportType; + } + + /** + * * Create a MQ connection factory. + * + * The connection factory is configured with the supplied properties. 
+ * + * @return + * @throws JMSException + */ + public MQConnectionFactory createMQConnFactory() throws JMSException, MalformedURLException { + final MQConnectionFactory mqConnFactory = new MQConnectionFactory(); + final int transportType = getTransportType(); + mqConnFactory.setTransportType(transportType); + mqConnFactory.setQueueManager(getQueueManagerName()); + mqConnFactory.setBooleanProperty(WMQConstants.USER_AUTHENTICATION_MQCSP, + config.getBoolean(MQSinkConfig.CONFIG_NAME_MQ_USER_AUTHENTICATION_MQCSP)); + + if (transportType == WMQConstants.WMQ_CM_CLIENT) { + final String ccdtUrl = config.getString(MQSinkConfig.CONFIG_NAME_MQ_CCDT_URL); + if (ccdtUrl != null) { + mqConnFactory.setCCDTURL(new URL(ccdtUrl)); + } else { + mqConnFactory.setConnectionNameList(config.getString(MQSinkConfig.CONFIG_NAME_MQ_CONNECTION_NAME_LIST)); + mqConnFactory.setChannel(config.getString(MQSinkConfig.CONFIG_NAME_MQ_CHANNEL_NAME)); + } + + mqConnFactory.setSSLCipherSuite(config.getString(MQSinkConfig.CONFIG_NAME_MQ_SSL_CIPHER_SUITE)); + mqConnFactory.setSSLPeerName(config.getString(MQSinkConfig.CONFIG_NAME_MQ_SSL_PEER_NAME)); + + final String sslKeystoreLocation = config.getString(MQSinkConfig.CONFIG_NAME_MQ_SSL_KEYSTORE_LOCATION); + final String sslTruststoreLocation = config.getString(MQSinkConfig.CONFIG_NAME_MQ_SSL_TRUSTSTORE_LOCATION); + if (sslKeystoreLocation != null || sslTruststoreLocation != null) { + final SSLContext sslContext = new SSLContextBuilder().buildSslContext(sslKeystoreLocation, + config.getPassword(MQSinkConfig.CONFIG_NAME_MQ_SSL_KEYSTORE_PASSWORD), sslTruststoreLocation, config.getPassword(MQSinkConfig.CONFIG_NAME_MQ_SSL_TRUSTSTORE_PASSWORD)); + mqConnFactory.setSSLSocketFactory(sslContext.getSocketFactory()); + } + } + return mqConnFactory; + } +} diff --git a/src/main/java/com/ibm/eventstreams/connect/mqsink/MQSinkConfig.java b/src/main/java/com/ibm/eventstreams/connect/mqsink/MQSinkConfig.java new file mode 100644 index 0000000..9795ddb --- /dev/null +++ 
b/src/main/java/com/ibm/eventstreams/connect/mqsink/MQSinkConfig.java @@ -0,0 +1,317 @@ +/** + * Copyright 2023, 2024 IBM Corporation + *

+ * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + *

+ * http://www.apache.org/licenses/LICENSE-2.0 + *

+ * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package com.ibm.eventstreams.connect.mqsink; + +import java.io.File; +import java.net.MalformedURLException; +import java.net.URL; + +import com.ibm.eventstreams.connect.mqsink.builders.MessageBuilder; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +import org.apache.kafka.connect.storage.Converter; +import org.apache.kafka.common.config.ConfigDef; +import org.apache.kafka.common.config.ConfigDef.Importance; +import org.apache.kafka.common.config.ConfigDef.Range; +import org.apache.kafka.common.config.ConfigDef.Type; +import org.apache.kafka.common.config.ConfigDef.Width; +import org.apache.kafka.common.config.ConfigDef.Validator; +import org.apache.kafka.common.config.ConfigException; + +public class MQSinkConfig { + + public static final Logger log = LoggerFactory.getLogger(MQSinkConfig.class); + + public static final String CONFIG_GROUP_MQ = "mq"; + + public static final String CONFIG_NAME_MQ_QUEUE_MANAGER = "mq.queue.manager"; + public static final String CONFIG_DOCUMENTATION_MQ_QUEUE_MANAGER = "The name of the MQ queue manager."; + public static final String CONFIG_DISPLAY_MQ_QUEUE_MANAGER = "Queue manager"; + + public static final String CONFIG_NAME_MQ_CONNECTION_MODE = "mq.connection.mode"; + public static final String CONFIG_DOCUMENTATION_MQ_CONNECTION_MODE = "The connection mode - bindings or client."; + public static final String CONFIG_DISPLAY_MQ_CONNECTION_MODE = "Connection mode"; + public static final String CONFIG_VALUE_MQ_CONNECTION_MODE_CLIENT = "client"; + public static final String CONFIG_VALUE_MQ_CONNECTION_MODE_BINDINGS = "bindings"; + + public static final String CONFIG_NAME_MQ_CONNECTION_NAME_LIST = 
"mq.connection.name.list"; + public static final String CONFIG_DOCUMENTATION_MQ_CONNNECTION_NAME_LIST = "A list of one or more host(port) entries for connecting to the queue manager. Entries are separated with a comma."; + public static final String CONFIG_DISPLAY_MQ_CONNECTION_NAME_LIST = "List of connection names for queue manager"; + + public static final String CONFIG_NAME_MQ_CHANNEL_NAME = "mq.channel.name"; + public static final String CONFIG_DOCUMENTATION_MQ_CHANNEL_NAME = "The name of the server-connection channel."; + public static final String CONFIG_DISPLAY_MQ_CHANNEL_NAME = "Channel name"; + + public static final String CONFIG_NAME_MQ_QUEUE = "mq.queue"; + public static final String CONFIG_DOCUMENTATION_MQ_QUEUE = "The name of the target MQ queue."; + public static final String CONFIG_DISPLAY_MQ_QUEUE = "Target queue"; + + public static final String CONFIG_NAME_MQ_EXACTLY_ONCE_STATE_QUEUE = "mq.exactly.once.state.queue"; + public static final String CONFIG_DOCUMENTATION_MQ_EXACTLY_ONCE_STATE_QUEUE = "The name of the MQ queue used to store the state of the connector when exactly-once delivery is enabled."; + public static final String CONFIG_DISPLAY_MQ_EXACTLY_ONCE_STATE_QUEUE = "Exactly-once state queue"; + + public static final String CONFIG_NAME_MQ_USER_NAME = "mq.user.name"; + public static final String CONFIG_DOCUMENTATION_MQ_USER_NAME = "The user name for authenticating with the queue manager."; + public static final String CONFIG_DISPLAY_MQ_USER_NAME = "User name"; + + public static final String CONFIG_NAME_MQ_PASSWORD = "mq.password"; + public static final String CONFIG_DOCUMENTATION_MQ_PASSWORD = "The password for authenticating with the queue manager."; + public static final String CONFIG_DISPLAY_MQ_PASSWORD = "Password"; + + public static final String CONFIG_NAME_MQ_CCDT_URL = "mq.ccdt.url"; + public static final String CONFIG_DOCUMENTATION_MQ_CCDT_URL = "The CCDT URL to use to establish a connection to the queue manager."; + public static 
final String CONFIG_DISPLAY_MQ_CCDT_URL = "CCDT URL"; + + public static final String CONFIG_NAME_MQ_MESSAGE_BUILDER = "mq.message.builder"; + public static final String CONFIG_DOCUMENTATION_MQ_MESSAGE_BUILDER = "The class used to build the MQ messages."; + public static final String CONFIG_DISPLAY_MQ_MESSAGE_BUILDER = "Message builder"; + + public static final String CONFIG_NAME_MQ_MESSAGE_BODY_JMS = "mq.message.body.jms"; + public static final String CONFIG_DOCUMENTATION_MQ_MESSAGE_BODY_JMS = "Whether to generate the message body as a JMS message type."; + public static final String CONFIG_DISPLAY_MQ_MESSAGE_BODY_JMS = "Message body as JMS"; + + public static final String CONFIG_NAME_MQ_TIME_TO_LIVE = "mq.time.to.live"; + public static final String CONFIG_DOCUMENTATION_MQ_TIME_TO_LIVE = "Time-to-live in milliseconds for messages sent to MQ."; + public static final String CONFIG_DISPLAY_MQ_TIME_TO_LIVE = "Message time-to-live (ms)"; + + public static final String CONFIG_NAME_MQ_PERSISTENT = "mq.persistent"; + public static final String CONFIG_DOCUMENTATION_MQ_PERSISTENT = "Send persistent or non-persistent messages to MQ."; + public static final String CONFIG_DISPLAY_MQ_PERSISTENT = "Send persistent messages"; + + public static final String CONFIG_NAME_MQ_SSL_CIPHER_SUITE = "mq.ssl.cipher.suite"; + public static final String CONFIG_DOCUMENTATION_MQ_SSL_CIPHER_SUITE = "The name of the cipher suite for the TLS (SSL) connection."; + public static final String CONFIG_DISPLAY_MQ_SSL_CIPHER_SUITE = "SSL cipher suite"; + + public static final String CONFIG_NAME_MQ_SSL_PEER_NAME = "mq.ssl.peer.name"; + public static final String CONFIG_DOCUMENTATION_MQ_SSL_PEER_NAME = "The distinguished name pattern of the TLS (SSL) peer."; + public static final String CONFIG_DISPLAY_MQ_SSL_PEER_NAME = "SSL peer name"; + + public static final String CONFIG_NAME_MQ_SSL_KEYSTORE_LOCATION = "mq.ssl.keystore.location"; + public static final String CONFIG_DOCUMENTATION_MQ_SSL_KEYSTORE_LOCATION 
= "The path to the JKS keystore to use for the TLS (SSL) connection."; + public static final String CONFIG_DISPLAY_MQ_SSL_KEYSTORE_LOCATION = "SSL keystore location"; + + public static final String CONFIG_NAME_MQ_SSL_KEYSTORE_PASSWORD = "mq.ssl.keystore.password"; + public static final String CONFIG_DOCUMENTATION_MQ_SSL_KEYSTORE_PASSWORD = "The password of the JKS keystore to use for the TLS (SSL) connection."; + public static final String CONFIG_DISPLAY_MQ_SSL_KEYSTORE_PASSWORD = "SSL keystore password"; + + public static final String CONFIG_NAME_MQ_SSL_TRUSTSTORE_LOCATION = "mq.ssl.truststore.location"; + public static final String CONFIG_DOCUMENTATION_MQ_SSL_TRUSTSTORE_LOCATION = "The path to the JKS truststore to use for the TLS (SSL) connection."; + public static final String CONFIG_DISPLAY_MQ_SSL_TRUSTSTORE_LOCATION = "SSL truststore location"; + + public static final String CONFIG_NAME_MQ_SSL_TRUSTSTORE_PASSWORD = "mq.ssl.truststore.password"; + public static final String CONFIG_DOCUMENTATION_MQ_SSL_TRUSTSTORE_PASSWORD = "The password of the JKS truststore to use for the TLS (SSL) connection."; + public static final String CONFIG_DISPLAY_MQ_SSL_TRUSTSTORE_PASSWORD = "SSL truststore password"; + + public static final String CONFIG_NAME_MQ_SSL_USE_IBM_CIPHER_MAPPINGS = "mq.ssl.use.ibm.cipher.mappings"; + public static final String CONFIG_DOCUMENTATION_MQ_SSL_USE_IBM_CIPHER_MAPPINGS = "Whether to set system property to control use of IBM cipher mappings."; + public static final String CONFIG_DISPLAY_MQ_SSL_USE_IBM_CIPHER_MAPPINGS = "Use IBM cipher mappings"; + + public static final String CONFIG_NAME_MQ_MESSAGE_BUILDER_KEY_HEADER = "mq.message.builder.key.header"; + public static final String CONFIG_DOCUMENTATION_MQ_MESSAGE_BUILDER_KEY_HEADER = "The JMS message header to set from the Kafka record key."; + public static final String CONFIG_DISPLAY_MQ_MESSAGE_BUILDER_KEY_HEADER = "Record builder key header"; + public static final String 
CONFIG_VALUE_MQ_MESSAGE_BUILDER_KEY_HEADER_JMSCORRELATIONID = "JMSCorrelationID"; + + public static final String CONFIG_NAME_MQ_MESSAGE_BUILDER_VALUE_CONVERTER = "mq.message.builder.value.converter"; + public static final String CONFIG_DOCUMENTATION_MQ_MESSAGE_BUILDER_VALUE_CONVERTER = "Prefix for configuring message builder's value converter."; + public static final String CONFIG_DISPLAY_MQ_MESSAGE_BUILDER_VALUE_CONVERTER = "Message builder's value converter"; + + public static final String CONFIG_NAME_MQ_MESSAGE_BUILDER_TOPIC_PROPERTY = "mq.message.builder.topic.property"; + public static final String CONFIG_DOCUMENTATION_MQ_MESSAGE_BUILDER_TOPIC_PROPERTY = "The JMS message property to set from the Kafka topic."; + public static final String CONFIG_DISPLAY_MQ_MESSAGE_BUILDER_TOPIC_PROPERTY = "Kafka topic message property"; + + public static final String CONFIG_NAME_MQ_MESSAGE_BUILDER_PARTITION_PROPERTY = "mq.message.builder.partition.property"; + public static final String CONFIG_DOCUMENTATION_MQ_MESSAGE_BUILDER_PARTITION_PROPERTY = "The JMS message property to set from the Kafka partition."; + public static final String CONFIG_DISPLAY_MQ_MESSAGE_BUILDER_PARTITION_PROPERTY = "Kafka partition message property"; + + public static final String CONFIG_NAME_MQ_MESSAGE_BUILDER_OFFSET_PROPERTY = "mq.message.builder.offset.property"; + public static final String CONFIG_DOCUMENTATION_MQ_MESSAGE_BUILDER_OFFSET_PROPERTY = "The JMS message property to set from the Kafka offset."; + public static final String CONFIG_DISPLAY_MQ_MESSAGE_BUILDER_OFFSET_PROPERTY = "Kafka offset message property"; + + public static final String CONFIG_NAME_MQ_REPLY_QUEUE = "mq.reply.queue"; + public static final String CONFIG_DOCUMENTATION_MQ_REPLY_QUEUE = "The name of the reply-to queue, as a queue name or URI."; + public static final String CONFIG_DISPLAY_MQ_REPLY_QUEUE = "Reply-to queue"; + + public static final String CONFIG_NAME_MQ_USER_AUTHENTICATION_MQCSP = "mq.user.authentication.mqcsp"; + 
public static final String CONFIG_DOCUMENTATION_MQ_USER_AUTHENTICATION_MQCSP = "Whether to use MQ connection security parameters (MQCSP)."; + public static final String CONFIG_DISPLAY_MQ_USER_AUTHENTICATION_MQCSP = "User authentication using MQCSP"; + + public static final String CONFIG_NAME_KAFKA_HEADERS_COPY_TO_JMS_PROPERTIES = "mq.kafka.headers.copy.to.jms.properties"; + public static final String CONFIG_DOCUMENTATION_KAFKA_HEADERS_COPY_TO_JMS_PROPERTIES = "Whether to copy Kafka headers to JMS message properties."; + public static final String CONFIG_DISPLAY_KAFKA_HEADERS_COPY_TO_JMS_PROPERTIES = "Copy Kafka headers to JMS message properties"; + + public static final String CONFIG_NAME_MQ_RETRY_BACKOFF_MS = "mq.retry.backoff.ms"; + public static final String CONFIG_DOCUMENTATION_MQ_RETRY_BACKOFF_MS = "Time to wait, in milliseconds, before retrying after retriable exceptions"; + public static final String CONFIG_DISPLAY_MQ_RETRY_BACKOFF_MS = "Retry backoff (ms)"; + + // https://www.ibm.com/docs/en/ibm-mq/9.3?topic=amffmcja-reading-writing-message-descriptor-from-mq-classes-jms-application + public static final String CONFIG_NAME_MQ_MQMD_WRITE_ENABLED = "mq.message.mqmd.write"; + public static final String CONFIG_DISPLAY_MQ_MQMD_WRITE_ENABLED = "Enable a custom message builder to write MQ message descriptors"; + public static final String CONFIG_DOCUMENTATION_MQ_MQMD_WRITE_ENABLED = "This configuration option determines whether the MQMD structure will be written along with the message data. Enabling this option allows control information to accompany the application data during message transmission between sending and receiving applications. 
Disabling this option will exclude the MQMD structure from the message payload."; + + // https://www.ibm.com/docs/en/ibm-mq/9.3?topic=application-jms-message-object-properties + public static final String CONFIG_NAME_MQ_MQMD_MESSAGE_CONTEXT = "mq.message.mqmd.context"; + public static final String CONFIG_DISPLAY_MQ_MQMD_MESSAGE_CONTEXT = "Message context to set on the destination queue. This is required when setting some message descriptors."; + public static final String CONFIG_DOCUMENTATION_MQ_MQMD_MESSAGE_CONTEXT = "This configuration option specifies the context in which MQMD properties are applied. Certain properties require this context to be set appropriately for them to take effect. Valid options for WMQ_MQMD_MESSAGE_CONTEXT are IDENTITY for WMQ_MDCTX_SET_IDENTITY_CONTEXT or ALL for WMQ_MDCTX_SET_ALL_CONTEXT."; + + private static final Validator ANY_VALUE_VALID = null; + + public static ConfigDef config() { + final ConfigDef config = new ConfigDef(); + + config.define(CONFIG_NAME_MQ_QUEUE_MANAGER, Type.STRING, ConfigDef.NO_DEFAULT_VALUE, new ConfigDef.NonEmptyStringWithoutControlChars(), Importance.HIGH, CONFIG_DOCUMENTATION_MQ_QUEUE_MANAGER, CONFIG_GROUP_MQ, 1, Width.MEDIUM, CONFIG_DISPLAY_MQ_QUEUE_MANAGER); + + config.define(CONFIG_NAME_MQ_CONNECTION_MODE, Type.STRING, CONFIG_VALUE_MQ_CONNECTION_MODE_CLIENT, ConfigDef.ValidString.in(CONFIG_VALUE_MQ_CONNECTION_MODE_CLIENT, CONFIG_VALUE_MQ_CONNECTION_MODE_BINDINGS), Importance.MEDIUM, CONFIG_DOCUMENTATION_MQ_CONNECTION_MODE, CONFIG_GROUP_MQ, 2, Width.SHORT, CONFIG_DISPLAY_MQ_CONNECTION_MODE); + + config.define(CONFIG_NAME_MQ_CONNECTION_NAME_LIST, Type.STRING, null, ANY_VALUE_VALID, Importance.MEDIUM, CONFIG_DOCUMENTATION_MQ_CONNNECTION_NAME_LIST, CONFIG_GROUP_MQ, 3, Width.LONG, CONFIG_DISPLAY_MQ_CONNECTION_NAME_LIST); + + config.define(CONFIG_NAME_MQ_CHANNEL_NAME, Type.STRING, null, ANY_VALUE_VALID, Importance.MEDIUM, CONFIG_DOCUMENTATION_MQ_CHANNEL_NAME, CONFIG_GROUP_MQ, 4, Width.MEDIUM, 
CONFIG_DISPLAY_MQ_CHANNEL_NAME); + + config.define(CONFIG_NAME_MQ_CCDT_URL, Type.STRING, null, new ValidURL(), Importance.MEDIUM, CONFIG_DOCUMENTATION_MQ_CCDT_URL, CONFIG_GROUP_MQ, 5, Width.MEDIUM, CONFIG_DISPLAY_MQ_CCDT_URL); + + config.define(CONFIG_NAME_MQ_QUEUE, Type.STRING, ConfigDef.NO_DEFAULT_VALUE, new ConfigDef.NonEmptyStringWithoutControlChars(), Importance.HIGH, CONFIG_DOCUMENTATION_MQ_QUEUE, CONFIG_GROUP_MQ, 6, Width.LONG, CONFIG_DISPLAY_MQ_QUEUE); + + config.define(CONFIG_NAME_MQ_USER_NAME, Type.STRING, null, ANY_VALUE_VALID, Importance.MEDIUM, CONFIG_DOCUMENTATION_MQ_USER_NAME, CONFIG_GROUP_MQ, 7, Width.MEDIUM, CONFIG_DISPLAY_MQ_USER_NAME); + + config.define(CONFIG_NAME_MQ_PASSWORD, Type.PASSWORD, null, ANY_VALUE_VALID, Importance.MEDIUM, CONFIG_DOCUMENTATION_MQ_PASSWORD, CONFIG_GROUP_MQ, 8, Width.MEDIUM, CONFIG_DISPLAY_MQ_PASSWORD); + + config.define(CONFIG_NAME_MQ_MESSAGE_BUILDER, Type.STRING, ConfigDef.NO_DEFAULT_VALUE, new ValidClass(), Importance.HIGH, CONFIG_DOCUMENTATION_MQ_MESSAGE_BUILDER, CONFIG_GROUP_MQ, 9, Width.MEDIUM, CONFIG_DISPLAY_MQ_MESSAGE_BUILDER); + + config.define(CONFIG_NAME_MQ_MESSAGE_BODY_JMS, Type.BOOLEAN, Boolean.FALSE, new ConfigDef.NonNullValidator(), Importance.MEDIUM, CONFIG_DOCUMENTATION_MQ_MESSAGE_BODY_JMS, CONFIG_GROUP_MQ, 10, Width.SHORT, CONFIG_DISPLAY_MQ_MESSAGE_BODY_JMS); + + config.define(CONFIG_NAME_MQ_TIME_TO_LIVE, Type.LONG, 0, Range.between(0L, 99999999900L), Importance.MEDIUM, CONFIG_DOCUMENTATION_MQ_TIME_TO_LIVE, CONFIG_GROUP_MQ, 11, Width.SHORT, CONFIG_DISPLAY_MQ_TIME_TO_LIVE); + + config.define(CONFIG_NAME_MQ_PERSISTENT, Type.BOOLEAN, Boolean.TRUE, Importance.MEDIUM, CONFIG_DOCUMENTATION_MQ_PERSISTENT, CONFIG_GROUP_MQ, 12, Width.SHORT, CONFIG_DISPLAY_MQ_PERSISTENT); + + config.define(CONFIG_NAME_MQ_SSL_CIPHER_SUITE, Type.STRING, null, ANY_VALUE_VALID, Importance.MEDIUM, CONFIG_DOCUMENTATION_MQ_SSL_CIPHER_SUITE, CONFIG_GROUP_MQ, 13, Width.MEDIUM, CONFIG_DISPLAY_MQ_SSL_CIPHER_SUITE); + + 
config.define(CONFIG_NAME_MQ_SSL_PEER_NAME, Type.STRING, null, ANY_VALUE_VALID, Importance.MEDIUM, CONFIG_DOCUMENTATION_MQ_SSL_PEER_NAME, CONFIG_GROUP_MQ, 14, Width.MEDIUM, CONFIG_DISPLAY_MQ_SSL_PEER_NAME); + + config.define(CONFIG_NAME_MQ_SSL_KEYSTORE_LOCATION, Type.STRING, null, new ValidFileLocation(), Importance.MEDIUM, CONFIG_DOCUMENTATION_MQ_SSL_KEYSTORE_LOCATION, CONFIG_GROUP_MQ, 15, Width.MEDIUM, CONFIG_DISPLAY_MQ_SSL_KEYSTORE_LOCATION); + + config.define(CONFIG_NAME_MQ_SSL_KEYSTORE_PASSWORD, Type.PASSWORD, null, Importance.MEDIUM, CONFIG_DOCUMENTATION_MQ_SSL_KEYSTORE_PASSWORD, CONFIG_GROUP_MQ, 16, Width.MEDIUM, CONFIG_DISPLAY_MQ_SSL_KEYSTORE_PASSWORD); + + config.define(CONFIG_NAME_MQ_SSL_TRUSTSTORE_LOCATION, Type.STRING, null, new ValidFileLocation(), Importance.MEDIUM, CONFIG_DOCUMENTATION_MQ_SSL_TRUSTSTORE_LOCATION, CONFIG_GROUP_MQ, 17, Width.MEDIUM, CONFIG_DISPLAY_MQ_SSL_TRUSTSTORE_LOCATION); + + config.define(CONFIG_NAME_MQ_SSL_TRUSTSTORE_PASSWORD, Type.PASSWORD, null, Importance.MEDIUM, CONFIG_DOCUMENTATION_MQ_SSL_TRUSTSTORE_PASSWORD, CONFIG_GROUP_MQ, 18, Width.MEDIUM, CONFIG_DISPLAY_MQ_SSL_TRUSTSTORE_PASSWORD); + + config.define(CONFIG_NAME_MQ_MESSAGE_BUILDER_KEY_HEADER, Type.STRING, null, ConfigDef.ValidString.in(null, CONFIG_VALUE_MQ_MESSAGE_BUILDER_KEY_HEADER_JMSCORRELATIONID), Importance.MEDIUM, CONFIG_DOCUMENTATION_MQ_MESSAGE_BUILDER_KEY_HEADER, CONFIG_GROUP_MQ, 19, Width.MEDIUM, CONFIG_DISPLAY_MQ_MESSAGE_BUILDER_KEY_HEADER); + + config.define(CONFIG_NAME_MQ_MESSAGE_BUILDER_VALUE_CONVERTER, Type.STRING, null, new ValidClass(), Importance.LOW, CONFIG_DOCUMENTATION_MQ_MESSAGE_BUILDER_VALUE_CONVERTER, CONFIG_GROUP_MQ, 20, Width.MEDIUM, CONFIG_DISPLAY_MQ_MESSAGE_BUILDER_VALUE_CONVERTER); + + config.define(CONFIG_NAME_MQ_MESSAGE_BUILDER_TOPIC_PROPERTY, Type.STRING, null, ANY_VALUE_VALID, Importance.LOW, CONFIG_DOCUMENTATION_MQ_MESSAGE_BUILDER_TOPIC_PROPERTY, CONFIG_GROUP_MQ, 21, Width.MEDIUM, CONFIG_DISPLAY_MQ_MESSAGE_BUILDER_TOPIC_PROPERTY); + + 
config.define(CONFIG_NAME_MQ_MESSAGE_BUILDER_PARTITION_PROPERTY, Type.STRING, null, ANY_VALUE_VALID, Importance.LOW, CONFIG_DOCUMENTATION_MQ_MESSAGE_BUILDER_PARTITION_PROPERTY, CONFIG_GROUP_MQ, 22, Width.MEDIUM, CONFIG_DISPLAY_MQ_MESSAGE_BUILDER_PARTITION_PROPERTY); + + config.define(CONFIG_NAME_MQ_MESSAGE_BUILDER_OFFSET_PROPERTY, Type.STRING, null, ANY_VALUE_VALID, Importance.LOW, CONFIG_DOCUMENTATION_MQ_MESSAGE_BUILDER_OFFSET_PROPERTY, CONFIG_GROUP_MQ, 23, Width.MEDIUM, CONFIG_DISPLAY_MQ_MESSAGE_BUILDER_OFFSET_PROPERTY); + + config.define(CONFIG_NAME_MQ_REPLY_QUEUE, Type.STRING, null, ANY_VALUE_VALID, Importance.LOW, CONFIG_DOCUMENTATION_MQ_REPLY_QUEUE, CONFIG_GROUP_MQ, 24, Width.MEDIUM, CONFIG_DISPLAY_MQ_REPLY_QUEUE); + + config.define(CONFIG_NAME_MQ_USER_AUTHENTICATION_MQCSP, Type.BOOLEAN, Boolean.TRUE, Importance.LOW, CONFIG_DOCUMENTATION_MQ_USER_AUTHENTICATION_MQCSP, CONFIG_GROUP_MQ, 25, Width.SHORT, CONFIG_DISPLAY_MQ_USER_AUTHENTICATION_MQCSP); + + config.define(CONFIG_NAME_MQ_SSL_USE_IBM_CIPHER_MAPPINGS, Type.BOOLEAN, null, Importance.LOW, CONFIG_DOCUMENTATION_MQ_SSL_USE_IBM_CIPHER_MAPPINGS, CONFIG_GROUP_MQ, 26, Width.SHORT, CONFIG_DISPLAY_MQ_SSL_USE_IBM_CIPHER_MAPPINGS); + + config.define(CONFIG_NAME_KAFKA_HEADERS_COPY_TO_JMS_PROPERTIES, Type.BOOLEAN, Boolean.FALSE, Importance.LOW, CONFIG_DOCUMENTATION_KAFKA_HEADERS_COPY_TO_JMS_PROPERTIES, CONFIG_GROUP_MQ, 27, Width.SHORT, CONFIG_DISPLAY_KAFKA_HEADERS_COPY_TO_JMS_PROPERTIES); + + config.define(CONFIG_NAME_MQ_RETRY_BACKOFF_MS, Type.LONG, 60000, Range.between(0L, 99999999900L), Importance.LOW, CONFIG_DOCUMENTATION_MQ_RETRY_BACKOFF_MS, CONFIG_GROUP_MQ, 28, Width.SHORT, CONFIG_DISPLAY_MQ_RETRY_BACKOFF_MS); + + config.define(CONFIG_NAME_MQ_EXACTLY_ONCE_STATE_QUEUE, Type.STRING, null, ANY_VALUE_VALID, Importance.LOW, CONFIG_DOCUMENTATION_MQ_EXACTLY_ONCE_STATE_QUEUE, CONFIG_GROUP_MQ, 29, Width.LONG, CONFIG_DISPLAY_MQ_EXACTLY_ONCE_STATE_QUEUE); + + config.define(CONFIG_NAME_MQ_MQMD_WRITE_ENABLED, Type.BOOLEAN, 
false, Importance.LOW, + CONFIG_DOCUMENTATION_MQ_MQMD_WRITE_ENABLED, CONFIG_GROUP_MQ, 30, Width.LONG, + CONFIG_DISPLAY_MQ_MQMD_WRITE_ENABLED); + + config.define(CONFIG_NAME_MQ_MQMD_MESSAGE_CONTEXT, Type.STRING, null, + ConfigDef.ValidString.in(null, "identity", "IDENTITY", "all", "ALL"), + Importance.LOW, + CONFIG_DOCUMENTATION_MQ_MQMD_MESSAGE_CONTEXT, CONFIG_GROUP_MQ, 31, Width.LONG, + CONFIG_DISPLAY_MQ_MQMD_MESSAGE_CONTEXT); + return config; + } + + + private static class ValidURL implements ConfigDef.Validator { + @Override + public void ensureValid(final String name, final Object value) { + final String stringValue = (String) value; + if (stringValue == null || stringValue.isEmpty()) { + // URLs are optional values + return; + } + + try { + new URL(stringValue); + + } catch (final MalformedURLException exception) { + throw new ConfigException(name, value, "Value must be a URL for a CCDT file"); + } + } + } + + private static class ValidClass implements ConfigDef.Validator { + @Override + public void ensureValid(final String name, final Object value) { + Class requiredClass = null; + final String stringValue = (String) value; + if (name.endsWith("builder")) { + requiredClass = MessageBuilder.class; + } else { // converter + requiredClass = Converter.class; + + } + if (stringValue == null || stringValue.isEmpty()) { + return; + } + try { + Class.forName(stringValue).asSubclass(requiredClass).newInstance(); + } catch (final ClassNotFoundException exc) { + log.error("Failed to validate class {}", stringValue); + throw new ConfigException(name, value, "Class must be accessible on the classpath for Kafka Connect"); + } catch (final ClassCastException | IllegalAccessException exc) { + log.error("Failed to validate class {}", stringValue); + throw new ConfigException(name, value, "Class must be an implementation of " + requiredClass.getCanonicalName()); + } catch (final InstantiationException exc) { + log.error("Failed to validate class {}", stringValue); + throw new 
ConfigException(name, value, "Unable to create an instance of the class"); + } catch (final NullPointerException exc) { + throw new ConfigException(name, value, "Value must not be null"); + } + } + } + + private static class ValidFileLocation implements ConfigDef.Validator { + @Override + public void ensureValid(final String name, final Object value) { + final String stringValue = (String) value; + if (stringValue == null || stringValue.isEmpty()) { + // URLs are optional values + return; + } + File f = null; + try { + f = new File(stringValue); + } catch (final Exception exception) { + throw new ConfigException(name, value, "Value must be a File Location"); + } + if (!f.isFile()) { + throw new ConfigException(name, value, "Value must be a File location"); + } + if (!f.canRead()) { + throw new ConfigException(name, value, "Value must be a readable file"); + } + } + } +} diff --git a/src/main/java/com/ibm/eventstreams/connect/mqsink/MQSinkConnector.java b/src/main/java/com/ibm/eventstreams/connect/mqsink/MQSinkConnector.java index 763c5e9..910b84b 100644 --- a/src/main/java/com/ibm/eventstreams/connect/mqsink/MQSinkConnector.java +++ b/src/main/java/com/ibm/eventstreams/connect/mqsink/MQSinkConnector.java @@ -1,5 +1,5 @@ /** - * Copyright 2017, 2020 IBM Corporation + * Copyright 2017, 2020, 2023, 2024 IBM Corporation * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. 
@@ -21,12 +21,10 @@ import java.util.Map; import java.util.Map.Entry; +import org.apache.kafka.common.config.AbstractConfig; import org.apache.kafka.common.config.ConfigDef; -import org.apache.kafka.common.config.ConfigDef.Importance; -import org.apache.kafka.common.config.ConfigDef.Range; -import org.apache.kafka.common.config.ConfigDef.Type; -import org.apache.kafka.common.config.ConfigDef.Width; import org.apache.kafka.connect.connector.Task; +import org.apache.kafka.connect.errors.ConnectException; import org.apache.kafka.connect.sink.SinkConnector; import org.slf4j.Logger; @@ -35,148 +33,29 @@ public class MQSinkConnector extends SinkConnector { private static final Logger log = LoggerFactory.getLogger(MQSinkConnector.class); - public static final String CONFIG_GROUP_MQ = "mq"; - - public static final String CONFIG_NAME_MQ_QUEUE_MANAGER = "mq.queue.manager"; - public static final String CONFIG_DOCUMENTATION_MQ_QUEUE_MANAGER = "The name of the MQ queue manager."; - public static final String CONFIG_DISPLAY_MQ_QUEUE_MANAGER = "Queue manager"; - - public static final String CONFIG_NAME_MQ_CONNECTION_MODE = "mq.connection.mode"; - public static final String CONFIG_DOCUMENTATION_MQ_CONNECTION_MODE = "The connection mode - bindings or client."; - public static final String CONFIG_DISPLAY_MQ_CONNECTION_MODE = "Connection mode"; - public static final String CONFIG_VALUE_MQ_CONNECTION_MODE_CLIENT = "client"; - public static final String CONFIG_VALUE_MQ_CONNECTION_MODE_BINDINGS = "bindings"; - - public static final String CONFIG_NAME_MQ_CONNECTION_NAME_LIST = "mq.connection.name.list"; - public static final String CONFIG_DOCUMENTATION_MQ_CONNNECTION_NAME_LIST = "A list of one or more host(port) entries for connecting to the queue manager. 
Entries are separated with a comma."; - public static final String CONFIG_DISPLAY_MQ_CONNECTION_NAME_LIST = "List of connection names for queue manager"; - - public static final String CONFIG_NAME_MQ_CHANNEL_NAME = "mq.channel.name"; - public static final String CONFIG_DOCUMENTATION_MQ_CHANNEL_NAME = "The name of the server-connection channel."; - public static final String CONFIG_DISPLAY_MQ_CHANNEL_NAME = "Channel name"; - - public static final String CONFIG_NAME_MQ_QUEUE = "mq.queue"; - public static final String CONFIG_DOCUMENTATION_MQ_QUEUE = "The name of the target MQ queue."; - public static final String CONFIG_DISPLAY_MQ_QUEUE = "Target queue"; - - public static final String CONFIG_NAME_MQ_USER_NAME = "mq.user.name"; - public static final String CONFIG_DOCUMENTATION_MQ_USER_NAME = "The user name for authenticating with the queue manager."; - public static final String CONFIG_DISPLAY_MQ_USER_NAME = "User name"; - - public static final String CONFIG_NAME_MQ_PASSWORD = "mq.password"; - public static final String CONFIG_DOCUMENTATION_MQ_PASSWORD = "The password for authenticating with the queue manager."; - public static final String CONFIG_DISPLAY_MQ_PASSWORD = "Password"; - - public static final String CONFIG_NAME_MQ_CCDT_URL = "mq.ccdt.url"; - public static final String CONFIG_DOCUMENTATION_MQ_CCDT_URL = "The CCDT URL to use to establish a connection to the queue manager."; - public static final String CONFIG_DISPLAY_MQ_CCDT_URL = "CCDT URL"; - - public static final String CONFIG_NAME_MQ_MESSAGE_BUILDER = "mq.message.builder"; - public static final String CONFIG_DOCUMENTATION_MQ_MESSAGE_BUILDER = "The class used to build the MQ messages."; - public static final String CONFIG_DISPLAY_MQ_MESSAGE_BUILDER = "Message builder"; - - public static final String CONFIG_NAME_MQ_MESSAGE_BODY_JMS = "mq.message.body.jms"; - public static final String CONFIG_DOCUMENTATION_MQ_MESSAGE_BODY_JMS = "Whether to generate the message body as a JMS message type."; - public static 
final String CONFIG_DISPLAY_MQ_MESSAGE_BODY_JMS = "Message body as JMS"; - - public static final String CONFIG_NAME_MQ_TIME_TO_LIVE = "mq.time.to.live"; - public static final String CONFIG_DOCUMENTATION_MQ_TIME_TO_LIVE = "Time-to-live in milliseconds for messages sent to MQ."; - public static final String CONFIG_DISPLAY_MQ_TIME_TO_LIVE = "Message time-to-live (ms)"; - - public static final String CONFIG_NAME_MQ_PERSISTENT = "mq.persistent"; - public static final String CONFIG_DOCUMENTATION_MQ_PERSISTENT = "Send persistent or non-persistent messages to MQ."; - public static final String CONFIG_DISPLAY_MQ_PERSISTENT = "Send persistent messages"; - - public static final String CONFIG_NAME_MQ_SSL_CIPHER_SUITE = "mq.ssl.cipher.suite"; - public static final String CONFIG_DOCUMENTATION_MQ_SSL_CIPHER_SUITE = "The name of the cipher suite for the TLS (SSL) connection."; - public static final String CONFIG_DISPLAY_MQ_SSL_CIPHER_SUITE = "SSL cipher suite"; - - public static final String CONFIG_NAME_MQ_SSL_PEER_NAME = "mq.ssl.peer.name"; - public static final String CONFIG_DOCUMENTATION_MQ_SSL_PEER_NAME = "The distinguished name pattern of the TLS (SSL) peer."; - public static final String CONFIG_DISPLAY_MQ_SSL_PEER_NAME = "SSL peer name"; - - public static final String CONFIG_NAME_MQ_SSL_KEYSTORE_LOCATION = "mq.ssl.keystore.location"; - public static final String CONFIG_DOCUMENTATION_MQ_SSL_KEYSTORE_LOCATION = "The path to the JKS keystore to use for the TLS (SSL) connection."; - public static final String CONFIG_DISPLAY_MQ_SSL_KEYSTORE_LOCATION = "SSL keystore location"; - - public static final String CONFIG_NAME_MQ_SSL_KEYSTORE_PASSWORD = "mq.ssl.keystore.password"; - public static final String CONFIG_DOCUMENTATION_MQ_SSL_KEYSTORE_PASSWORD = "The password of the JKS keystore to use for the TLS (SSL) connection."; - public static final String CONFIG_DISPLAY_MQ_SSL_KEYSTORE_PASSWORD = "SSL keystore password"; - - public static final String 
CONFIG_NAME_MQ_SSL_TRUSTSTORE_LOCATION = "mq.ssl.truststore.location"; - public static final String CONFIG_DOCUMENTATION_MQ_SSL_TRUSTSTORE_LOCATION = "The path to the JKS truststore to use for the TLS (SSL) connection."; - public static final String CONFIG_DISPLAY_MQ_SSL_TRUSTSTORE_LOCATION = "SSL truststore location"; - - public static final String CONFIG_NAME_MQ_SSL_TRUSTSTORE_PASSWORD = "mq.ssl.truststore.password"; - public static final String CONFIG_DOCUMENTATION_MQ_SSL_TRUSTSTORE_PASSWORD = "The password of the JKS truststore to use for the TLS (SSL) connection."; - public static final String CONFIG_DISPLAY_MQ_SSL_TRUSTSTORE_PASSWORD = "SSL truststore password"; - - public static final String CONFIG_NAME_MQ_SSL_USE_IBM_CIPHER_MAPPINGS = "mq.ssl.use.ibm.cipher.mappings"; - public static final String CONFIG_DOCUMENTATION_MQ_SSL_USE_IBM_CIPHER_MAPPINGS = "Whether to set system property to control use of IBM cipher mappings."; - public static final String CONFIG_DISPLAY_MQ_SSL_USE_IBM_CIPHER_MAPPINGS = "Use IBM cipher mappings"; - - public static final String CONFIG_NAME_MQ_MESSAGE_BUILDER_KEY_HEADER = "mq.message.builder.key.header"; - public static final String CONFIG_DOCUMENTATION_MQ_MESSAGE_BUILDER_KEY_HEADER = "The JMS message header to set from the Kafka record key."; - public static final String CONFIG_DISPLAY_MQ_MESSAGE_BUILDER_KEY_HEADER = "Record builder key header"; - public static final String CONFIG_VALUE_MQ_MESSAGE_BUILDER_KEY_HEADER_JMSCORRELATIONID = "JMSCorrelationID"; - - public static final String CONFIG_NAME_MQ_MESSAGE_BUILDER_VALUE_CONVERTER = "mq.message.builder.value.converter"; - public static final String CONFIG_DOCUMENTATION_MQ_MESSAGE_BUILDER_VALUE_CONVERTER = "Prefix for configuring message builder's value converter."; - public static final String CONFIG_DISPLAY_MQ_MESSAGE_BUILDER_VALUE_CONVERTER = "Message builder's value converter"; - - public static final String CONFIG_NAME_MQ_MESSAGE_BUILDER_TOPIC_PROPERTY = 
"mq.message.builder.topic.property"; - public static final String CONFIG_DOCUMENTATION_MQ_MESSAGE_BUILDER_TOPIC_PROPERTY = "The JMS message property to set from the Kafka topic."; - public static final String CONFIG_DISPLAY_MQ_MESSAGE_BUILDER_TOPIC_PROPERTY = "Kafka topic message property"; - - public static final String CONFIG_NAME_MQ_MESSAGE_BUILDER_PARTITION_PROPERTY = "mq.message.builder.partition.property"; - public static final String CONFIG_DOCUMENTATION_MQ_MESSAGE_BUILDER_PARTITION_PROPERTY = "The JMS message property to set from the Kafka partition."; - public static final String CONFIG_DISPLAY_MQ_MESSAGE_BUILDER_PARTITION_PROPERTY = "Kafka partition message property"; - - public static final String CONFIG_NAME_MQ_MESSAGE_BUILDER_OFFSET_PROPERTY = "mq.message.builder.offset.property"; - public static final String CONFIG_DOCUMENTATION_MQ_MESSAGE_BUILDER_OFFSET_PROPERTY = "The JMS message property to set from the Kafka offset."; - public static final String CONFIG_DISPLAY_MQ_MESSAGE_BUILDER_OFFSET_PROPERTY = "Kafka offset message property"; - - public static final String CONFIG_NAME_MQ_REPLY_QUEUE = "mq.reply.queue"; - public static final String CONFIG_DOCUMENTATION_MQ_REPLY_QUEUE = "The name of the reply-to queue, as a queue name or URI."; - public static final String CONFIG_DISPLAY_MQ_REPLY_QUEUE = "Reply-to queue"; - - public static final String CONFIG_NAME_MQ_USER_AUTHENTICATION_MQCSP = "mq.user.authentication.mqcsp"; - public static final String CONFIG_DOCUMENTATION_MQ_USER_AUTHENTICATION_MQCSP = "Whether to use MQ connection security parameters (MQCSP)."; - public static final String CONFIG_DISPLAY_MQ_USER_AUTHENTICATION_MQCSP = "User authentication using MQCSP"; - - public static final String CONFIG_NAME_KAFKA_HEADERS_COPY_TO_JMS_PROPERTIES = "mq.kafka.headers.copy.to.jms.properties"; - public static final String CONFIG_DOCUMENTATION_KAFKA_HEADERS_COPY_TO_JMS_PROPERTIES = "Whether to copy Kafka headers to JMS message properties."; - public static 
final String CONFIG_DISPLAY_KAFKA_HEADERS_COPY_TO_JMS_PROPERTIES = "Copy Kafka headers to JMS message properties"; - - public static final String CONFIG_NAME_MQ_RETRY_BACKOFF_MS = "mq.retry.backoff.ms"; - public static final String CONFIG_DOCUMENTATION_MQ_RETRY_BACKOFF_MS = "Time to wait, in milliseconds, before retrying after retriable exceptions"; - public static final String CONFIG_DISPLAY_MQ_RETRY_BACKOFF_MS = "Retry backoff (ms)"; - - - public static String version = "1.5.2"; + public static String version = "2.2.0"; private Map configProps; - /** - * Get the version of this connector. + /** Get the version of this connector. * - * @return the version, formatted as a String - */ - @Override public String version() { + * @return the version, formatted as a String */ + @Override + public String version() { return version; } - /** - * Start this Connector. This method will only be called on a clean Connector, i.e. it has - * either just been instantiated and initialized or {@link #stop()} has been invoked. + /** Start this Connector. This method will only be called on a clean Connector, i.e. it has either just been + * instantiated and initialized or {@link #stop()} has been invoked. * - * @param props configuration settings - */ - @Override public void start(final Map props) { + * @param props + * configuration settings */ + @Override + public void start(final Map props) { log.trace("[{}] Entry {}.start, props={}", Thread.currentThread().getId(), this.getClass().getName(), props); configProps = props; - for (final Entry entry: props.entrySet()) { + for (final Entry entry : props.entrySet()) { final String value; if (entry.getKey().toLowerCase(Locale.ENGLISH).contains("password")) { value = "[hidden]"; @@ -189,162 +68,64 @@ public class MQSinkConnector extends SinkConnector { log.trace("[{}] Exit {}.start", Thread.currentThread().getId(), this.getClass().getName()); } - /** - * Returns the Task implementation for this Connector. 
- */ - @Override public Class taskClass() { + /** Returns the Task implementation for this Connector. */ + @Override + public Class taskClass() { return MQSinkTask.class; - } + } - /** - * Returns a set of configurations for Tasks based on the current configuration, - * producing at most count configurations. + /** Returns a set of configurations for Tasks based on the current configuration, producing at most count + * configurations. * - * @param maxTasks maximum number of configurations to generate - * @return configurations for Tasks - */ - @Override public List> taskConfigs(final int maxTasks) { - log.trace("[{}] Entry {}.taskConfigs, maxTasks={}", Thread.currentThread().getId(), this.getClass().getName(), maxTasks); - + * @param maxTasks + * maximum number of configurations to generate + * @return configurations for Tasks */ + @Override + public List> taskConfigs(final int maxTasks) { + log.trace("[{}] Entry {}.taskConfigs, maxTasks={}", Thread.currentThread().getId(), this.getClass().getName(), + maxTasks); + + final String exactlyOnceStateQueue = configProps.get(MQSinkConfig.CONFIG_NAME_MQ_EXACTLY_ONCE_STATE_QUEUE); + if (exactlyOnceStateQueue != null && !exactlyOnceStateQueue.isEmpty() && maxTasks > 1) { + throw new ConnectException( + String.format("%s must be empty or not set when maxTasks > 1", + MQSinkConfig.CONFIG_NAME_MQ_EXACTLY_ONCE_STATE_QUEUE)); + } final List> taskConfigs = new ArrayList<>(); for (int i = 0; i < maxTasks; i++) { taskConfigs.add(configProps); } - log.trace("[{}] Exit {}.taskConfigs, retval={}", Thread.currentThread().getId(), this.getClass().getName(), taskConfigs); + log.trace("[{}] Exit {}.taskConfigs, retval={}", Thread.currentThread().getId(), this.getClass().getName(), + taskConfigs); return taskConfigs; } - /** - * Stop this connector. - */ - @Override public void stop() { + /** Stop this connector. 
*/ + @Override + public void stop() { log.trace("[{}] Entry {}.stop", Thread.currentThread().getId(), this.getClass().getName()); log.trace("[{}] Exit {}.stop", Thread.currentThread().getId(), this.getClass().getName()); } + /** Define the configuration for the connector. + * + * @return The ConfigDef for this connector. */ + @Override + public ConfigDef config() { + return MQSinkConfig.config(); + } + /** - * Define the configuration for the connector. - * @return The ConfigDef for this connector. + * Returns true if the supplied connector configuration supports exactly-once semantics. + * Checks that 'mq.exactly.once.state.queue' property is supplied and is not empty. + * + * @param connectorConfig the connector config + * @return true if 'mq.exactly.once.state.queue' property is supplied and is not empty. */ - @Override public ConfigDef config() { - final ConfigDef config = new ConfigDef(); - - config.define(CONFIG_NAME_MQ_QUEUE_MANAGER, Type.STRING, ConfigDef.NO_DEFAULT_VALUE, Importance.HIGH, - CONFIG_DOCUMENTATION_MQ_QUEUE_MANAGER, CONFIG_GROUP_MQ, 1, Width.MEDIUM, - CONFIG_DISPLAY_MQ_QUEUE_MANAGER); - - config.define(CONFIG_NAME_MQ_CONNECTION_MODE, Type.STRING, CONFIG_VALUE_MQ_CONNECTION_MODE_CLIENT, - ConfigDef.ValidString.in(CONFIG_VALUE_MQ_CONNECTION_MODE_CLIENT, - CONFIG_VALUE_MQ_CONNECTION_MODE_BINDINGS), - Importance.MEDIUM, - CONFIG_DOCUMENTATION_MQ_CONNECTION_MODE, CONFIG_GROUP_MQ, 2, Width.SHORT, - CONFIG_DISPLAY_MQ_CONNECTION_MODE); - - config.define(CONFIG_NAME_MQ_CONNECTION_NAME_LIST, Type.STRING, null, Importance.MEDIUM, - CONFIG_DOCUMENTATION_MQ_CONNNECTION_NAME_LIST, CONFIG_GROUP_MQ, 3, Width.LONG, - CONFIG_DISPLAY_MQ_CONNECTION_NAME_LIST); - - config.define(CONFIG_NAME_MQ_CHANNEL_NAME, Type.STRING, null, Importance.MEDIUM, - CONFIG_DOCUMENTATION_MQ_CHANNEL_NAME, CONFIG_GROUP_MQ, 4, Width.MEDIUM, - CONFIG_DISPLAY_MQ_CHANNEL_NAME); - - config.define(CONFIG_NAME_MQ_CCDT_URL, Type.STRING, null, Importance.MEDIUM, - 
CONFIG_DOCUMENTATION_MQ_CCDT_URL, CONFIG_GROUP_MQ, 5, Width.MEDIUM, - CONFIG_DISPLAY_MQ_CCDT_URL); - - config.define(CONFIG_NAME_MQ_QUEUE, Type.STRING, ConfigDef.NO_DEFAULT_VALUE, Importance.HIGH, - CONFIG_DOCUMENTATION_MQ_QUEUE, CONFIG_GROUP_MQ, 6, Width.LONG, - CONFIG_DISPLAY_MQ_QUEUE); - - config.define(CONFIG_NAME_MQ_USER_NAME, Type.STRING, null, Importance.MEDIUM, - CONFIG_DOCUMENTATION_MQ_USER_NAME, CONFIG_GROUP_MQ, 7, Width.MEDIUM, - CONFIG_DISPLAY_MQ_USER_NAME); - - config.define(CONFIG_NAME_MQ_PASSWORD, Type.PASSWORD, null, Importance.MEDIUM, - CONFIG_DOCUMENTATION_MQ_PASSWORD, CONFIG_GROUP_MQ, 8, Width.MEDIUM, - CONFIG_DISPLAY_MQ_PASSWORD); - - config.define(CONFIG_NAME_MQ_MESSAGE_BUILDER, Type.STRING, ConfigDef.NO_DEFAULT_VALUE, Importance.HIGH, - CONFIG_DOCUMENTATION_MQ_MESSAGE_BUILDER, CONFIG_GROUP_MQ, 9, Width.MEDIUM, - CONFIG_DISPLAY_MQ_MESSAGE_BUILDER); - - config.define(CONFIG_NAME_MQ_MESSAGE_BODY_JMS, Type.BOOLEAN, Boolean.FALSE, Importance.MEDIUM, - CONFIG_DOCUMENTATION_MQ_MESSAGE_BODY_JMS, CONFIG_GROUP_MQ, 10, Width.SHORT, - CONFIG_DISPLAY_MQ_MESSAGE_BODY_JMS); - - config.define(CONFIG_NAME_MQ_TIME_TO_LIVE, Type.LONG, 0, Range.between(0L, 99999999900L), Importance.MEDIUM, - CONFIG_DOCUMENTATION_MQ_TIME_TO_LIVE, CONFIG_GROUP_MQ, 11, Width.SHORT, - CONFIG_DISPLAY_MQ_TIME_TO_LIVE); - - config.define(CONFIG_NAME_MQ_PERSISTENT, Type.BOOLEAN, "true", Importance.MEDIUM, - CONFIG_DOCUMENTATION_MQ_PERSISTENT, CONFIG_GROUP_MQ, 12, Width.SHORT, - CONFIG_DISPLAY_MQ_PERSISTENT); - - config.define(CONFIG_NAME_MQ_SSL_CIPHER_SUITE, Type.STRING, null, Importance.MEDIUM, - CONFIG_DOCUMENTATION_MQ_SSL_CIPHER_SUITE, CONFIG_GROUP_MQ, 13, Width.MEDIUM, - CONFIG_DISPLAY_MQ_SSL_CIPHER_SUITE); - - config.define(CONFIG_NAME_MQ_SSL_PEER_NAME, Type.STRING, null, Importance.MEDIUM, - CONFIG_DOCUMENTATION_MQ_SSL_PEER_NAME, CONFIG_GROUP_MQ, 14, Width.MEDIUM, - CONFIG_DISPLAY_MQ_SSL_PEER_NAME); - - config.define(CONFIG_NAME_MQ_SSL_KEYSTORE_LOCATION, Type.STRING, null, 
Importance.MEDIUM, - CONFIG_DOCUMENTATION_MQ_SSL_KEYSTORE_LOCATION, CONFIG_GROUP_MQ, 15, Width.MEDIUM, - CONFIG_DISPLAY_MQ_SSL_KEYSTORE_LOCATION); - - config.define(CONFIG_NAME_MQ_SSL_KEYSTORE_PASSWORD, Type.PASSWORD, null, Importance.MEDIUM, - CONFIG_DOCUMENTATION_MQ_SSL_KEYSTORE_PASSWORD, CONFIG_GROUP_MQ, 16, Width.MEDIUM, - CONFIG_DISPLAY_MQ_SSL_KEYSTORE_PASSWORD); - - config.define(CONFIG_NAME_MQ_SSL_TRUSTSTORE_LOCATION, Type.STRING, null, Importance.MEDIUM, - CONFIG_DOCUMENTATION_MQ_SSL_TRUSTSTORE_LOCATION, CONFIG_GROUP_MQ, 17, Width.MEDIUM, - CONFIG_DISPLAY_MQ_SSL_TRUSTSTORE_LOCATION); - - config.define(CONFIG_NAME_MQ_SSL_TRUSTSTORE_PASSWORD, Type.PASSWORD, null, Importance.MEDIUM, - CONFIG_DOCUMENTATION_MQ_SSL_TRUSTSTORE_PASSWORD, CONFIG_GROUP_MQ, 18, Width.MEDIUM, - CONFIG_DISPLAY_MQ_SSL_TRUSTSTORE_PASSWORD); - - config.define(CONFIG_NAME_MQ_MESSAGE_BUILDER_KEY_HEADER, Type.STRING, null, Importance.MEDIUM, - CONFIG_DOCUMENTATION_MQ_MESSAGE_BUILDER_KEY_HEADER, CONFIG_GROUP_MQ, 19, Width.MEDIUM, - CONFIG_DISPLAY_MQ_MESSAGE_BUILDER_KEY_HEADER); - - config.define(CONFIG_NAME_MQ_MESSAGE_BUILDER_VALUE_CONVERTER, Type.STRING, null, Importance.LOW, - CONFIG_DOCUMENTATION_MQ_MESSAGE_BUILDER_VALUE_CONVERTER, CONFIG_GROUP_MQ, 20, Width.MEDIUM, - CONFIG_DISPLAY_MQ_MESSAGE_BUILDER_VALUE_CONVERTER); - - config.define(CONFIG_NAME_MQ_MESSAGE_BUILDER_TOPIC_PROPERTY, Type.STRING, null, Importance.LOW, - CONFIG_DOCUMENTATION_MQ_MESSAGE_BUILDER_TOPIC_PROPERTY, CONFIG_GROUP_MQ, 21, Width.MEDIUM, - CONFIG_DISPLAY_MQ_MESSAGE_BUILDER_TOPIC_PROPERTY); - - config.define(CONFIG_NAME_MQ_MESSAGE_BUILDER_PARTITION_PROPERTY, Type.STRING, null, Importance.LOW, - CONFIG_DOCUMENTATION_MQ_MESSAGE_BUILDER_PARTITION_PROPERTY, CONFIG_GROUP_MQ, 22, Width.MEDIUM, - CONFIG_DISPLAY_MQ_MESSAGE_BUILDER_PARTITION_PROPERTY); - - config.define(CONFIG_NAME_MQ_MESSAGE_BUILDER_OFFSET_PROPERTY, Type.STRING, null, Importance.LOW, - CONFIG_DOCUMENTATION_MQ_MESSAGE_BUILDER_OFFSET_PROPERTY, CONFIG_GROUP_MQ, 23, 
Width.MEDIUM, - CONFIG_DISPLAY_MQ_MESSAGE_BUILDER_OFFSET_PROPERTY); - - config.define(CONFIG_NAME_MQ_REPLY_QUEUE, Type.STRING, null, Importance.LOW, - CONFIG_DOCUMENTATION_MQ_REPLY_QUEUE, CONFIG_GROUP_MQ, 24, Width.MEDIUM, - CONFIG_DISPLAY_MQ_REPLY_QUEUE); - - config.define(CONFIG_NAME_MQ_USER_AUTHENTICATION_MQCSP, Type.BOOLEAN, Boolean.TRUE, Importance.LOW, - CONFIG_DOCUMENTATION_MQ_USER_AUTHENTICATION_MQCSP, CONFIG_GROUP_MQ, 25, Width.SHORT, - CONFIG_DISPLAY_MQ_USER_AUTHENTICATION_MQCSP); - - config.define(CONFIG_NAME_MQ_SSL_USE_IBM_CIPHER_MAPPINGS, Type.BOOLEAN, null, Importance.LOW, - CONFIG_DOCUMENTATION_MQ_SSL_USE_IBM_CIPHER_MAPPINGS, CONFIG_GROUP_MQ, 26, Width.SHORT, - CONFIG_DISPLAY_MQ_SSL_USE_IBM_CIPHER_MAPPINGS); - - config.define(CONFIG_NAME_KAFKA_HEADERS_COPY_TO_JMS_PROPERTIES, Type.BOOLEAN, null, Importance.LOW, - CONFIG_DOCUMENTATION_KAFKA_HEADERS_COPY_TO_JMS_PROPERTIES, CONFIG_GROUP_MQ, 27, Width.SHORT, - CONFIG_DISPLAY_KAFKA_HEADERS_COPY_TO_JMS_PROPERTIES); - - config.define(CONFIG_NAME_MQ_RETRY_BACKOFF_MS, Type.LONG, 60000, Range.between(0L, 99999999900L), Importance.LOW, - CONFIG_DOCUMENTATION_MQ_RETRY_BACKOFF_MS, CONFIG_GROUP_MQ, 28, Width.SHORT, - CONFIG_DISPLAY_MQ_RETRY_BACKOFF_MS); - - return config; + public static final boolean configSupportsExactlyOnce(final AbstractConfig connectorConfig) { + // If there is a state queue configured, we can do exactly-once semantics + final String exactlyOnceStateQueue = connectorConfig.getString(MQSinkConfig.CONFIG_NAME_MQ_EXACTLY_ONCE_STATE_QUEUE); + return exactlyOnceStateQueue != null && !exactlyOnceStateQueue.isEmpty(); } -} \ No newline at end of file +} diff --git a/src/main/java/com/ibm/eventstreams/connect/mqsink/MQSinkTask.java b/src/main/java/com/ibm/eventstreams/connect/mqsink/MQSinkTask.java index 3cdb06e..a660a4a 100644 --- a/src/main/java/com/ibm/eventstreams/connect/mqsink/MQSinkTask.java +++ b/src/main/java/com/ibm/eventstreams/connect/mqsink/MQSinkTask.java @@ -1,127 +1,203 @@ /** - * 
Copyright 2017, 2020 IBM Corporation - * + * Copyright 2017, 2020, 2023, 2024 IBM Corporation + *

* Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. + *

+ * http://www.apache.org/licenses/LICENSE-2.0 + *

+ * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. */ package com.ibm.eventstreams.connect.mqsink; import java.util.Collection; +import java.util.HashMap; import java.util.Map; -import java.util.Map.Entry; -import java.util.Locale; + +import javax.jms.JMSException; +import javax.jms.JMSRuntimeException; import org.apache.kafka.clients.consumer.OffsetAndMetadata; import org.apache.kafka.common.TopicPartition; +import org.apache.kafka.common.config.AbstractConfig; +import org.apache.kafka.connect.connector.Connector; +import org.apache.kafka.connect.errors.ConnectException; import org.apache.kafka.connect.errors.RetriableException; import org.apache.kafka.connect.sink.SinkRecord; import org.apache.kafka.connect.sink.SinkTask; - +import org.apache.kafka.connect.sink.SinkTaskContext; import org.slf4j.Logger; import org.slf4j.LoggerFactory; +import com.fasterxml.jackson.core.JsonProcessingException; + public class MQSinkTask extends SinkTask { private static final Logger log = LoggerFactory.getLogger(MQSinkTask.class); - private JMSWriter writer; + protected JMSWorker worker; - private long retryBackoffMs = 60000; + protected long retryBackoffMs = 60000; + private boolean isExactlyOnceMode = false; + + private HashMap lastCommittedOffsetMap; public MQSinkTask() { } + // visible for testing. + MQSinkTask(final MQSinkConfig connectorConfig, final SinkTaskContext context) throws Exception { + this.context = context; + } + /** - * Get the version of this task. Usually this should be the same as the corresponding {@link Connector} class's version. + * Get the version of this task. Usually this should be the same as the + * corresponding {@link Connector} class's version. 
* * @return the version, formatted as a String */ - @Override public String version() { + @Override + public String version() { return MQSinkConnector.version; } /** - * Start the Task. This should handle any configuration parsing and one-time setup of the task. + * Start the Task. This should handle any configuration parsing and one-time + * setup of the task. + * * @param props initial configuration */ - @Override public void start(final Map props) { + @Override + public void start(final Map props) { log.trace("[{}] Entry {}.start, props={}", Thread.currentThread().getId(), this.getClass().getName(), props); - for (final Entry entry: props.entrySet()) { - final String value; - if (entry.getKey().toLowerCase(Locale.ENGLISH).contains("password")) { - value = "[hidden]"; - } else { - value = entry.getValue(); - } - log.debug("Task props entry {} : {}", entry.getKey(), value); - } + try { + final AbstractConfig config = new AbstractConfig(MQSinkConfig.config(), props, true); - // check if a custom retry time is provided - final String retryBackoffMsStr = props.get(MQSinkConnector.CONFIG_NAME_MQ_RETRY_BACKOFF_MS); - if (retryBackoffMsStr != null) { - retryBackoffMs = Long.parseLong(retryBackoffMsStr); + this.isExactlyOnceMode = MQSinkConnector.configSupportsExactlyOnce(config); + if (this.isExactlyOnceMode) { + log.info("Exactly-once mode enabled"); + } + setRetryBackoff(config); + // Construct a worker to interface with MQ + worker = newJMSWorker(); + worker.configure(config); + // Make a connection as an initial test of the configuration + worker.connect(); + } catch (JMSRuntimeException | JMSWorkerConnectionException e) { + log.error("MQ Connection Exception: ", e); + stop(); + throw new ConnectException(e); + } catch (final ConnectException e) { + log.error("Unexpected connect exception: ", e); + stop(); + throw e; + } catch (final RuntimeException e) { + log.error("Unexpected runtime exception: ", e); + stop(); + throw e; } - log.debug("Setting retry backoff {}", 
retryBackoffMs); - - // Construct a writer to interface with MQ - writer = new JMSWriter(); - writer.configure(props); - - // Make a connection as an initial test of the configuration - writer.connect(); log.trace("[{}] Exit {}.start", Thread.currentThread().getId(), this.getClass().getName()); } /** - * Put the records in the sink. Usually this should send the records to the sink asynchronously - * and immediately return. - * - * If this operation fails, the SinkTask may throw a {@link org.apache.kafka.connect.errors.RetriableException} to - * indicate that the framework should attempt to retry the same call again. Other exceptions will cause the task to - * be stopped immediately. {@link SinkTaskContext#timeout(long)} can be used to set the maximum time before the - * batch will be retried. + * Put the records in the sink. Usually this should send the records to the sink + * asynchronously and immediately return. + *

+ * If this operation fails, the SinkTask may throw a + * {@link org.apache.kafka.connect.errors.RetriableException} to indicate that + * the framework should attempt to retry the same call again. Other exceptions + * will cause the task to be stopped immediately. + * {@link SinkTaskContext#timeout(long)} can be used to set the maximum time + * before the batch will be retried. * * @param records the set of records to send */ - @Override public void put(final Collection records) { + @Override + public void put(final Collection records) { log.trace("[{}] Entry {}.put, records.size={}", Thread.currentThread().getId(), this.getClass().getName(), records.size()); - try { - for (final SinkRecord r: records) { - log.debug("Putting record for topic {}, partition {} and offset {}", r.topic(), r.kafkaPartition(), r.kafkaOffset()); - writer.send(r); + try { + if (isExactlyOnceMode) { + putExactlyOnce(records); + } else { + putAtLeastOnce(records); + } + } catch (final JsonProcessingException jpe) { + maybeCloseAllWorkers(jpe); + throw new ConnectException(jpe); + } catch (final JMSRuntimeException | JMSException e) { + log.error("JMS Exception: ", e); + maybeCloseAllWorkers(e); + throw ExceptionProcessor.handleException(e); + } catch (final ConnectException e) { + log.error("Unexpected connect exception: ", e); + maybeCloseAllWorkers(e); + throw e; + } catch (final RuntimeException e) { + log.error("Unexpected runtime exception: ", e); + maybeCloseAllWorkers(e); + throw e; } - - writer.commit(); } catch (final RetriableException rte) { context.timeout(retryBackoffMs); throw rte; } - log.trace("[{}] Exit {}.put", Thread.currentThread().getId(), this.getClass().getName()); } + private void putExactlyOnce(final Collection records) throws JsonProcessingException, JMSException { + lastCommittedOffsetMap = worker.readFromStateQueue().orElse(new HashMap<>()); + for (final SinkRecord record : records) { + final TopicPartition topicPartition = new TopicPartition(record.topic(), 
record.kafkaPartition()); + if (isRecordAlreadyCommitted(record, topicPartition)) { + log.debug("Skipping record for topic {}, partition {} and offset {} as it has already been committed", record.topic(), record.kafkaPartition(), record.kafkaOffset()); + continue; + } + log.debug("Putting record for topic {}, partition {} and offset {}", record.topic(), record.kafkaPartition(), record.kafkaOffset()); + worker.send(record); + lastCommittedOffsetMap.put(topicPartition.toString(), String.valueOf(record.kafkaOffset())); + } + + worker.writeLastRecordOffsetToStateQueue(lastCommittedOffsetMap); + worker.commit(); + } + + private boolean isRecordAlreadyCommitted(final SinkRecord record, final TopicPartition topicPartition) { + final long lastCommittedOffset = Long.parseLong(lastCommittedOffsetMap.getOrDefault(topicPartition.toString(), "-1")); + if (record.kafkaOffset() <= lastCommittedOffset) { + return true; + } + return false; + } + + private void putAtLeastOnce(final Collection records) { + for (final SinkRecord record : records) { + log.debug("Putting record for topic {}, partition {} and offset {}", record.topic(), record.kafkaPartition(), record.kafkaOffset()); + worker.send(record); + } + worker.commit(); + } + /** - * Flush all records that have been {@link #put(Collection)} for the specified topic-partitions. + * Flush all records that have been {@link #put(Collection)} for the specified + * topic-partitions. * - * @param currentOffsets the current offset state as of the last call to {@link #put(Collection)}}, - * provided for convenience but could also be determined by tracking all offsets included in the {@link SinkRecord}s - * passed to {@link #put}. + * @param currentOffsets the current offset state as of the last call to + * {@link #put(Collection)}}, provided for convenience but + * could also be determined by tracking all offsets + * included in the {@link SinkRecord}s passed to + * {@link #put}. 
*/ public void flush(final Map currentOffsets) { log.trace("[{}] Entry {}.flush", Thread.currentThread().getId(), this.getClass().getName()); - for (final Map.Entry entry: currentOffsets.entrySet()) { + for (final Map.Entry entry : currentOffsets.entrySet()) { final TopicPartition tp = entry.getKey(); final OffsetAndMetadata om = entry.getValue(); log.debug("Flushing up to topic {}, partition {} and offset {}", tp.topic(), tp.partition(), om.offset()); @@ -131,18 +207,53 @@ public void flush(final Map currentOffsets) { } /** - * Perform any cleanup to stop this task. In SinkTasks, this method is invoked only once outstanding calls to other - * methods have completed (e.g., {@link #put(Collection)} has returned) and a final {@link #flush(Map)} and offset - * commit has completed. Implementations of this method should only need to perform final cleanup operations, such - * as closing network connections to the sink system. + * Perform any cleanup to stop this task. In SinkTasks, this method is invoked + * only once outstanding calls to other methods have completed (e.g., + * {@link #put(Collection)} has returned) and a final {@link #flush(Map)} and + * offset commit has completed. Implementations of this method should only need + * to perform final cleanup operations, such as closing network connections to + * the sink system. */ - @Override public void stop() { + @Override + public void stop() { log.trace("[{}] Entry {}.stop", Thread.currentThread().getId(), this.getClass().getName()); - if (writer != null) { - writer.close(); + if (worker != null) { + worker.close(); } log.trace("[{}] Exit {}.stop", Thread.currentThread().getId(), this.getClass().getName()); } -} \ No newline at end of file + + /** + * Create a new JMSWorker. + */ + protected JMSWorker newJMSWorker() { + // Construct a worker to interface with MQ + final JMSWorker worker = new JMSWorker(); + return worker; + } + + /** + * Set the retry backoff time. 
+ * + * @param config the configuration properties + */ + private void setRetryBackoff(final AbstractConfig config) { + // check if a custom retry time is provided + final Long retryBackoffMs = config.getLong(MQSinkConfig.CONFIG_NAME_MQ_RETRY_BACKOFF_MS); + log.debug("Setting retry backoff {}", retryBackoffMs); + } + + + private void maybeCloseAllWorkers(final Throwable exc) { + log.debug(" Checking to see if the failed connection should be closed."); + if (ExceptionProcessor.isClosable(exc)) { + stop(); + } + } + + protected SinkTaskContext getContext() { + return this.context; + } +} diff --git a/src/main/java/com/ibm/eventstreams/connect/mqsink/SSLContextBuilder.java b/src/main/java/com/ibm/eventstreams/connect/mqsink/SSLContextBuilder.java new file mode 100644 index 0000000..7509b6e --- /dev/null +++ b/src/main/java/com/ibm/eventstreams/connect/mqsink/SSLContextBuilder.java @@ -0,0 +1,84 @@ +/** + * Copyright 2023, 2024 IBM Corporation + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package com.ibm.eventstreams.connect.mqsink; + +import org.apache.kafka.common.config.types.Password; +import org.apache.kafka.connect.errors.ConnectException; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +import javax.net.ssl.KeyManager; +import javax.net.ssl.KeyManagerFactory; +import javax.net.ssl.SSLContext; +import javax.net.ssl.TrustManager; +import javax.net.ssl.TrustManagerFactory; +import java.io.FileInputStream; +import java.io.IOException; +import java.io.InputStream; +import java.security.GeneralSecurityException; +import java.security.KeyStore; +import java.security.SecureRandom; + +public class SSLContextBuilder { + private static final Logger log = LoggerFactory.getLogger(SSLContextBuilder.class); + + public SSLContext buildSslContext(final String sslKeystoreLocation, final Password sslKeystorePassword, + final String sslTruststoreLocation, final Password sslTruststorePassword) { + log.trace("[{}] Entry {}.buildSslContext", Thread.currentThread().getId(), this.getClass().getName()); + + try { + KeyManager[] keyManagers = null; + TrustManager[] trustManagers = null; + + if (sslKeystoreLocation != null) { + final KeyManagerFactory kmf = KeyManagerFactory.getInstance(KeyManagerFactory.getDefaultAlgorithm()); + kmf.init(loadKeyStore(sslKeystoreLocation, sslKeystorePassword), sslKeystorePassword.value().toCharArray()); + keyManagers = kmf.getKeyManagers(); + } + + if (sslTruststoreLocation != null) { + final TrustManagerFactory tmf = TrustManagerFactory + .getInstance(TrustManagerFactory.getDefaultAlgorithm()); + tmf.init(loadKeyStore(sslTruststoreLocation, sslTruststorePassword)); + trustManagers = tmf.getTrustManagers(); + } + + final SSLContext sslContext = SSLContext.getInstance("TLS"); + sslContext.init(keyManagers, trustManagers, new SecureRandom()); + + log.trace("[{}] Exit {}.buildSslContext, retval={}", Thread.currentThread().getId(), + this.getClass().getName(), sslContext); + return sslContext; + } catch (final 
GeneralSecurityException e) { + throw new ConnectException("Error creating SSLContext", e); + } + } + + private KeyStore loadKeyStore(final String location, final Password password) throws GeneralSecurityException { + log.trace("[{}] Entry {}.loadKeyStore", Thread.currentThread().getId(), this.getClass().getName()); + + try (final InputStream ksStr = new FileInputStream(location)) { + final KeyStore ks = KeyStore.getInstance("JKS"); + ks.load(ksStr, password.value().toCharArray()); + + log.trace("[{}] Exit {}.loadKeyStore, retval={}", Thread.currentThread().getId(), + this.getClass().getName(), ks); + return ks; + } catch (final IOException e) { + throw new ConnectException("Error reading keystore " + location, e); + } + } +} diff --git a/src/main/java/com/ibm/eventstreams/connect/mqsink/builders/BaseMessageBuilder.java b/src/main/java/com/ibm/eventstreams/connect/mqsink/builders/BaseMessageBuilder.java index 2a09f85..a90b866 100644 --- a/src/main/java/com/ibm/eventstreams/connect/mqsink/builders/BaseMessageBuilder.java +++ b/src/main/java/com/ibm/eventstreams/connect/mqsink/builders/BaseMessageBuilder.java @@ -1,5 +1,5 @@ /** - * Copyright 2018, 2019 IBM Corporation + * Copyright 2018, 2019, 2023, 2024 IBM Corporation * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. @@ -15,7 +15,7 @@ */ package com.ibm.eventstreams.connect.mqsink.builders; -import com.ibm.eventstreams.connect.mqsink.MQSinkConnector; +import com.ibm.eventstreams.connect.mqsink.MQSinkConfig; import com.ibm.mq.jms.MQQueue; @@ -53,7 +53,7 @@ public enum KeyHeader { NONE, CORRELATION_ID }; /** * Configure this class. - * + * * @param props initial configuration * * @throws ConnectException Operation failed and connector should stop. 
@@ -61,47 +61,47 @@ public enum KeyHeader { NONE, CORRELATION_ID }; @Override public void configure(final Map props) { log.trace("[{}] Entry {}.configure, props={}", Thread.currentThread().getId(), this.getClass().getName(), props); - final String kh = props.get(MQSinkConnector.CONFIG_NAME_MQ_MESSAGE_BUILDER_KEY_HEADER); + final String kh = props.get(MQSinkConfig.CONFIG_NAME_MQ_MESSAGE_BUILDER_KEY_HEADER); if (kh != null) { - if (kh.equals(MQSinkConnector.CONFIG_VALUE_MQ_MESSAGE_BUILDER_KEY_HEADER_JMSCORRELATIONID)) { + if (kh.equals(MQSinkConfig.CONFIG_VALUE_MQ_MESSAGE_BUILDER_KEY_HEADER_JMSCORRELATIONID)) { keyheader = KeyHeader.CORRELATION_ID; log.debug("Setting JMSCorrelationID header field from Kafka record key"); } else { log.debug("Unsupported MQ message builder key header value {}", kh); - throw new ConnectException("Unsupported MQ message builder key header value"); + throw new MessageBuilderException("Unsupported MQ message builder key header value"); } } - final String rtq = props.get(MQSinkConnector.CONFIG_NAME_MQ_REPLY_QUEUE); + final String rtq = props.get(MQSinkConfig.CONFIG_NAME_MQ_REPLY_QUEUE); if (rtq != null) { try { // The queue URI format supports properties, but we only accept "queue://qmgr/queue" if (rtq.contains("?")) { - throw new ConnectException("Reply-to queue URI must not contain properties"); + throw new MessageBuilderException("Reply-to queue URI must not contain properties"); } else { replyToQueue = new MQQueue(rtq); } } catch (final JMSException jmse) { - throw new ConnectException("Failed to build reply-to queue", jmse); + throw new MessageBuilderException("Failed to build reply-to queue", jmse); } } - final String tpn = props.get(MQSinkConnector.CONFIG_NAME_MQ_MESSAGE_BUILDER_TOPIC_PROPERTY); + final String tpn = props.get(MQSinkConfig.CONFIG_NAME_MQ_MESSAGE_BUILDER_TOPIC_PROPERTY); if (tpn != null) { topicPropertyName = tpn; } - final String ppn = props.get(MQSinkConnector.CONFIG_NAME_MQ_MESSAGE_BUILDER_PARTITION_PROPERTY); + 
final String ppn = props.get(MQSinkConfig.CONFIG_NAME_MQ_MESSAGE_BUILDER_PARTITION_PROPERTY); if (ppn != null) { partitionPropertyName = ppn; } - final String opn = props.get(MQSinkConnector.CONFIG_NAME_MQ_MESSAGE_BUILDER_OFFSET_PROPERTY); + final String opn = props.get(MQSinkConfig.CONFIG_NAME_MQ_MESSAGE_BUILDER_OFFSET_PROPERTY); if (opn != null) { offsetPropertyName = opn; } - - final String copyhdr = props.get(MQSinkConnector.CONFIG_NAME_KAFKA_HEADERS_COPY_TO_JMS_PROPERTIES); + + final String copyhdr = props.get(MQSinkConfig.CONFIG_NAME_KAFKA_HEADERS_COPY_TO_JMS_PROPERTIES); if (copyhdr != null) { copyJmsProperties = Boolean.valueOf(copyhdr); } @@ -111,20 +111,20 @@ public enum KeyHeader { NONE, CORRELATION_ID }; /** * Gets the JMS message for the Kafka Connect SinkRecord. - * + * * @param context the JMS context to use for building messages * @param record the Kafka Connect SinkRecord - * + * * @return the JMS message */ public abstract Message getJMSMessage(JMSContext jmsCtxt, SinkRecord record); /** * Convert a Kafka Connect SinkRecord into a JMS message. 
- * + * * @param context the JMS context to use for building messages * @param record the Kafka Connect SinkRecord - * + * * @return the JMS message */ @Override public Message fromSinkRecord(final JMSContext jmsCtxt, final SinkRecord record) { @@ -141,7 +141,7 @@ public enum KeyHeader { NONE, CORRELATION_ID }; try { m.setJMSCorrelationIDAsBytes((byte[]) k); } catch (final JMSException jmse) { - throw new ConnectException("Failed to write bytes", jmse); + throw new ConnectException("Failed to write bytes", jmse); } } else if (k instanceof ByteBuffer) { try { @@ -224,5 +224,5 @@ public enum KeyHeader { NONE, CORRELATION_ID }; } return m; - } -} \ No newline at end of file + } +} diff --git a/src/main/java/com/ibm/eventstreams/connect/mqsink/builders/ConverterMessageBuilder.java b/src/main/java/com/ibm/eventstreams/connect/mqsink/builders/ConverterMessageBuilder.java index 69216f8..a9df37b 100644 --- a/src/main/java/com/ibm/eventstreams/connect/mqsink/builders/ConverterMessageBuilder.java +++ b/src/main/java/com/ibm/eventstreams/connect/mqsink/builders/ConverterMessageBuilder.java @@ -1,5 +1,5 @@ /** - * Copyright 2018 IBM Corporation + * Copyright 2018, 2023, 2024 IBM Corporation * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. @@ -15,7 +15,7 @@ */ package com.ibm.eventstreams.connect.mqsink.builders; -import com.ibm.eventstreams.connect.mqsink.MQSinkConnector; +import com.ibm.eventstreams.connect.mqsink.MQSinkConfig; import java.util.Map; @@ -48,7 +48,7 @@ public ConverterMessageBuilder() { /** * Configure this class. - * + * * @param props initial configuration * * @throws ConnectException Operation failed and connector should stop. 
@@ -58,7 +58,7 @@ public void configure(final Map props) { super.configure(props); - final String converterClass = props.get(MQSinkConnector.CONFIG_NAME_MQ_MESSAGE_BUILDER_VALUE_CONVERTER); + final String converterClass = props.get(MQSinkConfig.CONFIG_NAME_MQ_MESSAGE_BUILDER_VALUE_CONVERTER); try { final Class c = Class.forName(converterClass).asSubclass(Converter.class); @@ -69,10 +69,10 @@ public void configure(final Map props) { final AbstractConfig ac = new AbstractConfig(new ConfigDef(), props, false); // Configure the Converter to convert the value, not the key (isKey == false) - converter.configure(ac.originalsWithPrefix(MQSinkConnector.CONFIG_NAME_MQ_MESSAGE_BUILDER_VALUE_CONVERTER + "."), false); + converter.configure(ac.originalsWithPrefix(MQSinkConfig.CONFIG_NAME_MQ_MESSAGE_BUILDER_VALUE_CONVERTER + "."), false); } catch (ClassNotFoundException | IllegalAccessException | InstantiationException | NullPointerException exc) { log.error("Could not instantiate converter for message builder {}", converterClass); - throw new ConnectException("Could not instantiate converter for message builder", exc); + throw new MessageBuilderException("Could not instantiate converter for message builder", exc); } log.trace("[{}] Exit {}.configure", Thread.currentThread().getId(), this.getClass().getName()); @@ -80,14 +80,14 @@ public void configure(final Map props) { /** * Gets the JMS message for the Kafka Connect SinkRecord. 
- * + * * @param context the JMS context to use for building messages * @param record the Kafka Connect SinkRecord - * + * * @return the JMS message */ @Override public Message getJMSMessage(final JMSContext jmsCtxt, final SinkRecord record) { final byte[] payload = converter.fromConnectData(record.topic(), record.valueSchema(), record.value()); return jmsCtxt.createTextMessage(new String(payload, UTF_8)); } -} \ No newline at end of file +} diff --git a/src/main/java/com/ibm/eventstreams/connect/mqsink/builders/DefaultMessageBuilder.java b/src/main/java/com/ibm/eventstreams/connect/mqsink/builders/DefaultMessageBuilder.java index 4c69405..5049979 100644 --- a/src/main/java/com/ibm/eventstreams/connect/mqsink/builders/DefaultMessageBuilder.java +++ b/src/main/java/com/ibm/eventstreams/connect/mqsink/builders/DefaultMessageBuilder.java @@ -1,5 +1,5 @@ /** - * Copyright 2017, 2018 IBM Corporation + * Copyright 2017, 2018, 2023, 2024 IBM Corporation * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. @@ -47,10 +47,10 @@ public DefaultMessageBuilder() { /** * Gets the JMS message for the Kafka Connect SinkRecord. 
- * + * * @param context the JMS context to use for building messages * @param record the Kafka Connect SinkRecord - * + * * @return the JMS message */ @Override public Message getJMSMessage(final JMSContext jmsCtxt, final SinkRecord record) { @@ -99,6 +99,6 @@ public DefaultMessageBuilder() { } } - return jmsCtxt.createTextMessage(v.toString()); + return jmsCtxt.createTextMessage(v.toString()); } -} \ No newline at end of file +} diff --git a/src/main/java/com/ibm/eventstreams/connect/mqsink/builders/JsonMessageBuilder.java b/src/main/java/com/ibm/eventstreams/connect/mqsink/builders/JsonMessageBuilder.java index 5bdebae..b9c4cb0 100644 --- a/src/main/java/com/ibm/eventstreams/connect/mqsink/builders/JsonMessageBuilder.java +++ b/src/main/java/com/ibm/eventstreams/connect/mqsink/builders/JsonMessageBuilder.java @@ -1,5 +1,5 @@ /** - * Copyright 2017, 2018 IBM Corporation + * Copyright 2017, 2018, 2023, 2024 IBM Corporation * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. @@ -40,7 +40,7 @@ public class JsonMessageBuilder extends BaseMessageBuilder { public JsonMessageBuilder() { log.info("Building messages using com.ibm.eventstreams.connect.mqsink.builders.JsonMessageBuilder"); converter = new JsonConverter(); - + // We just want the payload, not the schema in the output message final HashMap m = new HashMap<>(); m.put("schemas.enable", "false"); @@ -51,14 +51,14 @@ public JsonMessageBuilder() { /** * Gets the JMS message for the Kafka Connect SinkRecord. 
- * + * * @param context the JMS context to use for building messages * @param record the Kafka Connect SinkRecord - * + * * @return the JMS message */ @Override public Message getJMSMessage(final JMSContext jmsCtxt, final SinkRecord record) { final byte[] payload = converter.fromConnectData(record.topic(), record.valueSchema(), record.value()); return jmsCtxt.createTextMessage(new String(payload, UTF_8)); } -} \ No newline at end of file +} diff --git a/src/main/java/com/ibm/eventstreams/connect/mqsink/builders/MessageBuilder.java b/src/main/java/com/ibm/eventstreams/connect/mqsink/builders/MessageBuilder.java index 960f13e..b3cf492 100644 --- a/src/main/java/com/ibm/eventstreams/connect/mqsink/builders/MessageBuilder.java +++ b/src/main/java/com/ibm/eventstreams/connect/mqsink/builders/MessageBuilder.java @@ -1,5 +1,5 @@ /** - * Copyright 2017, 2018 IBM Corporation + * Copyright 2017, 2018, 2023, 2024 IBM Corporation * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. @@ -28,7 +28,7 @@ public interface MessageBuilder { /** * Configure this class. - * + * * @param props initial configuration * * @throws ConnectException Operation failed and connector should stop. @@ -37,11 +37,11 @@ default void configure(Map props) {} /** * Convert a Kafka Connect SinkRecord into a message. 
- * + * * @param context the JMS context to use for building messages * @param record the Kafka Connect SinkRecord - * + * * @return the message */ Message fromSinkRecord(JMSContext context, SinkRecord record); -} \ No newline at end of file +} diff --git a/src/main/java/com/ibm/eventstreams/connect/mqsink/builders/MessageBuilderException.java b/src/main/java/com/ibm/eventstreams/connect/mqsink/builders/MessageBuilderException.java new file mode 100644 index 0000000..47908c9 --- /dev/null +++ b/src/main/java/com/ibm/eventstreams/connect/mqsink/builders/MessageBuilderException.java @@ -0,0 +1,31 @@ +/** + * Copyright 2023, 2024 IBM Corporation + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package com.ibm.eventstreams.connect.mqsink.builders; + +public class MessageBuilderException extends RuntimeException { + + public MessageBuilderException(final String s) { + super(s); + } + + public MessageBuilderException(final String s, final Throwable throwable) { + super(s, throwable); + } + + public MessageBuilderException(final Throwable throwable) { + super(throwable); + } +} diff --git a/src/main/java/com/ibm/eventstreams/connect/mqsink/builders/MessageBuilderFactory.java b/src/main/java/com/ibm/eventstreams/connect/mqsink/builders/MessageBuilderFactory.java new file mode 100644 index 0000000..64bb4c9 --- /dev/null +++ b/src/main/java/com/ibm/eventstreams/connect/mqsink/builders/MessageBuilderFactory.java @@ -0,0 +1,53 @@ +/** + * Copyright 2023, 2024 IBM Corporation + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package com.ibm.eventstreams.connect.mqsink.builders; + +import org.apache.kafka.common.config.AbstractConfig; +import org.apache.kafka.connect.errors.ConnectException; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +import com.ibm.eventstreams.connect.mqsink.MQSinkConfig; + + +public class MessageBuilderFactory { + + private static final Logger log = LoggerFactory.getLogger(MessageBuilderFactory.class); + + public static MessageBuilder getMessageBuilder(final AbstractConfig config) throws ConnectException { + return getMessageBuilder( + config.getString(MQSinkConfig.CONFIG_NAME_MQ_MESSAGE_BUILDER), + config); + } + + protected static MessageBuilder getMessageBuilder(final String builderClass, final AbstractConfig config) + throws ConnectException { + + final MessageBuilder builder; + + try { + final Class c = Class.forName(builderClass).asSubclass(MessageBuilder.class); + builder = c.newInstance(); + builder.configure(config.originalsStrings()); + } catch (ClassNotFoundException | ClassCastException | IllegalAccessException | InstantiationException + | NullPointerException exc) { + log.error("Could not instantiate message builder {}", builderClass); + throw new MessageBuilderException("Could not instantiate message builder", exc); + } + + return builder; + } +} diff --git a/src/test/java/com/ibm/eventstreams/connect/mqsink/ExceptionProcessorTest.java b/src/test/java/com/ibm/eventstreams/connect/mqsink/ExceptionProcessorTest.java new file mode 100644 index 0000000..3bb5b43 --- /dev/null +++ b/src/test/java/com/ibm/eventstreams/connect/mqsink/ExceptionProcessorTest.java @@ -0,0 +1,91 @@ +/** + * Copyright 2023, 2024 IBM Corporation + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package com.ibm.eventstreams.connect.mqsink; + +import static org.assertj.core.api.Assertions.assertThat; + +import java.util.ArrayList; +import java.util.List; + +import org.apache.kafka.connect.errors.ConnectException; + +import com.ibm.mq.MQException; + +import junit.framework.TestCase; + +public class ExceptionProcessorTest extends TestCase { + + public void test_getReasonWithNonMQException() { + final ConnectException exp = new ConnectException("test text"); + final int reason = ExceptionProcessor.getReason(exp); + assertThat(reason).isEqualTo(-1); + } + + public void test_getReasonWithMQException() { + final MQException exp = new MQException(1, 1, getClass()); + final MQException wrapperExp = new MQException(1, 1, exp, exp); + final int reason = ExceptionProcessor.getReason(wrapperExp); + assertThat(reason).isGreaterThan(-1); + } + + public void test_isClosableWithMQExceptionErrorNotClosable() { + final MQException exp = new MQException(1, 1, getClass()); + final MQException wrapperExp = new MQException(1, 1, exp, exp); + final boolean isClosable = ExceptionProcessor.isClosable(wrapperExp); + assertThat(isClosable).isTrue(); + } + + public void test_isClosableWithMQExceptionErrorIsClosable() { + MQException exp = new MQException(1, 2053, getClass()); + MQException wrapperExp = new MQException(1, 1, exp, exp); + boolean isClosable = ExceptionProcessor.isClosable(wrapperExp); + assertThat(isClosable).isFalse(); + + exp = new MQException(1, 2051, getClass()); + wrapperExp = new MQException(1, 1, exp, exp); + isClosable = 
ExceptionProcessor.isClosable(wrapperExp); + assertThat(isClosable).isFalse(); + } + + public void test_isRetriableWithMQExceptionErrorsAreRetriable() { + final List reasonsRetriable = new ArrayList<>(); + reasonsRetriable.add(2003); + reasonsRetriable.add(2537); + reasonsRetriable.add(2009); + reasonsRetriable.add(2538); + reasonsRetriable.add(2035); + reasonsRetriable.add(2059); + reasonsRetriable.add(2161); + reasonsRetriable.add(2162); + reasonsRetriable.add(2195); + reasonsRetriable.add(2053); + reasonsRetriable.add(2051); + for (final int reason : reasonsRetriable) { + createAndProcessExceptionThrough_isRetriable_andAssert(reason, true); + } + } + + public void test_isRetriableWithMQExceptionErrorsAreNotRetriable() { + createAndProcessExceptionThrough_isRetriable_andAssert(1, false); + } + + private void createAndProcessExceptionThrough_isRetriable_andAssert(final int reason, + final Boolean expectedResult) { + final MQException exp = new MQException(1, reason, getClass()); + final MQException wrapperExp = new MQException(1, 1, exp, exp); + assertThat(ExceptionProcessor.isRetriable(wrapperExp)).isEqualTo(expectedResult); + } +} diff --git a/src/test/java/com/ibm/eventstreams/connect/mqsink/MQSinkConnectorTest.java b/src/test/java/com/ibm/eventstreams/connect/mqsink/MQSinkConnectorTest.java index f9fa5eb..1232618 100644 --- a/src/test/java/com/ibm/eventstreams/connect/mqsink/MQSinkConnectorTest.java +++ b/src/test/java/com/ibm/eventstreams/connect/mqsink/MQSinkConnectorTest.java @@ -1,38 +1,111 @@ /** - * Copyright 2017, 2018, 2019 IBM Corporation - * + * Copyright 2017, 2018, 2019, 2023, 2024 IBM Corporation + *

* Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. + *

+ * http://www.apache.org/licenses/LICENSE-2.0 + *

+ * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. */ package com.ibm.eventstreams.connect.mqsink; +import org.apache.kafka.common.config.AbstractConfig; import org.apache.kafka.connect.connector.Connector; +import org.apache.kafka.connect.errors.ConnectException; import org.apache.kafka.connect.sink.SinkConnector; import org.junit.Test; +import com.ibm.eventstreams.connect.mqsink.utils.Configs; + +import static com.ibm.eventstreams.connect.mqsink.AbstractJMSContextIT.DEFAULT_MESSAGE_BUILDER; import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertFalse; +import static org.junit.Assert.assertThrows; import static org.junit.Assert.assertTrue; +import java.util.ArrayList; +import java.util.Collections; +import java.util.HashMap; +import java.util.List; +import java.util.Map; + public class MQSinkConnectorTest { @Test public void testVersion() { - String version = new MQSinkConnector().version(); - String expectedVersion = System.getProperty("connectorVersion"); + final String version = new MQSinkConnector().version(); + final String expectedVersion = System.getProperty("connectorVersion"); assertEquals("Expected connector version to match version of built jar file.", expectedVersion, version); } @Test public void testConnectorType() { - Connector connector = new MQSinkConnector(); + final Connector connector = new MQSinkConnector(); assertTrue(SinkConnector.class.isAssignableFrom(connector.getClass())); } -} \ No newline at end of file + + @Test + public void testConnectorExactlyOnceSupport() { + final MQSinkConnector connector = new MQSinkConnector(); + final Map props = new HashMap<>(); + props.put("mq.queue.manager", "QM1"); + props.put("mq.connection.mode", 
"client"); + props.put("mq.connection.name.list", "localhost(1414)"); + props.put("mq.channel.name", "DEV.APP.SVRCONN"); + props.put("mq.queue", "DEV.QUEUE.1"); + props.put("mq.user.authentication.mqcsp", "false"); + props.put("mq.exactly.once.state.queue", "DEV.QUEUE.2"); + + connector.start(props); + + // Test with exactly.once.state.queue is set but the max number of tasks is 2 + // (greater than 1) + assertThrows(ConnectException.class, () -> connector.taskConfigs(2)); + + // Test with exactly.once.state.queue is set and the max number of tasks is 1 + List> expectedTaskConfigs = new ArrayList<>(); + expectedTaskConfigs.add(props); + int maxTask = 1; + assertEquals(connector.taskConfigs(1), expectedTaskConfigs); + + // Test with exactly.once.state.queue is not set + props.remove("mq.exactly.once.state.queue"); + connector.start(props); + maxTask = 2; + expectedTaskConfigs = new ArrayList<>(); + for (int i = 0; i < maxTask; i++) { + expectedTaskConfigs.add(props); + } + assertEquals(connector.taskConfigs(maxTask), expectedTaskConfigs); + } + + @Test + public void testConnectorConfigSupportsExactlyOnce() { + // True if an mq.exactly.once.state.queue value is supplied in the config and + // 'tasks.max' is 1 + final Map configProps_tskMax_devQue = new HashMap(); + configProps_tskMax_devQue.put("mq.exactly.once.state.queue", "DEV.QUEUE.2"); + configProps_tskMax_devQue.put("tasks.max", "1"); + configProps_tskMax_devQue.put("mq.message.builder", DEFAULT_MESSAGE_BUILDER); + + assertTrue(MQSinkConnector.configSupportsExactlyOnce(Configs.customConfig(configProps_tskMax_devQue))); + final Map configProps_devQue = new HashMap(); + configProps_devQue.put("mq.message.builder", DEFAULT_MESSAGE_BUILDER); + configProps_devQue.put("mq.exactly.once.state.queue", "DEV.QUEUE.2"); + assertTrue(MQSinkConnector.configSupportsExactlyOnce(Configs.customConfig(configProps_devQue))); + // False otherwise + final Map configProps_tskMax = new HashMap(); + 
configProps_tskMax.put("mq.message.builder", DEFAULT_MESSAGE_BUILDER); + configProps_tskMax.put("tasks.max", "1"); + + assertFalse(MQSinkConnector.configSupportsExactlyOnce(Configs.customConfig(configProps_tskMax))); + assertFalse(MQSinkConnector.configSupportsExactlyOnce(Configs.defaultConfig())); + assertFalse(MQSinkConnector.configSupportsExactlyOnce(Configs.customConfig(Collections.singletonMap("mq.exactly.once.state.queue", "")))); + assertFalse(MQSinkConnector.configSupportsExactlyOnce(Configs.customConfig(Collections.singletonMap("mq.exactly.once.state.queue", null)))); + } +} diff --git a/src/test/java/com/ibm/eventstreams/connect/mqsink/builders/MessageBuilderFactoryTest.java b/src/test/java/com/ibm/eventstreams/connect/mqsink/builders/MessageBuilderFactoryTest.java new file mode 100644 index 0000000..5f55694 --- /dev/null +++ b/src/test/java/com/ibm/eventstreams/connect/mqsink/builders/MessageBuilderFactoryTest.java @@ -0,0 +1,52 @@ +/** + * Copyright 2023, 2024 IBM Corporation + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package com.ibm.eventstreams.connect.mqsink.builders; + + + +import org.assertj.core.api.Assertions; +import org.junit.Test; + +import com.ibm.eventstreams.connect.mqsink.utils.Configs; + +public class MessageBuilderFactoryTest { + + + + @Test + public void testGetMessageBuilder_ForJsonMessageBuilder() { + final MessageBuilder messageBuilder = MessageBuilderFactory + .getMessageBuilder("com.ibm.eventstreams.connect.mqsink.builders.JsonMessageBuilder", Configs.defaultConfig()); + Assertions.assertThat(messageBuilder).isInstanceOf(JsonMessageBuilder.class); + } + + @Test + public void testGetMessageBuilder_ForDefaultMessageBuilder() { + final MessageBuilder messageBuilder = MessageBuilderFactory + .getMessageBuilder("com.ibm.eventstreams.connect.mqsink.builders.DefaultMessageBuilder", Configs.defaultConfig()); + Assertions.assertThat(messageBuilder).isInstanceOf(DefaultMessageBuilder.class); + } + + @Test(expected = MessageBuilderException.class) + public void testGetMessageBuilder_JunkClass() { + MessageBuilderFactory.getMessageBuilder("casjsajhasdhusdo;iasd", Configs.defaultConfig()); + } + + @Test(expected = MessageBuilderException.class) + public void testGetMessageBuilder_NullProps() { + MessageBuilderFactory.getMessageBuilder("casjsajhasdhusdo;iasd", null); + } +} \ No newline at end of file diff --git a/src/test/java/com/ibm/eventstreams/connect/mqsink/utils/Configs.java b/src/test/java/com/ibm/eventstreams/connect/mqsink/utils/Configs.java new file mode 100644 index 0000000..d9b0e25 --- /dev/null +++ b/src/test/java/com/ibm/eventstreams/connect/mqsink/utils/Configs.java @@ -0,0 +1,47 @@ +/** + * Copyright 2024 IBM Corporation + *

+ * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + *

+ * http://www.apache.org/licenses/LICENSE-2.0 + *

+ * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.ibm.eventstreams.connect.mqsink.utils; + +import java.util.Map; +import java.util.HashMap; + +import org.apache.kafka.common.config.AbstractConfig; + +import com.ibm.eventstreams.connect.mqsink.MQSinkConfig; + +import static com.ibm.eventstreams.connect.mqsink.AbstractJMSContextIT.DEFAULT_MESSAGE_BUILDER; + +public class Configs { + + static Map options = new HashMap<>(); + + static { + options.put(MQSinkConfig.CONFIG_NAME_MQ_QUEUE, "TEST.QUEUE"); + options.put(MQSinkConfig.CONFIG_NAME_MQ_QUEUE_MANAGER, "TEST.QUEUE.MANAGER"); + options.put(MQSinkConfig.CONFIG_NAME_MQ_MESSAGE_BUILDER, DEFAULT_MESSAGE_BUILDER); + } + + public static AbstractConfig defaultConfig() { + return new AbstractConfig(MQSinkConfig.config(), options); + } + + public static AbstractConfig customConfig(Map overrides) { + Map customOptions = new HashMap<>(options); + customOptions.putAll(overrides); + return new AbstractConfig(MQSinkConfig.config(), customOptions); + } +} \ No newline at end of file diff --git a/src/test/resources/log4j.properties b/src/test/resources/log4j.properties index 514f678..fc924ba 100644 --- a/src/test/resources/log4j.properties +++ b/src/test/resources/log4j.properties @@ -1,5 +1,5 @@ # -# Copyright 2017, 2018, 2019 IBM Corporation +# Copyright 2017, 2018, 2019, 2023, 2024 IBM Corporation # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. 
@@ -19,4 +19,4 @@ log4j.appender.stdout=org.apache.log4j.ConsoleAppender log4j.appender.stdout.layout=org.apache.log4j.PatternLayout log4j.appender.stdout.layout.ConversionPattern=[%d] %p %m (%c:%L)%n -log4j.logger.org.apache.kafka=ERROR \ No newline at end of file +log4j.logger.org.apache.kafka=ERROR