From 333ffdb00588a7bcd80b21d352d6f4f44597c9a5 Mon Sep 17 00:00:00 2001 From: liuszeng Date: Fri, 8 Sep 2017 17:02:54 +0000 Subject: [PATCH] Release of version 1.2.0 --- AWSIoTPythonSDK/MQTTLib.py | 416 ++++++++++++++-- AWSIoTPythonSDK/__init__.py | 5 +- .../__init__.py | 0 .../core/greengrass/discovery/__init__.py | 0 .../core/greengrass/discovery/models.py | 466 ++++++++++++++++++ .../core/greengrass/discovery/providers.py | 404 +++++++++++++++ .../core/protocol/connection/__init__.py | 0 .../cores.py} | 310 ++++++++++-- .../core/protocol/internal/__init__.py | 0 .../core/protocol/internal/clients.py | 233 +++++++++ .../core/protocol/internal/defaults.py | 18 + .../core/protocol/internal/events.py | 29 ++ .../core/protocol/internal/queues.py | 87 ++++ .../core/protocol/internal/requests.py | 27 + .../core/protocol/internal/workers.py | 284 +++++++++++ AWSIoTPythonSDK/core/protocol/mqttCore.py | 459 ----------------- AWSIoTPythonSDK/core/protocol/mqtt_core.py | 321 ++++++++++++ AWSIoTPythonSDK/core/protocol/paho/client.py | 15 +- AWSIoTPythonSDK/core/shadow/deviceShadow.py | 231 ++++----- AWSIoTPythonSDK/core/shadow/shadowManager.py | 41 +- AWSIoTPythonSDK/core/util/enums.py | 19 + .../core/util/offlinePublishQueue.py | 92 ---- .../core/util/progressiveBackoffCore.py | 91 ---- AWSIoTPythonSDK/core/util/providers.py | 92 ++++ AWSIoTPythonSDK/core/util/sigV4Core.py | 187 ------- AWSIoTPythonSDK/exception/AWSIoTExceptions.py | 53 +- AWSIoTPythonSDK/exception/operationError.py | 2 +- .../exception/operationTimeoutException.py | 2 +- CHANGELOG.rst | 12 + README.rst | 124 ++++- samples/ThingShadowEcho/ThingShadowEcho.py | 60 +-- samples/basicPubSub/basicPubSub.py | 44 +- samples/basicPubSub/basicPubSubAsync.py | 116 +++++ samples/basicPubSub/basicPubSub_CognitoSTS.py | 27 +- .../basicShadow/basicShadowDeltaListener.py | 48 +- samples/basicShadow/basicShadowUpdater.py | 3 +- samples/greengrass/basicDiscovery.py | 157 ++++++ setup.py | 28 +- 38 files changed, 3311 
insertions(+), 1192 deletions(-) rename AWSIoTPythonSDK/core/{protocol/paho/securedWebsocket => greengrass}/__init__.py (100%) mode change 100755 => 100644 create mode 100644 AWSIoTPythonSDK/core/greengrass/discovery/__init__.py create mode 100644 AWSIoTPythonSDK/core/greengrass/discovery/models.py create mode 100644 AWSIoTPythonSDK/core/greengrass/discovery/providers.py create mode 100644 AWSIoTPythonSDK/core/protocol/connection/__init__.py rename AWSIoTPythonSDK/core/protocol/{paho/securedWebsocket/securedWebsocketCore.py => connection/cores.py} (59%) mode change 100755 => 100644 create mode 100644 AWSIoTPythonSDK/core/protocol/internal/__init__.py create mode 100644 AWSIoTPythonSDK/core/protocol/internal/clients.py create mode 100644 AWSIoTPythonSDK/core/protocol/internal/defaults.py create mode 100644 AWSIoTPythonSDK/core/protocol/internal/events.py create mode 100644 AWSIoTPythonSDK/core/protocol/internal/queues.py create mode 100644 AWSIoTPythonSDK/core/protocol/internal/requests.py create mode 100644 AWSIoTPythonSDK/core/protocol/internal/workers.py delete mode 100755 AWSIoTPythonSDK/core/protocol/mqttCore.py create mode 100644 AWSIoTPythonSDK/core/protocol/mqtt_core.py create mode 100644 AWSIoTPythonSDK/core/util/enums.py delete mode 100755 AWSIoTPythonSDK/core/util/offlinePublishQueue.py delete mode 100755 AWSIoTPythonSDK/core/util/progressiveBackoffCore.py create mode 100644 AWSIoTPythonSDK/core/util/providers.py delete mode 100755 AWSIoTPythonSDK/core/util/sigV4Core.py create mode 100644 samples/basicPubSub/basicPubSubAsync.py create mode 100644 samples/greengrass/basicDiscovery.py diff --git a/AWSIoTPythonSDK/MQTTLib.py b/AWSIoTPythonSDK/MQTTLib.py index 083ad54..f5f176b 100755 --- a/AWSIoTPythonSDK/MQTTLib.py +++ b/AWSIoTPythonSDK/MQTTLib.py @@ -1,6 +1,6 @@ # #/* -# * Copyright 2010-2016 Amazon.com, Inc. or its affiliates. All Rights Reserved. +# * Copyright 2010-2017 Amazon.com, Inc. or its affiliates. All Rights Reserved. 
# * # * Licensed under the Apache License, Version 2.0 (the "License"). # * You may not use this file except in compliance with the License. @@ -14,20 +14,22 @@ # * permissions and limitations under the License. # */ -# import mqttCore -import AWSIoTPythonSDK.core.protocol.mqttCore as mqttCore -# import shadowManager +from AWSIoTPythonSDK.core.util.providers import CertificateCredentialsProvider +from AWSIoTPythonSDK.core.util.providers import IAMCredentialsProvider +from AWSIoTPythonSDK.core.util.providers import EndpointProvider +from AWSIoTPythonSDK.core.protocol.mqtt_core import MqttCore import AWSIoTPythonSDK.core.shadow.shadowManager as shadowManager -# import deviceShadow import AWSIoTPythonSDK.core.shadow.deviceShadow as deviceShadow + + # Constants # - Protocol types: MQTTv3_1 = 3 MQTTv3_1_1 = 4 -# - OfflinePublishQueueing drop behavior: + DROP_OLDEST = 0 DROP_NEWEST = 1 -# + class AWSIoTMQTTClient: @@ -78,14 +80,13 @@ def __init__(self, clientID, protocolType=MQTTv3_1_1, useWebsocket=False, cleanS **Returns** - AWSIoTPythonSDK.MQTTLib.AWSIoTMQTTClient object + :code:`AWSIoTPythonSDK.MQTTLib.AWSIoTMQTTClient` object """ - # mqttCore(clientID, cleanSession, protocol, srcLogManager, srcUseWebsocket=False) - self._mqttCore = mqttCore.mqttCore(clientID, cleanSession, protocolType, useWebsocket) + self._mqtt_core = MqttCore(clientID, cleanSession, protocolType, useWebsocket) # Configuration APIs - def configureLastWill(self, topic, payload, QoS): + def configureLastWill(self, topic, payload, QoS, retain=False): """ **Description** @@ -110,8 +111,7 @@ def configureLastWill(self, topic, payload, QoS): None """ - # mqttCore.setLastWill(srcTopic, srcPayload, srcQos) - self._mqttCore.setLastWill(topic, payload, QoS) + self._mqtt_core.configure_last_will(topic, payload, QoS, retain) def clearLastWill(self): """ @@ -134,8 +134,7 @@ def clearLastWill(self): None """ - #mqttCore.clearLastWill() - self._mqttCore.clearLastWill() + self._mqtt_core.clear_last_will() def 
configureEndpoint(self, hostName, portNumber): """ @@ -162,8 +161,10 @@ def configureEndpoint(self, hostName, portNumber): None """ - # mqttCore.configEndpoint(srcHost, srcPort) - self._mqttCore.configEndpoint(hostName, portNumber) + endpoint_provider = EndpointProvider() + endpoint_provider.set_host(hostName) + endpoint_provider.set_port(portNumber) + self._mqtt_core.configure_endpoint(endpoint_provider) def configureIAMCredentials(self, AWSAccessKeyID, AWSSecretAccessKey, AWSSessionToken=""): """ @@ -196,8 +197,11 @@ def configureIAMCredentials(self, AWSAccessKeyID, AWSSecretAccessKey, AWSSession None """ - # mqttCore.configIAMCredentials(srcAWSAccessKeyID, srcAWSSecretAccessKey, srcAWSSessionToken) - self._mqttCore.configIAMCredentials(AWSAccessKeyID, AWSSecretAccessKey, AWSSessionToken) + iam_credentials_provider = IAMCredentialsProvider() + iam_credentials_provider.set_access_key_id(AWSAccessKeyID) + iam_credentials_provider.set_secret_access_key(AWSSecretAccessKey) + iam_credentials_provider.set_session_token(AWSSessionToken) + self._mqtt_core.configure_iam_credentials(iam_credentials_provider) def configureCredentials(self, CAFilePath, KeyPath="", CertificatePath=""): # Should be good for MutualAuth certs config and Websocket rootCA config """ @@ -224,8 +228,11 @@ def configureCredentials(self, CAFilePath, KeyPath="", CertificatePath=""): # S None """ - # mqttCore.configCredentials(srcCAFile, srcKey, srcCert) - self._mqttCore.configCredentials(CAFilePath, KeyPath, CertificatePath) + cert_credentials_provider = CertificateCredentialsProvider() + cert_credentials_provider.set_ca_path(CAFilePath) + cert_credentials_provider.set_key_path(KeyPath) + cert_credentials_provider.set_cert_path(CertificatePath) + self._mqtt_core.configure_cert_credentials(cert_credentials_provider) def configureAutoReconnectBackoffTime(self, baseReconnectQuietTimeSecond, maxReconnectQuietTimeSecond, stableConnectionTimeSecond): """ @@ -256,15 +263,14 @@ def 
configureAutoReconnectBackoffTime(self, baseReconnectQuietTimeSecond, maxRec None """ - # mqttCore.setBackoffTime(srcBaseReconnectTimeSecond, srcMaximumReconnectTimeSecond, srcMinimumConnectTimeSecond) - self._mqttCore.setBackoffTime(baseReconnectQuietTimeSecond, maxReconnectQuietTimeSecond, stableConnectionTimeSecond) + self._mqtt_core.configure_reconnect_back_off(baseReconnectQuietTimeSecond, maxReconnectQuietTimeSecond, stableConnectionTimeSecond) def configureOfflinePublishQueueing(self, queueSize, dropBehavior=DROP_NEWEST): """ **Description** - Used to configure the queue size and drop behavior for the offline publish requests queueing. Should be - called before connect. + Used to configure the queue size and drop behavior for the offline requests queueing. Should be + called before connect. Queueable offline requests include publish, subscribe and unsubscribe. **Syntax** @@ -282,15 +288,15 @@ def configureOfflinePublishQueueing(self, queueSize, dropBehavior=DROP_NEWEST): If set to 0, the queue is disabled. If set to -1, the queue size is set to be infinite. *dropBehavior* - the type of drop behavior when the queue is full. - Could be :code:`AWSIoTPythonSDK.MQTTLib.DROP_OLDEST` or :code:`AWSIoTPythonSDK.MQTTLib.DROP_NEWEST`. + Could be :code:`AWSIoTPythonSDK.core.util.enums.DropBehaviorTypes.DROP_OLDEST` or + :code:`AWSIoTPythonSDK.core.util.enums.DropBehaviorTypes.DROP_NEWEST`. 
**Returns** None """ - # mqttCore.setOfflinePublishQueueing(srcQueueSize, srcDropBehavior=mqtt.MSG_QUEUEING_DROP_NEWEST) - self._mqttCore.setOfflinePublishQueueing(queueSize, dropBehavior) + self._mqtt_core.configure_offline_requests_queue(queueSize, dropBehavior) def configureDrainingFrequency(self, frequencyInHz): """ @@ -320,8 +326,7 @@ def configureDrainingFrequency(self, frequencyInHz): None """ - # mqttCore.setDrainingIntervalSecond(srcDrainingIntervalSecond) - self._mqttCore.setDrainingIntervalSecond(1/float(frequencyInHz)) + self._mqtt_core.configure_draining_interval_sec(1/float(frequencyInHz)) def configureConnectDisconnectTimeout(self, timeoutSecond): """ @@ -346,8 +351,7 @@ def configureConnectDisconnectTimeout(self, timeoutSecond): None """ - # mqttCore.setConnectDisconnectTimeoutSecond(srcConnectDisconnectTimeout) - self._mqttCore.setConnectDisconnectTimeoutSecond(timeoutSecond) + self._mqtt_core.configure_connect_disconnect_timeout_sec(timeoutSecond) def configureMQTTOperationTimeout(self, timeoutSecond): """ @@ -372,15 +376,14 @@ def configureMQTTOperationTimeout(self, timeoutSecond): None """ - # mqttCore.setMQTTOperationTimeoutSecond(srcMQTTOperationTimeout) - self._mqttCore.setMQTTOperationTimeoutSecond(timeoutSecond) + self._mqtt_core.configure_operation_timeout_sec(timeoutSecond) # MQTT functionality APIs def connect(self, keepAliveIntervalSecond=30): """ **Description** - Connect to AWS IoT, with user-specific keeoalive interval configuration. + Connect to AWS IoT, with user-specific keepalive interval configuration. **Syntax** @@ -401,8 +404,45 @@ def connect(self, keepAliveIntervalSecond=30): True if the connect attempt succeeded. False if failed. 
""" - # mqttCore.connect(keepAliveInterval=30) - return self._mqttCore.connect(keepAliveIntervalSecond) + self._load_callbacks() + return self._mqtt_core.connect(keepAliveIntervalSecond) + + def connectAsync(self, keepAliveIntervalSecond=30, ackCallback=None): + """ + **Description** + + Connect asynchronously to AWS IoT, with user-specific keepalive interval configuration and CONNACK callback. + + **Syntax** + + .. code:: python + + # Connect to AWS IoT with default keepalive set to 30 seconds and a custom CONNACK callback + myAWSIoTMQTTClient.connectAsync(ackCallback=my_connack_callback) + # Connect to AWS IoT with default keepalive set to 55 seconds and a custom CONNACK callback + myAWSIoTMQTTClient.connectAsync(keepAliveInternvalSecond=55, ackCallback=myConnackCallback) + + **Parameters** + + *keepAliveIntervalSecond* - Time in seconds for interval of sending MQTT ping request. + Default set to 30 seconds. + + *ackCallback* - Callback to be invoked when the client receives a CONNACK. Should be in form + :code:`customCallback(mid, data)`, where :code:`mid` is the packet id for the connect request + and :code:`data` is the connect result code. + + **Returns** + + Connect request packet id, for tracking purpose in the corresponding callback. + + """ + self._load_callbacks() + return self._mqtt_core.connect_async(keepAliveIntervalSecond, ackCallback) + + def _load_callbacks(self): + self._mqtt_core.on_online = self.onOnline + self._mqtt_core.on_offline = self.onOffline + self._mqtt_core.on_message = self.onMessage def disconnect(self): """ @@ -425,8 +465,32 @@ def disconnect(self): True if the disconnect attempt succeeded. False if failed. """ - # mqttCore.disconnect() - return self._mqttCore.disconnect() + return self._mqtt_core.disconnect() + + def disconnectAsync(self, ackCallback=None): + """ + **Description** + + Disconnect asynchronously to AWS IoT. + + **Syntax** + + .. 
code:: python + + myAWSIoTMQTTClient.disconnectAsync(ackCallback=myDisconnectCallback) + + **Parameters** + + *ackCallback* - Callback to be invoked when the client finishes sending disconnect and internal clean-up. + Should be in form :code:`customCallback(mid, data)`, where :code:`mid` is the packet id for the disconnect + request and :code:`data` is the disconnect result code. + + **Returns** + + Disconnect request packet id, for tracking purpose in the corresponding callback. + + """ + return self._mqtt_core.disconnect_async(ackCallback) def publish(self, topic, payload, QoS): """ @@ -438,7 +502,7 @@ def publish(self, topic, payload, QoS): .. code:: python - # Publish a QoS0 message "myPayload" to topic "myToppic" + # Publish a QoS0 message "myPayload" to topic "myTopic" myAWSIoTMQTTClient.publish("myTopic", "myPayload", 0) # Publish a QoS1 message "myPayloadWithQos1" to topic "myTopic/sub" myAWSIoTMQTTClient.publish("myTopic/sub", "myPayloadWithQos1", 1) @@ -456,8 +520,41 @@ def publish(self, topic, payload, QoS): True if the publish request has been sent to paho. False if the request did not reach paho. """ - # mqttCore.publish(topic, payload, qos, retain) - return self._mqttCore.publish(topic, payload, QoS, False) # Disable retain for publish by now + return self._mqtt_core.publish(topic, payload, QoS, False) # Disable retain for publish by now + + def publishAsync(self, topic, payload, QoS, ackCallback=None): + """ + **Description** + + Publish a new message asynchronously to the desired topic with QoS and PUBACK callback. Note that the ack + callback configuration for a QoS0 publish request will be ignored as there are no PUBACK reception. + + **Syntax** + + .. 
code:: python + + # Publish a QoS0 message "myPayload" to topic "myTopic" + myAWSIoTMQTTClient.publishAsync("myTopic", "myPayload", 0) + # Publish a QoS1 message "myPayloadWithQos1" to topic "myTopic/sub", with custom PUBACK callback + myAWSIoTMQTTClient.publishAsync("myTopic/sub", "myPayloadWithQos1", 1, ackCallback=myPubackCallback) + + **Parameters** + + *topic* - Topic name to publish to. + + *payload* - Payload to publish. + + *QoS* - Quality of Service. Could be 0 or 1. + + *ackCallback* - Callback to be invoked when the client receives a PUBACK. Should be in form + :code:`customCallback(mid)`, where :code:`mid` is the packet id for the disconnect request. + + **Returns** + + Publish request packet id, for tracking purpose in the corresponding callback. + + """ + return self._mqtt_core.publish_async(topic, payload, QoS, False, ackCallback) def subscribe(self, topic, QoS, callback): """ @@ -489,14 +586,49 @@ def subscribe(self, topic, QoS, callback): True if the subscribe attempt succeeded. False if failed. """ - # mqttCore.subscribe(topic, qos, callback) - return self._mqttCore.subscribe(topic, QoS, callback) + return self._mqtt_core.subscribe(topic, QoS, callback) + + def subscribeAsync(self, topic, QoS, ackCallback=None, messageCallback=None): + """ + **Description** + + Subscribe to the desired topic and register a message callback with SUBACK callback. + + **Syntax** + + .. code:: python + + # Subscribe to "myTopic" with QoS0, custom SUBACK callback and a message callback + myAWSIoTMQTTClient.subscribe("myTopic", 0, ackCallback=mySubackCallback, messageCallback=customMessageCallback) + # Subscribe to "myTopic/#" with QoS1, custom SUBACK callback and a message callback + myAWSIoTMQTTClient.subscribe("myTopic/#", 1, ackCallback=mySubackCallback, messageCallback=customMessageCallback) + + **Parameters** + + *topic* - Topic name or filter to subscribe to. + + *QoS* - Quality of Service. Could be 0 or 1. 
+ + *ackCallback* - Callback to be invoked when the client receives a SUBACK. Should be in form + :code:`customCallback(mid, data)`, where :code:`mid` is the packet id for the disconnect request and + :code:`data` is the granted QoS for this subscription. + + *messageCallback* - Function to be called when a new message for the subscribed topic + comes in. Should be in form :code:`customCallback(client, userdata, message)`, where + :code:`message` contains :code:`topic` and :code:`payload`. + + **Returns** + + Subscribe request packet id, for tracking purpose in the corresponding callback. + + """ + return self._mqtt_core.subscribe_async(topic, QoS, ackCallback, messageCallback) def unsubscribe(self, topic): """ **Description** - Unsubscribed to the desired topic. + Unsubscribe to the desired topic. **Syntax** @@ -513,8 +645,111 @@ def unsubscribe(self, topic): True if the unsubscribe attempt succeeded. False if failed. """ - # mqttCore.unsubscribe(topic) - return self._mqttCore.unsubscribe(topic) + return self._mqtt_core.unsubscribe(topic) + + def unsubscribeAsync(self, topic, ackCallback=None): + """ + **Description** + + Unsubscribe to the desired topic with UNSUBACK callback. + + **Syntax** + + .. code:: python + + myAWSIoTMQTTClient.unsubscribe("myTopic", ackCallback=myUnsubackCallback) + + **Parameters** + + *topic* - Topic name or filter to unsubscribe to. + + *ackCallback* - Callback to be invoked when the client receives a UNSUBACK. Should be in form + :code:`customCallback(mid)`, where :code:`mid` is the packet id for the disconnect request. + + **Returns** + + Unsubscribe request packet id, for tracking purpose in the corresponding callback. + + """ + return self._mqtt_core.unsubscribe_async(topic, ackCallback) + + def onOnline(self): + """ + **Description** + + Callback that gets called when the client is online. The callback registration should happen before calling + connect/connectAsync. + + **Syntax** + + .. 
code:: python + + # Register an onOnline callback + myAWSIoTMQTTClient.onOnline = myOnOnlineCallback + + **Parameters** + + None + + **Returns** + + None + + """ + pass + + def onOffline(self): + """ + **Description** + + Callback that gets called when the client is offline. The callback registration should happen before calling + connect/connectAsync. + + **Syntax** + + .. code:: python + + # Register an onOffline callback + myAWSIoTMQTTClient.onOffline = myOnOfflineCallback + + **Parameters** + + None + + **Returns** + + None + + """ + pass + + def onMessage(self, message): + """ + **Description** + + Callback that gets called when the client receives a new message. The callback registration should happen before + calling connect/connectAsync. This callback, if present, will always be triggered regardless of whether there is + any message callback registered upon subscribe API call. It is for the purpose to aggregating the processing of + received messages in one function. + + **Syntax** + + .. code:: python + + # Register an onMessage callback + myAWSIoTMQTTClient.onMessage = myOnMessageCallback + + **Parameters** + + *message* - Received MQTT message. It contains the source topic as :code:`message.topic`, and the payload as + :code:`message.payload`. 
+ + **Returns** + + None + + """ + pass class AWSIoTMQTTShadowClient: @@ -569,10 +804,10 @@ def __init__(self, clientID, protocolType=MQTTv3_1_1, useWebsocket=False, cleanS # AWSIOTMQTTClient instance self._AWSIoTMQTTClient = AWSIoTMQTTClient(clientID, protocolType, useWebsocket, cleanSession) # Configure it to disable offline Publish Queueing - self._AWSIoTMQTTClient.configureOfflinePublishQueueing(0) # Disable queueing, no queueing for time-sentive shadow messages + self._AWSIoTMQTTClient.configureOfflinePublishQueueing(0) # Disable queueing, no queueing for time-sensitive shadow messages self._AWSIoTMQTTClient.configureDrainingFrequency(10) # Now retrieve the configured mqttCore and init a shadowManager instance - self._shadowManager = shadowManager.shadowManager(self._AWSIoTMQTTClient._mqttCore) + self._shadowManager = shadowManager.shadowManager(self._AWSIoTMQTTClient._mqtt_core) # Configuration APIs def configureLastWill(self, topic, payload, QoS): @@ -693,12 +928,26 @@ def configureCredentials(self, CAFilePath, KeyPath="", CertificatePath=""): # S """ **Description** + Used to configure the rootCA, private key and certificate files. Should be called before connect. + **Syntax** + .. code:: python + + myAWSIoTMQTTClient.configureCredentials("PATH/TO/ROOT_CA", "PATH/TO/PRIVATE_KEY", "PATH/TO/CERTIFICATE") + **Parameters** + *CAFilePath* - Path to read the root CA file. Required for all connection types. + + *KeyPath* - Path to read the private key. Required for X.509 certificate based connection. + + *CertificatePath* - Path to read the certificate. Required for X.509 certificate based connection. + **Returns** + None + """ # AWSIoTMQTTClient.configureCredentials self._AWSIoTMQTTClient.configureCredentials(CAFilePath, KeyPath, CertificatePath) @@ -707,21 +956,25 @@ def configureAutoReconnectBackoffTime(self, baseReconnectQuietTimeSecond, maxRec """ **Description** - Used to configure the rootCA, private key and certificate files. 
Should be called before connect. + Used to configure the auto-reconnect backoff timing. Should be called before connect. **Syntax** .. code:: python - myAWSIoTMQTTShadowClient.configureCredentials("PATH/TO/ROOT_CA", "PATH/TO/PRIVATE_KEY", "PATH/TO/CERTIFICATE") + # Configure the auto-reconnect backoff to start with 1 second and use 128 seconds as a maximum back off time. + # Connection over 20 seconds is considered stable and will reset the back off time back to its base. + myAWSIoTMQTTClient.configureAutoReconnectBackoffTime(1, 128, 20) **Parameters** - *CAFilePath* - Path to read the root CA file. Required for all connection types. + *baseReconnectQuietTimeSecond* - The initial back off time to start with, in seconds. + Should be less than the stableConnectionTime. - *KeyPath* - Path to read the private key. Required for X.509 certificate based connection. + *maxReconnectQuietTimeSecond* - The maximum back off time, in seconds. - *CertificatePath* - Path to read the certificate. Required for X.509 certificate based connection. + *stableConnectionTimeSecond* - The number of seconds for a connection to last to be considered as stable. + Back off time will be reset to base once the connection is stable. **Returns** @@ -809,8 +1062,13 @@ def connect(self, keepAliveIntervalSecond=30): True if the connect attempt succeeded. False if failed. """ + self._load_callbacks() return self._AWSIoTMQTTClient.connect(keepAliveIntervalSecond) + def _load_callbacks(self): + self._AWSIoTMQTTClient.onOnline = self.onOnline + self._AWSIoTMQTTClient.onOffline = self.onOffline + # End the MQTT connection def disconnect(self): """ @@ -903,3 +1161,53 @@ def getMQTTConnection(self): """ # Return the internal AWSIoTMQTTClient instance return self._AWSIoTMQTTClient + + def onOnline(self): + """ + **Description** + + Callback that gets called when the client is online. The callback registration should happen before calling + connect. + + **Syntax** + + .. 
code:: python + + # Register an onOnline callback + myAWSIoTMQTTShadowClient.onOnline = myOnOnlineCallback + + **Parameters** + + None + + **Returns** + + None + + """ + pass + + def onOffline(self): + """ + **Description** + + Callback that gets called when the client is offline. The callback registration should happen before calling + connect. + + **Syntax** + + .. code:: python + + # Register an onOffline callback + myAWSIoTMQTTShadowClient.onOffline = myOnOfflineCallback + + **Parameters** + + None + + **Returns** + + None + + """ + pass diff --git a/AWSIoTPythonSDK/__init__.py b/AWSIoTPythonSDK/__init__.py index 3925732..8ba2541 100755 --- a/AWSIoTPythonSDK/__init__.py +++ b/AWSIoTPythonSDK/__init__.py @@ -1,6 +1,3 @@ -import os -import sys - -__version__ = "1.1.2" +__version__ = "1.2.0" diff --git a/AWSIoTPythonSDK/core/protocol/paho/securedWebsocket/__init__.py b/AWSIoTPythonSDK/core/greengrass/__init__.py old mode 100755 new mode 100644 similarity index 100% rename from AWSIoTPythonSDK/core/protocol/paho/securedWebsocket/__init__.py rename to AWSIoTPythonSDK/core/greengrass/__init__.py diff --git a/AWSIoTPythonSDK/core/greengrass/discovery/__init__.py b/AWSIoTPythonSDK/core/greengrass/discovery/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/AWSIoTPythonSDK/core/greengrass/discovery/models.py b/AWSIoTPythonSDK/core/greengrass/discovery/models.py new file mode 100644 index 0000000..d04f13f --- /dev/null +++ b/AWSIoTPythonSDK/core/greengrass/discovery/models.py @@ -0,0 +1,466 @@ +# /* +# * Copyright 2010-2017 Amazon.com, Inc. or its affiliates. All Rights Reserved. +# * +# * Licensed under the Apache License, Version 2.0 (the "License"). +# * You may not use this file except in compliance with the License. +# * A copy of the License is located at +# * +# * http://aws.amazon.com/apache2.0 +# * +# * or in the "license" file accompanying this file. 
This file is distributed +# * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either +# * express or implied. See the License for the specific language governing +# * permissions and limitations under the License. +# */ + +import json + + +KEY_GROUP_LIST = "GGGroups" +KEY_GROUP_ID = "GGGroupId" +KEY_CORE_LIST = "Cores" +KEY_CORE_ARN = "thingArn" +KEY_CA_LIST = "CAs" +KEY_CONNECTIVITY_INFO_LIST = "Connectivity" +KEY_CONNECTIVITY_INFO_ID = "Id" +KEY_HOST_ADDRESS = "HostAddress" +KEY_PORT_NUMBER = "PortNumber" +KEY_METADATA = "Metadata" + + +class ConnectivityInfo(object): + """ + + Class the stores one set of the connectivity information. + This is the data model for easy access to the discovery information from the discovery request function call. No + need to call directly from user scripts. + + """ + + def __init__(self, id, host, port, metadata): + self._id = id + self._host = host + self._port = port + self._metadata = metadata + + @property + def id(self): + """ + + Connectivity Information Id. + + """ + return self._id + + @property + def host(self): + """ + + Host address. + + """ + return self._host + + @property + def port(self): + """ + + Port number. + + """ + return self._port + + @property + def metadata(self): + """ + + Metadata string. + + """ + return self._metadata + + +class CoreConnectivityInfo(object): + """ + + Class that stores the connectivity information for a Greengrass core. + This is the data model for easy access to the discovery information from the discovery request function call. No + need to call directly from user scripts. + + """ + + def __init__(self, coreThingArn, groupId): + self._core_thing_arn = coreThingArn + self._group_id = groupId + self._connectivity_info_dict = dict() + + @property + def coreThingArn(self): + """ + + Thing arn for this Greengrass core. + + """ + return self._core_thing_arn + + @property + def groupId(self): + """ + + Greengrass group id that this Greengrass core belongs to. 
+ + """ + return self._group_id + + @property + def connectivityInfoList(self): + """ + + The list of connectivity information that this Greengrass core has. + + """ + return list(self._connectivity_info_dict.values()) + + def getConnectivityInfo(self, id): + """ + + **Description** + + Used for quickly accessing a certain set of connectivity information by id. + + **Syntax** + + .. code:: python + + myCoreConnectivityInfo.getConnectivityInfo("CoolId") + + **Parameters** + + *id* - The id for the desired connectivity information. + + **Return** + + :code:`AWSIoTPythonSDK.core.greengrass.discovery.models.ConnectivityInfo` object. + + """ + return self._connectivity_info_dict.get(id) + + def appendConnectivityInfo(self, connectivityInfo): + """ + + **Description** + + Used for adding a new set of connectivity information to the list for this Greengrass core. This is used by the + SDK internally. No need to call directly from user scripts. + + **Syntax** + + .. code:: python + + myCoreConnectivityInfo.appendConnectivityInfo(newInfo) + + **Parameters** + + *connectivityInfo* - :code:`AWSIoTPythonSDK.core.greengrass.discovery.models.ConnectivityInfo` object. + + **Returns** + + None + + """ + self._connectivity_info_dict[connectivityInfo.id] = connectivityInfo + + +class GroupConnectivityInfo(object): + """ + + Class that stores the connectivity information for a specific Greengrass group. + This is the data model for easy access to the discovery information from the discovery request function call. No + need to call directly from user scripts. + + """ + def __init__(self, groupId): + self._group_id = groupId + self._core_connectivity_info_dict = dict() + self._ca_list = list() + + @property + def groupId(self): + """ + + Id for this Greengrass group. 
+ + """ + return self._group_id + + @property + def coreConnectivityInfoList(self): + """ + + A list of Greengrass cores + (:code:`AWSIoTPythonSDK.core.greengrass.discovery.models.CoreConnectivityInfo` object) that belong to this + Greengrass group. + + """ + return list(self._core_connectivity_info_dict.values()) + + @property + def caList(self): + """ + + A list of CA content strings for this Greengrass group. + + """ + return self._ca_list + + def getCoreConnectivityInfo(self, coreThingArn): + """ + + **Description** + + Used to retrieve the corresponding :code:`AWSIoTPythonSDK.core.greengrass.discovery.models.CoreConnectivityInfo` + object by core thing arn. + + **Syntax** + + .. code:: python + + myGroupConnectivityInfo.getCoreConnectivityInfo("YourOwnArnString") + + **Parameters** + + coreThingArn - Thing arn for the desired Greengrass core. + + **Returns** + + :code:`AWSIoTPythonSDK.core.greengrass.discovery.CoreConnectivityInfo` object. + + """ + return self._core_connectivity_info_dict.get(coreThingArn) + + def appendCoreConnectivityInfo(self, coreConnectivityInfo): + """ + + **Description** + + Used to append new core connectivity information to this group connectivity information. This is used by the + SDK internally. No need to call directly from user scripts. + + **Syntax** + + .. code:: python + + myGroupConnectivityInfo.appendCoreConnectivityInfo(newCoreConnectivityInfo) + + **Parameters** + + *coreConnectivityInfo* - :code:`AWSIoTPythonSDK.core.greengrass.discovery.models.CoreConnectivityInfo` object. + + **Returns** + + None + + """ + self._core_connectivity_info_dict[coreConnectivityInfo.coreThingArn] = coreConnectivityInfo + + def appendCa(self, ca): + """ + + **Description** + + Used to append new CA content string to this group connectivity information. This is used by the SDK internally. + No need to call directly from user scripts. + + **Syntax** + + .. 
code:: python + + myGroupConnectivityInfo.appendCa("CaContentString") + + **Parameters** + + *ca* - Group CA content string. + + **Returns** + + None + + """ + self._ca_list.append(ca) + + +class DiscoveryInfo(object): + """ + + Class that stores the discovery information coming back from the discovery request. + This is the data model for easy access to the discovery information from the discovery request function call. No + need to call directly from user scripts. + + """ + def __init__(self, rawJson): + self._raw_json = rawJson + + @property + def rawJson(self): + """ + + JSON response string that contains the discovery information. This is reserved in case users want to do + some process by themselves. + + """ + return self._raw_json + + def getAllCores(self): + """ + + **Description** + + Used to retrieve the list of :code:`AWSIoTPythonSDK.core.greengrass.discovery.models.CoreConnectivityInfo` + object for this discovery information. The retrieved cores could be from different Greengrass groups. This is + designed for uses who want to iterate through all available cores at the same time, regardless of which group + those cores are in. + + **Syntax** + + .. code:: python + + myDiscoveryInfo.getAllCores() + + **Parameters** + + None + + **Returns** + + List of :code:`AWSIoTPythonSDK.core.greengrass.discovery.models.CoreConnectivtyInfo` object. + + """ + groups_list = self.getAllGroups() + core_list = list() + + for group in groups_list: + core_list.extend(group.coreConnectivityInfoList) + + return core_list + + def getAllCas(self): + """ + + **Description** + + Used to retrieve the list of :code:`(groupId, caContent)` pair for this discovery information. The retrieved + pairs could be from different Greengrass groups. This is designed for users who want to iterate through all + available cores/groups/CAs at the same time, regardless of which group those CAs belong to. + + **Syntax** + + .. 
code:: python + + myDiscoveryInfo.getAllCas() + + **Parameters** + + None + + **Returns** + + List of :code:`(groupId, caContent)` string pair, where :code:`caContent` is the CA content string and + :code:`groupId` is the group id that this CA belongs to. + + """ + group_list = self.getAllGroups() + ca_list = list() + + for group in group_list: + for ca in group.caList: + ca_list.append((group.groupId, ca)) + + return ca_list + + def getAllGroups(self): + """ + + **Description** + + Used to retrieve the list of :code:`AWSIoTPythonSDK.core.greengrass.discovery.models.GroupConnectivityInfo` + object for this discovery information. This is designed for users who want to iterate through all available + groups that this Greengrass aware device (GGAD) belongs to. + + **Syntax** + + .. code:: python + + myDiscoveryInfo.getAllGroups() + + **Parameters** + + None + + **Returns** + + List of :code:`AWSIoTPythonSDK.core.greengrass.discovery.models.GroupConnectivityInfo` object. + + """ + groups_dict = self.toObjectAtGroupLevel() + return list(groups_dict.values()) + + def toObjectAtGroupLevel(self): + """ + + **Description** + + Used to get a dictionary of Greengrass group discovery information, with group id string as key and the + corresponding :code:`AWSIoTPythonSDK.core.greengrass.discovery.models.GroupConnectivityInfo` object as the + value. This is designed for users who know exactly which group, which core and which set of connectivity info + they want to use for the Greengrass aware device to connect. + + **Syntax** + + .. 
code:: python + + # Get to the targeted connectivity information for a specific core in a specific group + groupLevelDiscoveryInfoObj = myDiscoveryInfo.toObjectAtGroupLevel() + groupConnectivityInfoObj = groupLevelDiscoveryInfoObj.toObjectAtGroupLevel("IKnowMyGroupId") + coreConnectivityInfoObj = groupConnectivityInfoObj.getCoreConnectivityInfo("IKnowMyCoreThingArn") + connectivityInfo = coreConnectivityInfoObj.getConnectivityInfo("IKnowMyConnectivityInfoSetId") + # Now retrieve the detailed information + caList = groupConnectivityInfoObj.caList + host = connectivityInfo.host + port = connectivityInfo.port + metadata = connectivityInfo.metadata + # Actual connecting logic follows... + + """ + groups_object = json.loads(self._raw_json) + groups_dict = dict() + + for group_object in groups_object[KEY_GROUP_LIST]: + group_info = self._decode_group_info(group_object) + groups_dict[group_info.groupId] = group_info + + return groups_dict + + def _decode_group_info(self, group_object): + group_id = group_object[KEY_GROUP_ID] + group_info = GroupConnectivityInfo(group_id) + + for core in group_object[KEY_CORE_LIST]: + core_info = self._decode_core_info(core, group_id) + group_info.appendCoreConnectivityInfo(core_info) + + for ca in group_object[KEY_CA_LIST]: + group_info.appendCa(ca) + + return group_info + + def _decode_core_info(self, core_object, group_id): + core_info = CoreConnectivityInfo(core_object[KEY_CORE_ARN], group_id) + + for connectivity_info_object in core_object[KEY_CONNECTIVITY_INFO_LIST]: + connectivity_info = ConnectivityInfo(connectivity_info_object[KEY_CONNECTIVITY_INFO_ID], + connectivity_info_object[KEY_HOST_ADDRESS], + connectivity_info_object[KEY_PORT_NUMBER], + connectivity_info_object[KEY_METADATA]) + core_info.appendConnectivityInfo(connectivity_info) + + return core_info diff --git a/AWSIoTPythonSDK/core/greengrass/discovery/providers.py b/AWSIoTPythonSDK/core/greengrass/discovery/providers.py new file mode 100644 index 0000000..eb72b90 --- 
/dev/null +++ b/AWSIoTPythonSDK/core/greengrass/discovery/providers.py @@ -0,0 +1,404 @@ +# /* +# * Copyright 2010-2017 Amazon.com, Inc. or its affiliates. All Rights Reserved. +# * +# * Licensed under the Apache License, Version 2.0 (the "License"). +# * You may not use this file except in compliance with the License. +# * A copy of the License is located at +# * +# * http://aws.amazon.com/apache2.0 +# * +# * or in the "license" file accompanying this file. This file is distributed +# * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either +# * express or implied. See the License for the specific language governing +# * permissions and limitations under the License. +# */ + + +from AWSIoTPythonSDK.exception.AWSIoTExceptions import DiscoveryInvalidRequestException +from AWSIoTPythonSDK.exception.AWSIoTExceptions import DiscoveryUnauthorizedException +from AWSIoTPythonSDK.exception.AWSIoTExceptions import DiscoveryDataNotFoundException +from AWSIoTPythonSDK.exception.AWSIoTExceptions import DiscoveryThrottlingException +from AWSIoTPythonSDK.exception.AWSIoTExceptions import DiscoveryTimeoutException +from AWSIoTPythonSDK.exception.AWSIoTExceptions import DiscoveryFailure +from AWSIoTPythonSDK.core.greengrass.discovery.models import DiscoveryInfo +import re +import sys +import ssl +import time +import errno +import logging +import socket +import platform +if platform.system() == 'Windows': + EAGAIN = errno.WSAEWOULDBLOCK +else: + EAGAIN = errno.EAGAIN + + +class DiscoveryInfoProvider(object): + + REQUEST_TYPE_PREFIX = "GET " + PAYLOAD_PREFIX = "/greengrass/discover/thing/" + PAYLOAD_SUFFIX = " HTTP/1.1\r\n\r\n" # Space in the front + HTTP_PROTOCOL = r"HTTP/1.1 " + CONTENT_LENGTH = r"content-length: " + CONTENT_LENGTH_PATTERN = CONTENT_LENGTH + r"([0-9]+)\r\n" + HTTP_RESPONSE_CODE_PATTERN = HTTP_PROTOCOL + r"([0-9]+) " + + HTTP_SC_200 = "200" + HTTP_SC_400 = "400" + HTTP_SC_401 = "401" + HTTP_SC_404 = "404" + HTTP_SC_429 = "429" + + 
LOW_LEVEL_RC_COMPLETE = 0 + LOW_LEVEL_RC_TIMEOUT = -1 + + _logger = logging.getLogger(__name__) + + def __init__(self, caPath="", certPath="", keyPath="", host="", port=8443, timeoutSec=120): + """ + + The class that provides functionality to perform a Greengrass discovery process to the cloud. + + Users can perform Greengrass discovery process for a specific Greengrass aware device to retrieve + connectivity/identity information of Greengrass cores within the same group. + + **Syntax** + + .. code:: python + + from AWSIoTPythonSDK.core.greengrass.discovery.providers import DiscoveryInfoProvider + + # Create a discovery information provider + myDiscoveryInfoProvider = DiscoveryInfoProvider() + # Create a discovery information provider with custom configuration + myDiscoveryInfoProvider = DiscoveryInfoProvider(caPath=myCAPath, certPath=myCertPath, keyPath=myKeyPath, host=myHost, timeoutSec=myTimeoutSec) + + **Parameters** + + *caPath* - Path to read the root CA file. + + *certPath* - Path to read the certificate file. + + *keyPath* - Path to read the private key file. + + *host* - String that denotes the host name of the user-specific AWS IoT endpoint. + + *port* - Integer that denotes the port number to connect to. For discovery purpose, it is 8443 by default. + + *timeoutSec* - Time out configuration in seconds to consider a discovery request sending/response waiting has + been timed out. 
+ + **Returns** + + AWSIoTPythonSDK.core.greengrass.discovery.providers.DiscoveryInfoProvider object + + """ + self._ca_path = caPath + self._cert_path = certPath + self._key_path = keyPath + self._host = host + self._port = port + self._timeout_sec = timeoutSec + self._expected_exception_map = { + self.HTTP_SC_400 : DiscoveryInvalidRequestException(), + self.HTTP_SC_401 : DiscoveryUnauthorizedException(), + self.HTTP_SC_404 : DiscoveryDataNotFoundException(), + self.HTTP_SC_429 : DiscoveryThrottlingException() + } + + def configureEndpoint(self, host, port=8443): + """ + + **Description** + + Used to configure the host address and port number for the discovery request to hit. Should be called before + the discovery request happens. + + **Syntax** + + .. code:: python + + # Using default port configuration, 8443 + myDiscoveryInfoProvider.configureEndpoint(host="prefix.iot.us-east-1.amazonaws.com") + # Customize port configuration + myDiscoveryInfoProvider.configureEndpoint(host="prefix.iot.us-east-1.amazonaws.com", port=8888) + + **Parameters** + + *host* - String that denotes the host name of the user-specific AWS IoT endpoint. + + *port* - Integer that denotes the port number to connect to. For discovery purpose, it is 8443 by default. + + **Returns** + + None + + """ + self._host = host + self._port = port + + def configureCredentials(self, caPath, certPath, keyPath): + """ + + **Description** + + Used to configure the credentials for discovery request. Should be called before the discovery request happens. + + **Syntax** + + .. code:: python + + myDiscoveryInfoProvider.configureCredentials("my/ca/path", "my/cert/path", "my/key/path") + + **Parameters** + + *caPath* - Path to read the root CA file. + + *certPath* - Path to read the certificate file. + + *keyPath* - Path to read the private key file. 
+ + **Returns** + + None + + """ + self._ca_path = caPath + self._cert_path = certPath + self._key_path = keyPath + + def configureTimeout(self, timeoutSec): + """ + + **Description** + + Used to configure the time out in seconds for discovery request sending/response waiting. Should be called before + the discovery request happens. + + **Syntax** + + .. code:: python + + # Configure the time out for discovery to be 10 seconds + myDiscoveryInfoProvider.configureTimeout(10) + + **Parameters** + + *timeoutSec* - Time out configuration in seconds to consider a discovery request sending/response waiting has + been timed out. + + **Returns** + + None + + """ + self._timeout_sec = timeoutSec + + def discover(self, thingName): + """ + + **Description** + + Perform the discovery request for the given Greengrass aware device thing name. + + **Syntax** + + .. code:: python + + myDiscoveryInfoProvider.discover(thingName="myGGAD") + + **Parameters** + + *thingName* - Greengrass aware device thing name. + + **Returns** + + :code:`AWSIoTPythonSDK.core.greengrass.discovery.models.DiscoveryInfo` object. 
+ + """ + self._logger.info("Starting discover request...") + self._logger.info("Endpoint: " + self._host + ":" + str(self._port)) + self._logger.info("Target thing: " + thingName) + sock = self._create_tcp_connection() + ssl_sock = self._create_ssl_connection(sock) + self._raise_on_timeout(self._send_discovery_request(ssl_sock, thingName)) + status_code, response_body = self._receive_discovery_response(ssl_sock) + + return self._raise_if_not_200(status_code, response_body) + + def _create_tcp_connection(self): + self._logger.debug("Creating tcp connection...") + try: + if (sys.version_info[0] == 2 and sys.version_info[1] < 7) or (sys.version_info[0] == 3 and sys.version_info[1] < 2): + sock = socket.create_connection((self._host, self._port)) + else: + sock = socket.create_connection((self._host, self._port), source_address=("", 0)) + return sock + except socket.error as err: + if err.errno != errno.EINPROGRESS and err.errno != errno.EWOULDBLOCK and err.errno != EAGAIN: + raise + self._logger.debug("Created tcp connection.") + + def _create_ssl_connection(self, sock): + self._logger.debug("Creating ssl connection...") + ssl_sock = ssl.wrap_socket(sock, + certfile=self._cert_path, + keyfile=self._key_path, + ca_certs=self._ca_path, + cert_reqs=ssl.CERT_REQUIRED, + ssl_version=ssl.PROTOCOL_SSLv23) + self._logger.debug("Matching host name...") + if sys.version_info[0] < 3 or (sys.version_info[0] == 3 and sys.version_info[1] < 2): + self._tls_match_hostname(ssl_sock) + else: + ssl.match_hostname(ssl_sock.getpeercert(), self._host) + + return ssl_sock + + def _tls_match_hostname(self, ssl_sock): + try: + cert = ssl_sock.getpeercert() + except AttributeError: + # the getpeercert can throw Attribute error: object has no attribute 'peer_certificate' + # Don't let that crash the whole client. 
See also: http://bugs.python.org/issue13721 + raise ssl.SSLError('Not connected') + + san = cert.get('subjectAltName') + if san: + have_san_dns = False + for (key, value) in san: + if key == 'DNS': + have_san_dns = True + if self._host_matches_cert(self._host.lower(), value.lower()) == True: + return + if key == 'IP Address': + have_san_dns = True + if value.lower() == self._host.lower(): + return + + if have_san_dns: + # Only check subject if subjectAltName dns not found. + raise ssl.SSLError('Certificate subject does not match remote hostname.') + subject = cert.get('subject') + if subject: + for ((key, value),) in subject: + if key == 'commonName': + if self._host_matches_cert(self._host.lower(), value.lower()) == True: + return + + raise ssl.SSLError('Certificate subject does not match remote hostname.') + + def _host_matches_cert(self, host, cert_host): + if cert_host[0:2] == "*.": + if cert_host.count("*") != 1: + return False + + host_match = host.split(".", 1)[1] + cert_match = cert_host.split(".", 1)[1] + if host_match == cert_match: + return True + else: + return False + else: + if host == cert_host: + return True + else: + return False + + def _send_discovery_request(self, ssl_sock, thing_name): + request = self.REQUEST_TYPE_PREFIX + \ + self.PAYLOAD_PREFIX + \ + thing_name + \ + self.PAYLOAD_SUFFIX + self._logger.debug("Sending discover request: " + request) + + start_time = time.time() + desired_length_to_write = len(request) + actual_length_written = 0 + while True: + try: + length_written = ssl_sock.write(request.encode("utf-8")) + actual_length_written += length_written + except socket.error as err: + if err.errno == ssl.SSL_ERROR_WANT_READ or err.errno == ssl.SSL_ERROR_WANT_WRITE: + pass + if actual_length_written == desired_length_to_write: + return self.LOW_LEVEL_RC_COMPLETE + if start_time + self._timeout_sec < time.time(): + return self.LOW_LEVEL_RC_TIMEOUT + + def _receive_discovery_response(self, ssl_sock): + self._logger.debug("Receiving 
discover response header...") + rc1, response_header = self._receive_until(ssl_sock, self._got_two_crlfs) + status_code, body_length = self._handle_discovery_response_header(rc1, response_header.decode("utf-8")) + + self._logger.debug("Receiving discover response body...") + rc2, response_body = self._receive_until(ssl_sock, self._got_enough_bytes, body_length) + response_body = self._handle_discovery_response_body(rc2, response_body.decode("utf-8")) + + return status_code, response_body + + def _receive_until(self, ssl_sock, criteria_function, extra_data=None): + start_time = time.time() + response = bytearray() + number_bytes_read = 0 + while True: # Python does not have do-while + try: + response.append(self._convert_to_int_py3(ssl_sock.read(1))) + number_bytes_read += 1 + except socket.error as err: + if err.errno == ssl.SSL_ERROR_WANT_READ or err.errno == ssl.SSL_ERROR_WANT_WRITE: + pass + + if criteria_function((number_bytes_read, response, extra_data)): + return self.LOW_LEVEL_RC_COMPLETE, response + if start_time + self._timeout_sec < time.time(): + return self.LOW_LEVEL_RC_TIMEOUT, response + + def _convert_to_int_py3(self, input_char): + try: + return ord(input_char) + except: + return input_char + + def _got_enough_bytes(self, data): + number_bytes_read, response, target_length = data + return number_bytes_read == int(target_length) + + def _got_two_crlfs(self, data): + number_bytes_read, response, extra_data_unused = data + number_of_crlf = 2 + has_enough_bytes = number_bytes_read > number_of_crlf * 2 - 1 + if has_enough_bytes: + end_of_received = response[number_bytes_read - number_of_crlf * 2 : number_bytes_read] + expected_end_of_response = b"\r\n" * number_of_crlf + return end_of_received == expected_end_of_response + else: + return False + + def _handle_discovery_response_header(self, rc, response): + self._raise_on_timeout(rc) + http_status_code_matcher = re.compile(self.HTTP_RESPONSE_CODE_PATTERN) + http_status_code_matched_groups = 
http_status_code_matcher.match(response) + content_length_matcher = re.compile(self.CONTENT_LENGTH_PATTERN) + content_length_matched_groups = content_length_matcher.search(response) + return http_status_code_matched_groups.group(1), content_length_matched_groups.group(1) + + def _handle_discovery_response_body(self, rc, response): + self._raise_on_timeout(rc) + return response + + def _raise_on_timeout(self, rc): + if rc == self.LOW_LEVEL_RC_TIMEOUT: + raise DiscoveryTimeoutException() + + def _raise_if_not_200(self, status_code, response_body): # response_body here is str in Py3 + if status_code != self.HTTP_SC_200: + expected_exception = self._expected_exception_map.get(status_code) + if expected_exception: + raise expected_exception + else: + raise DiscoveryFailure(response_body) + return DiscoveryInfo(response_body) diff --git a/AWSIoTPythonSDK/core/protocol/connection/__init__.py b/AWSIoTPythonSDK/core/protocol/connection/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/AWSIoTPythonSDK/core/protocol/paho/securedWebsocket/securedWebsocketCore.py b/AWSIoTPythonSDK/core/protocol/connection/cores.py old mode 100755 new mode 100644 similarity index 59% rename from AWSIoTPythonSDK/core/protocol/paho/securedWebsocket/securedWebsocketCore.py rename to AWSIoTPythonSDK/core/protocol/connection/cores.py index 8699f97..b3a9fc1 --- a/AWSIoTPythonSDK/core/protocol/paho/securedWebsocket/securedWebsocketCore.py +++ b/AWSIoTPythonSDK/core/protocol/connection/cores.py @@ -1,37 +1,274 @@ -''' -/* - * Copyright 2010-2016 Amazon.com, Inc. or its affiliates. All Rights Reserved. - * - * Licensed under the Apache License, Version 2.0 (the "License"). - * You may not use this file except in compliance with the License. - * A copy of the License is located at - * - * http://aws.amazon.com/apache2.0 - * - * or in the "license" file accompanying this file. 
This file is distributed - * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either - * express or implied. See the License for the specific language governing - * permissions and limitations under the License. - */ - ''' - -# This class implements a simple secured websocket client -# with support for websocket handshake, frame encoding/decoding -# and Python paho-mqtt compatible low level socket I/O -# By now, we assume that for each MQTT packet over websocket, -# it will be wrapped into ONE websocket frame. Fragments of -# MQTT packet should be ignored. +# /* +# * Copyright 2010-2017 Amazon.com, Inc. or its affiliates. All Rights Reserved. +# * +# * Licensed under the Apache License, Version 2.0 (the "License"). +# * You may not use this file except in compliance with the License. +# * A copy of the License is located at +# * +# * http://aws.amazon.com/apache2.0 +# * +# * or in the "license" file accompanying this file. This file is distributed +# * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either +# * express or implied. See the License for the specific language governing +# * permissions and limitations under the License. +# */ + +# This class implements the progressive backoff logic for auto-reconnect. +# It manages the reconnect wait time for the current reconnect, controling +# when to increase it and when to reset it. 
+ -import os import sys import ssl import struct import socket import base64 +import time +import threading +import logging +import os +from datetime import datetime import hashlib -from AWSIoTPythonSDK.core.util.sigV4Core import sigV4Core +import hmac from AWSIoTPythonSDK.exception.AWSIoTExceptions import wssNoKeyInEnvironmentError from AWSIoTPythonSDK.exception.AWSIoTExceptions import wssHandShakeError +try: + from urllib.parse import quote # Python 3+ +except ImportError: + from urllib import quote +# INI config file handling +try: + from configparser import ConfigParser # Python 3+ + from configparser import NoOptionError + from configparser import NoSectionError +except ImportError: + from ConfigParser import ConfigParser + from ConfigParser import NoOptionError + from ConfigParser import NoSectionError + + +class ProgressiveBackOffCore: + # Logger + _logger = logging.getLogger(__name__) + + def __init__(self, srcBaseReconnectTimeSecond=1, srcMaximumReconnectTimeSecond=32, srcMinimumConnectTimeSecond=20): + # The base reconnection time in seconds, default 1 + self._baseReconnectTimeSecond = srcBaseReconnectTimeSecond + # The maximum reconnection time in seconds, default 32 + self._maximumReconnectTimeSecond = srcMaximumReconnectTimeSecond + # The minimum time in milliseconds that a connection must be maintained in order to be considered stable + # Default 20 + self._minimumConnectTimeSecond = srcMinimumConnectTimeSecond + # Current backOff time in seconds, init to equal to 0 + self._currentBackoffTimeSecond = 1 + # Handler for timer + self._resetBackoffTimer = None + + # For custom progressiveBackoff timing configuration + def configTime(self, srcBaseReconnectTimeSecond, srcMaximumReconnectTimeSecond, srcMinimumConnectTimeSecond): + if srcBaseReconnectTimeSecond < 0 or srcMaximumReconnectTimeSecond < 0 or srcMinimumConnectTimeSecond < 0: + self._logger.error("init: Negative time configuration detected.") + raise ValueError("Negative time configuration 
detected.") + if srcBaseReconnectTimeSecond >= srcMinimumConnectTimeSecond: + self._logger.error("init: Min connect time should be bigger than base reconnect time.") + raise ValueError("Min connect time should be bigger than base reconnect time.") + self._baseReconnectTimeSecond = srcBaseReconnectTimeSecond + self._maximumReconnectTimeSecond = srcMaximumReconnectTimeSecond + self._minimumConnectTimeSecond = srcMinimumConnectTimeSecond + self._currentBackoffTimeSecond = 1 + + # Block the reconnect logic for _currentBackoffTimeSecond + # Update the currentBackoffTimeSecond for the next reconnect + # Cancel the in-waiting timer for resetting backOff time + # This should get called only when a disconnect/reconnect happens + def backOff(self): + self._logger.debug("backOff: current backoff time is: " + str(self._currentBackoffTimeSecond) + " sec.") + if self._resetBackoffTimer is not None: + # Cancel the timer + self._resetBackoffTimer.cancel() + # Block the reconnect logic + time.sleep(self._currentBackoffTimeSecond) + # Update the backoff time + if self._currentBackoffTimeSecond == 0: + # This is the first attempt to connect, set it to base + self._currentBackoffTimeSecond = self._baseReconnectTimeSecond + else: + # r_cur = min(2^n*r_base, r_max) + self._currentBackoffTimeSecond = min(self._maximumReconnectTimeSecond, self._currentBackoffTimeSecond * 2) + + # Start the timer for resetting _currentBackoffTimeSecond + # Will be cancelled upon calling backOff + def startStableConnectionTimer(self): + self._resetBackoffTimer = threading.Timer(self._minimumConnectTimeSecond, + self._connectionStableThenResetBackoffTime) + self._resetBackoffTimer.start() + + def stopStableConnectionTimer(self): + if self._resetBackoffTimer is not None: + # Cancel the timer + self._resetBackoffTimer.cancel() + + # Timer callback to reset _currentBackoffTimeSecond + # If the connection is stable for longer than _minimumConnectTimeSecond, + # reset the currentBackoffTimeSecond to 
_baseReconnectTimeSecond + def _connectionStableThenResetBackoffTime(self): + self._logger.debug( + "stableConnection: Resetting the backoff time to: " + str(self._baseReconnectTimeSecond) + " sec.") + self._currentBackoffTimeSecond = self._baseReconnectTimeSecond + + +class SigV4Core: + + _logger = logging.getLogger(__name__) + + def __init__(self): + self._aws_access_key_id = "" + self._aws_secret_access_key = "" + self._aws_session_token = "" + self._credentialConfigFilePath = "~/.aws/credentials" + + def setIAMCredentials(self, srcAWSAccessKeyID, srcAWSSecretAccessKey, srcAWSSessionToken): + self._aws_access_key_id = srcAWSAccessKeyID + self._aws_secret_access_key = srcAWSSecretAccessKey + self._aws_session_token = srcAWSSessionToken + + def _createAmazonDate(self): + # Returned as a unicode string in Py3.x + amazonDate = [] + currentTime = datetime.utcnow() + YMDHMS = currentTime.strftime('%Y%m%dT%H%M%SZ') + YMD = YMDHMS[0:YMDHMS.index('T')] + amazonDate.append(YMD) + amazonDate.append(YMDHMS) + return amazonDate + + def _sign(self, key, message): + # Returned as a utf-8 byte string in Py3.x + return hmac.new(key, message.encode('utf-8'), hashlib.sha256).digest() + + def _getSignatureKey(self, key, dateStamp, regionName, serviceName): + # Returned as a utf-8 byte string in Py3.x + kDate = self._sign(('AWS4' + key).encode('utf-8'), dateStamp) + kRegion = self._sign(kDate, regionName) + kService = self._sign(kRegion, serviceName) + kSigning = self._sign(kService, 'aws4_request') + return kSigning + + def _checkIAMCredentials(self): + # Check custom config + ret = self._checkKeyInCustomConfig() + # Check environment variables + if not ret: + ret = self._checkKeyInEnv() + # Check files + if not ret: + ret = self._checkKeyInFiles() + # All credentials returned as unicode strings in Py3.x + return ret + + def _checkKeyInEnv(self): + ret = dict() + self._aws_access_key_id = os.environ.get('AWS_ACCESS_KEY_ID') + self._aws_secret_access_key = 
os.environ.get('AWS_SECRET_ACCESS_KEY') + self._aws_session_token = os.environ.get('AWS_SESSION_TOKEN') + if self._aws_access_key_id is not None and self._aws_secret_access_key is not None: + ret["aws_access_key_id"] = self._aws_access_key_id + ret["aws_secret_access_key"] = self._aws_secret_access_key + # We do not necessarily need session token... + if self._aws_session_token is not None: + ret["aws_session_token"] = self._aws_session_token + self._logger.debug("IAM credentials from env var.") + return ret + + def _checkKeyInINIDefault(self, srcConfigParser, sectionName): + ret = dict() + # Check aws_access_key_id and aws_secret_access_key + try: + ret["aws_access_key_id"] = srcConfigParser.get(sectionName, "aws_access_key_id") + ret["aws_secret_access_key"] = srcConfigParser.get(sectionName, "aws_secret_access_key") + except NoOptionError: + self._logger.warn("Cannot find IAM keyID/secretKey in credential file.") + # We do not continue searching if we cannot even get IAM id/secret right + if len(ret) == 2: + # Check aws_session_token, optional + try: + ret["aws_session_token"] = srcConfigParser.get(sectionName, "aws_session_token") + except NoOptionError: + self._logger.debug("No AWS Session Token found.") + return ret + + def _checkKeyInFiles(self): + credentialFile = None + credentialConfig = None + ret = dict() + # Should be compatible with aws cli default credential configuration + # *NIX/Windows + try: + # See if we get the file + credentialConfig = ConfigParser() + credentialFilePath = os.path.expanduser(self._credentialConfigFilePath) # Is it compatible with windows? \/ + credentialConfig.read(credentialFilePath) + # Now we have the file, start looking for credentials... 
+ # 'default' section + ret = self._checkKeyInINIDefault(credentialConfig, "default") + if not ret: + # 'DEFAULT' section + ret = self._checkKeyInINIDefault(credentialConfig, "DEFAULT") + self._logger.debug("IAM credentials from file.") + except IOError: + self._logger.debug("No IAM credential configuration file in " + credentialFilePath) + except NoSectionError: + self._logger.error("Cannot find IAM 'default' section.") + return ret + + def _checkKeyInCustomConfig(self): + ret = dict() + if self._aws_access_key_id != "" and self._aws_secret_access_key != "": + ret["aws_access_key_id"] = self._aws_access_key_id + ret["aws_secret_access_key"] = self._aws_secret_access_key + # We do not necessarily need session token... + if self._aws_session_token != "": + ret["aws_session_token"] = self._aws_session_token + self._logger.debug("IAM credentials from custom config.") + return ret + + def createWebsocketEndpoint(self, host, port, region, method, awsServiceName, path): + # Return the endpoint as unicode string in 3.x + # Gather all the facts + amazonDate = self._createAmazonDate() + amazonDateSimple = amazonDate[0] # Unicode in 3.x + amazonDateComplex = amazonDate[1] # Unicode in 3.x + allKeys = self._checkIAMCredentials() # Unicode in 3.x + hasCredentialsNecessaryForWebsocket = "aws_access_key_id" in allKeys.keys() and "aws_secret_access_key" in allKeys.keys() + if not hasCredentialsNecessaryForWebsocket: + return "" + else: + keyID = allKeys["aws_access_key_id"] + secretKey = allKeys["aws_secret_access_key"] + queryParameters = "X-Amz-Algorithm=AWS4-HMAC-SHA256" + \ + "&X-Amz-Credential=" + keyID + "%2F" + amazonDateSimple + "%2F" + region + "%2F" + awsServiceName + "%2Faws4_request" + \ + "&X-Amz-Date=" + amazonDateComplex + \ + "&X-Amz-Expires=86400" + \ + "&X-Amz-SignedHeaders=host" # Unicode in 3.x + hashedPayload = hashlib.sha256(str("").encode('utf-8')).hexdigest() # Unicode in 3.x + # Create the string to sign + signedHeaders = "host" + canonicalHeaders = 
"host:" + host + "\n" + canonicalRequest = method + "\n" + path + "\n" + queryParameters + "\n" + canonicalHeaders + "\n" + signedHeaders + "\n" + hashedPayload # Unicode in 3.x + hashedCanonicalRequest = hashlib.sha256(str(canonicalRequest).encode('utf-8')).hexdigest() # Unicoede in 3.x + stringToSign = "AWS4-HMAC-SHA256\n" + amazonDateComplex + "\n" + amazonDateSimple + "/" + region + "/" + awsServiceName + "/aws4_request\n" + hashedCanonicalRequest # Unicode in 3.x + # Sign it + signingKey = self._getSignatureKey(secretKey, amazonDateSimple, region, awsServiceName) + signature = hmac.new(signingKey, (stringToSign).encode("utf-8"), hashlib.sha256).hexdigest() + # generate url + url = "wss://" + host + ":" + str(port) + path + '?' + queryParameters + "&X-Amz-Signature=" + signature + # See if we have STS token, if we do, add it + if "aws_session_token" in allKeys.keys(): + aws_session_token = allKeys["aws_session_token"] + url += "&X-Amz-Security-Token=" + quote(aws_session_token.encode("utf-8")) # Unicode in 3.x + self._logger.debug("createWebsocketEndpoint: Websocket URL: " + url) + return url + # This is an internal class that buffers the incoming bytes into an # internal buffer until it gets the full desired length of bytes. @@ -43,7 +280,7 @@ # For other errors, leave them to the paho _packet_read for error reporting. -class _bufferedReader: +class _BufferedReader: _sslSocket = None _internalBuffer = None _remainedLength = -1 @@ -76,6 +313,7 @@ def read(self, numberOfBytesToBeBuffered): self._reset() return ret # This should always be bytearray + # This is the internal class that sends requested data out chunk by chunk according # to the availablity of the socket write operation. If the requested bytes of data # (after encoding) needs to be sent out in separate socket write operations (most @@ -89,7 +327,7 @@ def read(self, numberOfBytesToBeBuffered): # For other errors, leave them to the paho _packet_read for error reporting. 
-class _bufferedWriter: +class _BufferedWriter: _sslSocket = None _internalBuffer = None _writingInProgress = False @@ -109,7 +347,7 @@ def _reset(self): # Input data for this function needs to be an encoded wss frame # Always request for packet[pos=0:] (raw MQTT data) def write(self, encodedData, payloadLength): - # encodedData should always be bytearray + # encodedData should always be bytearray # Check if we have a frame that is partially sent if not self._writingInProgress: self._internalBuffer = encodedData @@ -128,7 +366,7 @@ def write(self, encodedData, payloadLength): return 0 # Ensure that the 'pos' inside the MQTT packet never moves since we have not finished the transmission of this encoded frame -class securedWebsocketCore: +class SecuredWebSocketCore: # Websocket Constants _OP_CONTINUATION = 0x0 _OP_TEXT = 0x1 @@ -171,11 +409,11 @@ def __init__(self, socket, hostAddress, portNumber, AWSAccessKeyID="", AWSSecret except wssHandShakeError: raise ValueError("Websocket Handshake Error") # Now we have a socket with secured websocket... - self._bufferedReader = _bufferedReader(self._sslSocket) - self._bufferedWriter = _bufferedWriter(self._sslSocket) + self._bufferedReader = _BufferedReader(self._sslSocket) + self._bufferedWriter = _BufferedWriter(self._sslSocket) def _createSigV4Core(self): - return sigV4Core() + return SigV4Core() def _generateMaskKey(self): return bytearray(os.urandom(4)) @@ -326,7 +564,7 @@ def read(self, numberOfBytes): # struct.unpack(fmt, buffer) # Py3.x # Here ret is always in bytes (buffer interface) if sys.version_info[0] < 3: # Py2.x - ret = str(ret) + ret = str(ret) return ret # Emmm, We don't. Try to buffer from the socket (It's a new wss frame). 
if not self._hasOpByte: # Check if we need to buffer OpByte @@ -362,9 +600,9 @@ def read(self, numberOfBytes): payloadLengthExtended = self._bufferedReader.read(self._payloadLengthBytesLength) self._hasPayloadLengthExtended = True if sys.version_info[0] < 3: - payloadLengthExtended = str(payloadLengthExtended) + payloadLengthExtended = str(payloadLengthExtended) if self._payloadLengthBytesLength == 2: - self._payloadLength = struct.unpack("!H", payloadLengthExtended)[0] + self._payloadLength = struct.unpack("!H", payloadLengthExtended)[0] else: # _payloadLengthBytesLength == 8 self._payloadLength = struct.unpack("!Q", payloadLengthExtended)[0] @@ -401,7 +639,7 @@ def read(self, numberOfBytes): # struct.unpack(fmt, buffer) # Py3.x # Here ret is always in bytes (buffer interface) if sys.version_info[0] < 3: # Py2.x - ret = str(ret) + ret = str(ret) return ret else: # Fragmented MQTT packets in separate wss frames raise socket.error(ssl.SSL_ERROR_WANT_READ, "Not a complete MQTT packet payload within this wss frame.") diff --git a/AWSIoTPythonSDK/core/protocol/internal/__init__.py b/AWSIoTPythonSDK/core/protocol/internal/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/AWSIoTPythonSDK/core/protocol/internal/clients.py b/AWSIoTPythonSDK/core/protocol/internal/clients.py new file mode 100644 index 0000000..8badaac --- /dev/null +++ b/AWSIoTPythonSDK/core/protocol/internal/clients.py @@ -0,0 +1,233 @@ +# /* +# * Copyright 2010-2017 Amazon.com, Inc. or its affiliates. All Rights Reserved. +# * +# * Licensed under the Apache License, Version 2.0 (the "License"). +# * You may not use this file except in compliance with the License. +# * A copy of the License is located at +# * +# * http://aws.amazon.com/apache2.0 +# * +# * or in the "license" file accompanying this file. This file is distributed +# * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either +# * express or implied. 
See the License for the specific language governing +# * permissions and limitations under the License. +# */ + +import ssl +import logging +from threading import Lock +from numbers import Number +import AWSIoTPythonSDK.core.protocol.paho.client as mqtt +from AWSIoTPythonSDK.core.protocol.paho.client import MQTT_ERR_SUCCESS +from AWSIoTPythonSDK.core.protocol.internal.events import FixedEventMids + + +class ClientStatus(object): + + IDLE = 0 + CONNECT = 1 + RESUBSCRIBE = 2 + DRAINING = 3 + STABLE = 4 + USER_DISCONNECT = 5 + ABNORMAL_DISCONNECT = 6 + + +class ClientStatusContainer(object): + + def __init__(self): + self._status = ClientStatus.IDLE + + def get_status(self): + return self._status + + def set_status(self, status): + if ClientStatus.USER_DISCONNECT == self._status: # If user requests to disconnect, no status updates other than user connect + if ClientStatus.CONNECT == status: + self._status = status + else: + self._status = status + + +class InternalAsyncMqttClient(object): + + _logger = logging.getLogger(__name__) + + def __init__(self, client_id, clean_session, protocol, use_wss): + self._paho_client = self._create_paho_client(client_id, clean_session, None, protocol, use_wss) + self._use_wss = use_wss + self._event_callback_map_lock = Lock() + self._event_callback_map = dict() + + def _create_paho_client(self, client_id, clean_session, user_data, protocol, use_wss): + self._logger.debug("Initializing MQTT layer...") + return mqtt.Client(client_id, clean_session, user_data, protocol, use_wss) + + # TODO: Merge credentials providers configuration into one + def set_cert_credentials_provider(self, cert_credentials_provider): + # History issue from Yun SDK where AR9331 embedded Linux only have Python 2.7.3 + # pre-installed. In this version, TLSv1_2 is not even an option. + # SSLv23 is a work-around which selects the highest TLS version between the client + # and service. If user installs opensslv1.0.1+, this option will work fine for Mutual + # Auth. 
+ # Note that we cannot force TLSv1.2 for Mutual Auth. in Python 2.7.3 and TLS support + # in Python only starts from Python2.7. + # See also: https://docs.python.org/2/library/ssl.html#ssl.PROTOCOL_SSLv23 + if self._use_wss: + ca_path = cert_credentials_provider.get_ca_path() + self._paho_client.tls_set(ca_certs=ca_path, cert_reqs=ssl.CERT_REQUIRED, tls_version=ssl.PROTOCOL_SSLv23) + else: + ca_path = cert_credentials_provider.get_ca_path() + cert_path = cert_credentials_provider.get_cert_path() + key_path = cert_credentials_provider.get_key_path() + self._paho_client.tls_set(ca_certs=ca_path,certfile=cert_path, keyfile=key_path, + cert_reqs=ssl.CERT_REQUIRED, tls_version=ssl.PROTOCOL_SSLv23) + + def set_iam_credentials_provider(self, iam_credentials_provider): + self._paho_client.configIAMCredentials(iam_credentials_provider.get_access_key_id(), + iam_credentials_provider.get_secret_access_key(), + iam_credentials_provider.get_session_token()) + + def set_endpoint_provider(self, endpoint_provider): + self._endpoint_provider = endpoint_provider + + def configure_last_will(self, topic, payload, qos, retain=False): + self._paho_client.will_set(topic, payload, qos, retain) + + def clear_last_will(self): + self._paho_client.will_clear() + + def configure_reconnect_back_off(self, base_reconnect_quiet_sec, max_reconnect_quiet_sec, stable_connection_sec): + self._paho_client.setBackoffTiming(base_reconnect_quiet_sec, max_reconnect_quiet_sec, stable_connection_sec) + + def connect(self, keep_alive_sec, ack_callback=None): + host = self._endpoint_provider.get_host() + port = self._endpoint_provider.get_port() + + with self._event_callback_map_lock: + self._logger.debug("Filling in fixed event callbacks: CONNACK, DISCONNECT, MESSAGE") + self._event_callback_map[FixedEventMids.CONNACK_MID] = self._create_combined_on_connect_callback(ack_callback) + self._event_callback_map[FixedEventMids.DISCONNECT_MID] = self._create_combined_on_disconnect_callback(None) + 
self._event_callback_map[FixedEventMids.MESSAGE_MID] = self._create_converted_on_message_callback() + + rc = self._paho_client.connect(host, port, keep_alive_sec) + if MQTT_ERR_SUCCESS == rc: + self.start_background_network_io() + + return rc + + def start_background_network_io(self): + self._logger.debug("Starting network I/O thread...") + self._paho_client.loop_start() + + def stop_background_network_io(self): + self._logger.debug("Stopping network I/O thread...") + self._paho_client.loop_stop() + + def disconnect(self, ack_callback=None): + with self._event_callback_map_lock: + rc = self._paho_client.disconnect() + if MQTT_ERR_SUCCESS == rc: + self._logger.debug("Filling in custom disconnect event callback...") + combined_on_disconnect_callback = self._create_combined_on_disconnect_callback(ack_callback) + self._event_callback_map[FixedEventMids.DISCONNECT_MID] = combined_on_disconnect_callback + return rc + + def _create_combined_on_connect_callback(self, ack_callback): + def combined_on_connect_callback(mid, data): + self.on_online() + if ack_callback: + ack_callback(mid, data) + return combined_on_connect_callback + + def _create_combined_on_disconnect_callback(self, ack_callback): + def combined_on_disconnect_callback(mid, data): + self.on_offline() + if ack_callback: + ack_callback(mid, data) + return combined_on_disconnect_callback + + def _create_converted_on_message_callback(self): + def converted_on_message_callback(mid, message): + self.on_message(message) + return converted_on_message_callback + + # For client online notification + def on_online(self): + pass + + # For client offline notification + def on_offline(self): + pass + + # For client message reception notification + def on_message(self, message): + pass + + def publish(self, topic, payload, qos, retain=False, ack_callback=None): + with self._event_callback_map_lock: + rc, mid = self._paho_client.publish(topic, payload, qos, retain) + if MQTT_ERR_SUCCESS == rc and qos > 0 and ack_callback: + 
self._logger.debug("Filling in custom puback (QoS>0) event callback...") + self._event_callback_map[mid] = ack_callback + return rc, mid + + def subscribe(self, topic, qos, ack_callback=None): + with self._event_callback_map_lock: + rc, mid = self._paho_client.subscribe(topic, qos) + if MQTT_ERR_SUCCESS == rc and ack_callback: + self._logger.debug("Filling in custom suback event callback...") + self._event_callback_map[mid] = ack_callback + return rc, mid + + def unsubscribe(self, topic, ack_callback=None): + with self._event_callback_map_lock: + rc, mid = self._paho_client.unsubscribe(topic) + if MQTT_ERR_SUCCESS == rc and ack_callback: + self._logger.debug("Filling in custom unsuback event callback...") + self._event_callback_map[mid] = ack_callback + return rc, mid + + def register_internal_event_callbacks(self, on_connect, on_disconnect, on_publish, on_subscribe, on_unsubscribe, on_message): + self._logger.debug("Registering internal event callbacks to MQTT layer...") + self._paho_client.on_connect = on_connect + self._paho_client.on_disconnect = on_disconnect + self._paho_client.on_publish = on_publish + self._paho_client.on_subscribe = on_subscribe + self._paho_client.on_unsubscribe = on_unsubscribe + self._paho_client.on_message = on_message + + def unregister_internal_event_callbacks(self): + self._logger.debug("Unregistering internal event callbacks from MQTT layer...") + self._paho_client.on_connect = None + self._paho_client.on_disconnect = None + self._paho_client.on_publish = None + self._paho_client.on_subscribe = None + self._paho_client.on_unsubscribe = None + self._paho_client.on_message = None + + def invoke_event_callback(self, mid, data=None): + with self._event_callback_map_lock: + event_callback = self._event_callback_map.get(mid) + if event_callback: + self._logger.debug("Invoking custom event callback...") + if data is not None: + event_callback(mid, data) + else: + event_callback(mid) + if isinstance(mid, Number): # Do NOT remove callbacks 
for CONNACK/DISCONNECT/MESSAGE + self._logger.debug("This custom event callback is for pub/sub/unsub, removing it after invocation...") + del self._event_callback_map[mid] + + def remove_event_callback(self, mid): + with self._event_callback_map_lock: + if mid in self._event_callback_map: + self._logger.debug("Removing custom event callback...") + del self._event_callback_map[mid] + + def clean_up_event_callbacks(self): + with self._event_callback_map_lock: + self._event_callback_map.clear() + + def get_event_callback_map(self): + return self._event_callback_map diff --git a/AWSIoTPythonSDK/core/protocol/internal/defaults.py b/AWSIoTPythonSDK/core/protocol/internal/defaults.py new file mode 100644 index 0000000..d355075 --- /dev/null +++ b/AWSIoTPythonSDK/core/protocol/internal/defaults.py @@ -0,0 +1,18 @@ +# /* +# * Copyright 2010-2017 Amazon.com, Inc. or its affiliates. All Rights Reserved. +# * +# * Licensed under the Apache License, Version 2.0 (the "License"). +# * You may not use this file except in compliance with the License. +# * A copy of the License is located at +# * +# * http://aws.amazon.com/apache2.0 +# * +# * or in the "license" file accompanying this file. This file is distributed +# * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either +# * express or implied. See the License for the specific language governing +# * permissions and limitations under the License. +# */ + +DEFAULT_CONNECT_DISCONNECT_TIMEOUT_SEC = 30 +DEFAULT_OPERATION_TIMEOUT_SEC = 5 +DEFAULT_DRAINING_INTERNAL_SEC = 0.5 \ No newline at end of file diff --git a/AWSIoTPythonSDK/core/protocol/internal/events.py b/AWSIoTPythonSDK/core/protocol/internal/events.py new file mode 100644 index 0000000..90f0b70 --- /dev/null +++ b/AWSIoTPythonSDK/core/protocol/internal/events.py @@ -0,0 +1,29 @@ +# /* +# * Copyright 2010-2017 Amazon.com, Inc. or its affiliates. All Rights Reserved. +# * +# * Licensed under the Apache License, Version 2.0 (the "License"). 
+# * You may not use this file except in compliance with the License. +# * A copy of the License is located at +# * +# * http://aws.amazon.com/apache2.0 +# * +# * or in the "license" file accompanying this file. This file is distributed +# * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either +# * express or implied. See the License for the specific language governing +# * permissions and limitations under the License. +# */ + +class EventTypes(object): + CONNACK = 0 + DISCONNECT = 1 + PUBACK = 2 + SUBACK = 3 + UNSUBACK = 4 + MESSAGE = 5 + + +class FixedEventMids(object): + CONNACK_MID = "CONNECTED" + DISCONNECT_MID = "DISCONNECTED" + MESSAGE_MID = "MESSAGE" + QUEUED_MID = "QUEUED" diff --git a/AWSIoTPythonSDK/core/protocol/internal/queues.py b/AWSIoTPythonSDK/core/protocol/internal/queues.py new file mode 100644 index 0000000..77046a8 --- /dev/null +++ b/AWSIoTPythonSDK/core/protocol/internal/queues.py @@ -0,0 +1,87 @@ +# /* +# * Copyright 2010-2017 Amazon.com, Inc. or its affiliates. All Rights Reserved. +# * +# * Licensed under the Apache License, Version 2.0 (the "License"). +# * You may not use this file except in compliance with the License. +# * A copy of the License is located at +# * +# * http://aws.amazon.com/apache2.0 +# * +# * or in the "license" file accompanying this file. This file is distributed +# * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either +# * express or implied. See the License for the specific language governing +# * permissions and limitations under the License. 
+# */ + +import logging +from AWSIoTPythonSDK.core.util.enums import DropBehaviorTypes + + +class AppendResults(object): + APPEND_FAILURE_QUEUE_FULL = -1 + APPEND_FAILURE_QUEUE_DISABLED = -2 + APPEND_SUCCESS = 0 + + +class OfflineRequestQueue(list): + _logger = logging.getLogger(__name__) + + def __init__(self, max_size, drop_behavior=DropBehaviorTypes.DROP_NEWEST): + if not isinstance(max_size, int) or not isinstance(drop_behavior, int): + self._logger.error("init: MaximumSize/DropBehavior must be integer.") + raise TypeError("MaximumSize/DropBehavior must be integer.") + if drop_behavior != DropBehaviorTypes.DROP_OLDEST and drop_behavior != DropBehaviorTypes.DROP_NEWEST: + self._logger.error("init: Drop behavior not supported.") + raise ValueError("Drop behavior not supported.") + + list.__init__([]) + self._drop_behavior = drop_behavior + # When self._maximumSize > 0, queue is limited + # When self._maximumSize == 0, queue is disabled + # When self._maximumSize < 0. queue is infinite + self._max_size = max_size + + def _is_enabled(self): + return self._max_size != 0 + + def _need_drop_messages(self): + # Need to drop messages when: + # 1. Queue is limited and full + # 2. Queue is disabled + is_queue_full = len(self) >= self._max_size + is_queue_limited = self._max_size > 0 + is_queue_disabled = not self._is_enabled() + return (is_queue_full and is_queue_limited) or is_queue_disabled + + def set_behavior_drop_newest(self): + self._drop_behavior = DropBehaviorTypes.DROP_NEWEST + + def set_behavior_drop_oldest(self): + self._drop_behavior = DropBehaviorTypes.DROP_OLDEST + + # Override + # Append to a queue with a limited size. 
+ # Return APPEND_SUCCESS if the append is successful + # Return APPEND_FAILURE_QUEUE_FULL if the append failed because the queue is full + # Return APPEND_FAILURE_QUEUE_DISABLED if the append failed because the queue is disabled + def append(self, data): + ret = AppendResults.APPEND_SUCCESS + if self._is_enabled(): + if self._need_drop_messages(): + # We should drop the newest + if DropBehaviorTypes.DROP_NEWEST == self._drop_behavior: + self._logger.warn("append: Full queue. Drop the newest: " + str(data)) + ret = AppendResults.APPEND_FAILURE_QUEUE_FULL + # We should drop the oldest + else: + current_oldest = super(OfflineRequestQueue, self).pop(0) + self._logger.warn("append: Full queue. Drop the oldest: " + str(current_oldest)) + super(OfflineRequestQueue, self).append(data) + ret = AppendResults.APPEND_FAILURE_QUEUE_FULL + else: + self._logger.debug("append: Add new element: " + str(data)) + super(OfflineRequestQueue, self).append(data) + else: + self._logger.debug("append: Queue is disabled. Drop the message: " + str(data)) + ret = AppendResults.APPEND_FAILURE_QUEUE_DISABLED + return ret diff --git a/AWSIoTPythonSDK/core/protocol/internal/requests.py b/AWSIoTPythonSDK/core/protocol/internal/requests.py new file mode 100644 index 0000000..bd2585d --- /dev/null +++ b/AWSIoTPythonSDK/core/protocol/internal/requests.py @@ -0,0 +1,27 @@ +# /* +# * Copyright 2010-2017 Amazon.com, Inc. or its affiliates. All Rights Reserved. +# * +# * Licensed under the Apache License, Version 2.0 (the "License"). +# * You may not use this file except in compliance with the License. +# * A copy of the License is located at +# * +# * http://aws.amazon.com/apache2.0 +# * +# * or in the "license" file accompanying this file. This file is distributed +# * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either +# * express or implied. See the License for the specific language governing +# * permissions and limitations under the License. 
+# */ + +class RequestTypes(object): + CONNECT = 0 + DISCONNECT = 1 + PUBLISH = 2 + SUBSCRIBE = 3 + UNSUBSCRIBE = 4 + +class QueueableRequest(object): + + def __init__(self, type, data): + self.type = type + self.data = data # Can be a tuple diff --git a/AWSIoTPythonSDK/core/protocol/internal/workers.py b/AWSIoTPythonSDK/core/protocol/internal/workers.py new file mode 100644 index 0000000..656578a --- /dev/null +++ b/AWSIoTPythonSDK/core/protocol/internal/workers.py @@ -0,0 +1,284 @@ +# /* +# * Copyright 2010-2017 Amazon.com, Inc. or its affiliates. All Rights Reserved. +# * +# * Licensed under the Apache License, Version 2.0 (the "License"). +# * You may not use this file except in compliance with the License. +# * A copy of the License is located at +# * +# * http://aws.amazon.com/apache2.0 +# * +# * or in the "license" file accompanying this file. This file is distributed +# * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either +# * express or implied. See the License for the specific language governing +# * permissions and limitations under the License. 
+# */ + +import time +import logging +from threading import Thread +from AWSIoTPythonSDK.core.protocol.internal.events import EventTypes +from AWSIoTPythonSDK.core.protocol.internal.events import FixedEventMids +from AWSIoTPythonSDK.core.protocol.internal.clients import ClientStatus +from AWSIoTPythonSDK.core.protocol.internal.queues import OfflineRequestQueue +from AWSIoTPythonSDK.core.protocol.internal.requests import RequestTypes +from AWSIoTPythonSDK.core.protocol.paho.client import topic_matches_sub +from AWSIoTPythonSDK.core.protocol.internal.defaults import DEFAULT_DRAINING_INTERNAL_SEC + + +class EventProducer(object): + + _logger = logging.getLogger(__name__) + + def __init__(self, cv, event_queue): + self._cv = cv + self._event_queue = event_queue + + def on_connect(self, client, user_data, flags, rc): + self._add_to_queue(FixedEventMids.CONNACK_MID, EventTypes.CONNACK, rc) + self._logger.debug("Produced [connack] event") + + def on_disconnect(self, client, user_data, rc): + self._add_to_queue(FixedEventMids.DISCONNECT_MID, EventTypes.DISCONNECT, rc) + self._logger.debug("Produced [disconnect] event") + + def on_publish(self, client, user_data, mid): + self._add_to_queue(mid, EventTypes.PUBACK, None) + self._logger.debug("Produced [puback] event") + + def on_subscribe(self, client, user_data, mid, granted_qos): + self._add_to_queue(mid, EventTypes.SUBACK, granted_qos) + self._logger.debug("Produced [suback] event") + + def on_unsubscribe(self, client, user_data, mid): + self._add_to_queue(mid, EventTypes.UNSUBACK, None) + self._logger.debug("Produced [unsuback] event") + + def on_message(self, client, user_data, message): + self._add_to_queue(FixedEventMids.MESSAGE_MID, EventTypes.MESSAGE, message) + self._logger.debug("Produced [message] event") + + def _add_to_queue(self, mid, event_type, data): + with self._cv: + self._event_queue.put((mid, event_type, data)) + self._cv.notify() + + +class EventConsumer(object): + + MAX_DISPATCH_INTERNAL_SEC = 0.01 + 
_logger = logging.getLogger(__name__) + + def __init__(self, cv, event_queue, internal_async_client, + subscription_manager, offline_requests_manager, client_status): + self._cv = cv + self._event_queue = event_queue + self._internal_async_client = internal_async_client + self._subscription_manager = subscription_manager + self._offline_requests_manager = offline_requests_manager + self._client_status = client_status + self._is_running = False + self._draining_interval_sec = DEFAULT_DRAINING_INTERNAL_SEC + self._dispatch_methods = { + EventTypes.CONNACK : self._dispatch_connack, + EventTypes.DISCONNECT : self._dispatch_disconnect, + EventTypes.PUBACK : self._dispatch_puback, + EventTypes.SUBACK : self._dispatch_suback, + EventTypes.UNSUBACK : self._dispatch_unsuback, + EventTypes.MESSAGE : self._dispatch_message + } + self._offline_request_handlers = { + RequestTypes.PUBLISH : self._handle_offline_publish, + RequestTypes.SUBSCRIBE : self._handle_offline_subscribe, + RequestTypes.UNSUBSCRIBE : self._handle_offline_unsubscribe + } + + def update_offline_requests_manager(self, offline_requests_manager): + self._offline_requests_manager = offline_requests_manager + + def update_draining_interval_sec(self, draining_interval_sec): + self._draining_interval_sec = draining_interval_sec + + def get_draining_interval_sec(self): + return self._draining_interval_sec + + def is_running(self): + return self._is_running + + def start(self): + self._is_running = True + dispatch_events = Thread(target=self._dispatch) + dispatch_events.daemon = True + dispatch_events.start() + self._logger.debug("Event consuming thread started") + + def stop(self): + if self._is_running: + self._is_running = False + self._clean_up() + self._logger.debug("Event consuming thread stopped") + + def _clean_up(self): + self._logger.debug("Cleaning up before stopping event consuming") + with self._event_queue.mutex: + self._event_queue.queue.clear() + self._logger.debug("Event queue cleared") + 
self._internal_async_client.stop_background_network_io() + self._logger.debug("Network thread stopped") + self._internal_async_client.clean_up_event_callbacks() + self._logger.debug("Event callbacks cleared") + + def _dispatch(self): + while self._is_running: + with self._cv: + if self._event_queue.empty(): + self._cv.wait(self.MAX_DISPATCH_INTERNAL_SEC) + else: + while not self._event_queue.empty(): + self._dispatch_one() + + def _dispatch_one(self): + mid, event_type, data = self._event_queue.get() + if mid: + self._dispatch_methods[event_type](mid, data) + self._internal_async_client.invoke_event_callback(mid, data=data) + # We need to make sure disconnect event gets dispatched and then we stop the consumer + if self._need_to_stop_dispatching(mid): + self.stop() + + def _need_to_stop_dispatching(self, mid): + status = self._client_status.get_status() + return (ClientStatus.USER_DISCONNECT == status or ClientStatus.CONNECT == status) \ + and mid == FixedEventMids.DISCONNECT_MID + + def _dispatch_connack(self, mid, rc): + status = self._client_status.get_status() + self._logger.debug("Dispatching [connack] event") + if self._need_recover(): + if ClientStatus.STABLE != status: # To avoid multiple connack dispatching + self._logger.debug("Has recovery job") + clean_up_debt = Thread(target=self._clean_up_debt) + clean_up_debt.start() + else: + self._logger.debug("No need for recovery") + self._client_status.set_status(ClientStatus.STABLE) + + def _need_recover(self): + return self._subscription_manager.list_records() or self._offline_requests_manager.has_more() + + def _clean_up_debt(self): + self._handle_resubscribe() + self._handle_draining() + self._client_status.set_status(ClientStatus.STABLE) + + def _handle_resubscribe(self): + subscriptions = self._subscription_manager.list_records() + if subscriptions and not self._has_user_disconnect_request(): + self._logger.debug("Start resubscribing") + self._client_status.set_status(ClientStatus.RESUBSCRIBE) + for topic, 
(qos, message_callback) in subscriptions: + if self._has_user_disconnect_request(): + self._logger.debug("User disconnect detected") + break + self._internal_async_client.subscribe(topic, qos) + + def _handle_draining(self): + if self._offline_requests_manager.has_more() and not self._has_user_disconnect_request(): + self._logger.debug("Start draining") + self._client_status.set_status(ClientStatus.DRAINING) + while self._offline_requests_manager.has_more(): + if self._has_user_disconnect_request(): + self._logger.debug("User disconnect detected") + break + offline_request = self._offline_requests_manager.get_next() + if offline_request: + self._offline_request_handlers[offline_request.type](offline_request) + time.sleep(self._draining_interval_sec) + + def _has_user_disconnect_request(self): + return ClientStatus.USER_DISCONNECT == self._client_status.get_status() + + def _dispatch_disconnect(self, mid, rc): + self._logger.debug("Dispatching [disconnect] event") + status = self._client_status.get_status() + if ClientStatus.USER_DISCONNECT == status or ClientStatus.CONNECT == status: + pass + else: + self._client_status.set_status(ClientStatus.ABNORMAL_DISCONNECT) + + # For puback, suback and unsuback, ack callback invocation is handled in dispatch_one + # Do nothing in the event dispatching itself + def _dispatch_puback(self, mid, rc): + self._logger.debug("Dispatching [puback] event") + + def _dispatch_suback(self, mid, rc): + self._logger.debug("Dispatching [suback] event") + + def _dispatch_unsuback(self, mid, rc): + self._logger.debug("Dispatching [unsuback] event") + + def _dispatch_message(self, mid, message): + self._logger.debug("Dispatching [message] event") + subscriptions = self._subscription_manager.list_records() + if subscriptions: + for topic, (qos, message_callback) in subscriptions: + if topic_matches_sub(topic, message.topic) and message_callback: + message_callback(None, None, message) # message_callback(client, userdata, message) + + def 
_handle_offline_publish(self, request): + topic, payload, qos, retain = request.data + self._internal_async_client.publish(topic, payload, qos, retain) + self._logger.debug("Processed offline publish request") + + def _handle_offline_subscribe(self, request): + topic, qos, message_callback = request.data + self._subscription_manager.add_record(topic, qos, message_callback) + self._internal_async_client.subscribe(topic, qos) + self._logger.debug("Processed offline subscribe request") + + def _handle_offline_unsubscribe(self, request): + topic = request.data + self._subscription_manager.remove_record(topic) + self._internal_async_client.unsubscribe(topic) + self._logger.debug("Processed offline unsubscribe request") + + +class SubscriptionManager(object): + + _logger = logging.getLogger(__name__) + + def __init__(self): + self._subscription_map = dict() + + def add_record(self, topic, qos, message_callback): + self._logger.debug("Adding a new subscription record: %s qos: %d", topic, qos) + self._subscription_map[topic] = qos, message_callback # message_callback could be None + + def remove_record(self, topic): + self._logger.debug("Removing subscription record: %s", topic) + if self._subscription_map.get(topic): # Ignore topics that are never subscribed to + del self._subscription_map[topic] + else: + self._logger.warn("Removing attempt for non-exist subscription record: %s", topic) + + def list_records(self): + return list(self._subscription_map.items()) + + +class OfflineRequestsManager(object): + + _logger = logging.getLogger(__name__) + + def __init__(self, max_size, drop_behavior): + self._queue = OfflineRequestQueue(max_size, drop_behavior) + + def has_more(self): + return len(self._queue) > 0 + + def add_one(self, request): + return self._queue.append(request) + + def get_next(self): + if self.has_more(): + return self._queue.pop(0) + else: + return None diff --git a/AWSIoTPythonSDK/core/protocol/mqttCore.py b/AWSIoTPythonSDK/core/protocol/mqttCore.py deleted 
file mode 100755 index 1c5a465..0000000 --- a/AWSIoTPythonSDK/core/protocol/mqttCore.py +++ /dev/null @@ -1,459 +0,0 @@ -# /* -# * Copyright 2010-2016 Amazon.com, Inc. or its affiliates. All Rights Reserved. -# * -# * Licensed under the Apache License, Version 2.0 (the "License"). -# * You may not use this file except in compliance with the License. -# * A copy of the License is located at -# * -# * http://aws.amazon.com/apache2.0 -# * -# * or in the "license" file accompanying this file. This file is distributed -# * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either -# * express or implied. See the License for the specific language governing -# * permissions and limitations under the License. -# */ - -import sys -import ssl -import time -import logging -import threading -import AWSIoTPythonSDK.core.protocol.paho.client as mqtt -import AWSIoTPythonSDK.core.util.offlinePublishQueue as offlinePublishQueue -from threading import Lock -from AWSIoTPythonSDK.exception.AWSIoTExceptions import connectError -from AWSIoTPythonSDK.exception.AWSIoTExceptions import connectTimeoutException -from AWSIoTPythonSDK.exception.AWSIoTExceptions import disconnectError -from AWSIoTPythonSDK.exception.AWSIoTExceptions import disconnectTimeoutException -from AWSIoTPythonSDK.exception.AWSIoTExceptions import publishError -from AWSIoTPythonSDK.exception.AWSIoTExceptions import publishQueueFullException -from AWSIoTPythonSDK.exception.AWSIoTExceptions import publishQueueDisabledException -from AWSIoTPythonSDK.exception.AWSIoTExceptions import subscribeError -from AWSIoTPythonSDK.exception.AWSIoTExceptions import subscribeTimeoutException -from AWSIoTPythonSDK.exception.AWSIoTExceptions import unsubscribeError -from AWSIoTPythonSDK.exception.AWSIoTExceptions import unsubscribeTimeoutException - -# Class that holds queued publish request details -class _publishRequest: - def __init__(self, srcTopic, srcPayload, srcQos, srcRetain): - self.topic = srcTopic - self.payload 
= srcPayload - self.qos = srcQos - self.retain = srcRetain - - -class mqttCore: - - def getClientID(self): - return self._clientID - - def setConnectDisconnectTimeoutSecond(self, srcConnectDisconnectTimeout): - self._connectdisconnectTimeout = srcConnectDisconnectTimeout - self._log.debug("Set maximum connect/disconnect timeout to be " + str(self._connectdisconnectTimeout) + " second.") - - def getConnectDisconnectTimeoutSecond(self): - return self._connectdisconnectTimeout - - def setMQTTOperationTimeoutSecond(self, srcMQTTOperationTimeout): - self._mqttOperationTimeout = srcMQTTOperationTimeout - self._log.debug("Set maximum MQTT operation timeout to be " + str(self._mqttOperationTimeout) + " second") - - def getMQTTOperationTimeoutSecond(self): - return self._mqttOperationTimeout - - def setUserData(self, srcUserData): - self._pahoClient.user_data_set(srcUserData) - - def createPahoClient(self, clientID, cleanSession, userdata, protocol, useWebsocket): - return mqtt.Client(clientID, cleanSession, userdata, protocol, useWebsocket) # Throw exception when error happens - - def _doResubscribe(self): - if self._subscribePool: - self._resubscribeCount = len(self._subscribePool) # This is the only place where _resubscribeCount gets its count - for key in self._subscribePool.keys(): - qos, callback = self._subscribePool.get(key) - try: - self.subscribe(key, qos, callback) - time.sleep(self._drainingIntervalSecond) # Subscribe requests should also be sent out using the draining interval - except (subscribeError, subscribeTimeoutException): - self._log.warn("Error in re-subscription to topic: " + str(key)) - pass # Subscribe error resulted from network error, will redo subscription in the next re-connect - - # Performed in a seperate thread, draining the offlinePublishQueue at a given draining rate - # Publish theses queued messages to Paho - # Should always pop the queue since Paho has its own queueing and retry logic - # Should exit immediately when there is an error in 
republishing queued message - # Should leave it to the next round of reconnect/resubscribe/republish logic at mqttCore - def _doPublishDraining(self): - while True: - self._offlinePublishQueueLock.acquire() - # This should be a complete publish requests containing topic, payload, qos, retain information - # This is the only thread that pops the offlinePublishQueue - if self._offlinePublishQueue: - queuedPublishRequest = self._offlinePublishQueue.pop(0) - # Publish it (call paho API directly) - (rc, mid) = self._pahoClient.publish(queuedPublishRequest.topic, queuedPublishRequest.payload, queuedPublishRequest.qos, queuedPublishRequest.retain) - if rc != 0: - self._offlinePublishQueueLock.release() - break - else: - self._drainingComplete = True - self._offlinePublishQueueLock.release() - break - self._offlinePublishQueueLock.release() - time.sleep(self._drainingIntervalSecond) - - # Callbacks - def on_connect(self, client, userdata, flags, rc): - self._disconnectResultCode = sys.maxsize - self._connectResultCode = rc - if self._connectResultCode == 0: # If this is a successful connect, do resubscribe - processResubscription = threading.Thread(target=self._doResubscribe) - processResubscription.start() - # If we do not have any topics to resubscribe to, still start a new thread to process queued publish requests - if not self._subscribePool: - offlinePublishQueueDraining = threading.Thread(target=self._doPublishDraining) - offlinePublishQueueDraining.start() - self._log.debug("Connect result code " + str(rc)) - - def on_disconnect(self, client, userdata, rc): - self._connectResultCode = sys.maxsize - self._disconnectResultCode = rc - self._drainingComplete = False # Draining status should be reset when disconnect happens - self._log.debug("Disconnect result code " + str(rc)) - - def on_subscribe(self, client, userdata, mid, granted_qos): - # Execution of this callback is atomic, guaranteed by Paho - # Check if we have got all SUBACKs for all resubscriptions - 
self._log.debug("_resubscribeCount: " + str(self._resubscribeCount)) - if self._resubscribeCount > 0: # Check if there is actually a need for resubscribe - self._resubscribeCount -= 1 # collect SUBACK for all resubscriptions - if self._resubscribeCount == 0: - # start a thread draining the offline publish queue - offlinePublishQueueDraining = threading.Thread(target=self._doPublishDraining) - offlinePublishQueueDraining.start() - self._resubscribeCount = -1 # Recover the context for resubscribe - self._subscribeSent = True - self._log.debug("Subscribe request " + str(mid) + " sent.") - - def on_unsubscribe(self, client, userdata, mid): - self._unsubscribeSent = True - self._log.debug("Unsubscribe request " + str(mid) + " sent.") - - def on_message(self, client, userdata, message): - # Generic message callback - self._log.warn("Received (No custom callback registered) : message: " + str(message.payload) + " from topic: " + str(message.topic)) - - ####### API starts here ####### - def __init__(self, clientID, cleanSession, protocol, srcUseWebsocket=False): - if clientID is None or cleanSession is None or protocol is None: - raise TypeError("None type inputs detected.") - # All internal data member should be unique per mqttCore intance - # Tool handler - self._log = logging.getLogger(__name__) - self._clientID = clientID - self._pahoClient = self.createPahoClient(clientID, cleanSession, None, protocol, srcUseWebsocket) # User data is set to None as default - self._log.debug("Paho MQTT Client init.") - self._log.info("ClientID: " + str(clientID)) - protocolType = "MQTTv3.1.1" - if protocol == 3: - protocolType = "MQTTv3.1" - self._log.info("Protocol: " + protocolType) - self._pahoClient.on_connect = self.on_connect - self._pahoClient.on_disconnect = self.on_disconnect - self._pahoClient.on_message = self.on_message - self._pahoClient.on_subscribe = self.on_subscribe - self._pahoClient.on_unsubscribe = self.on_unsubscribe - self._log.debug("Register Paho MQTT Client 
callbacks.") - # Tool data structure - self._connectResultCode = sys.maxsize - self._disconnectResultCode = sys.maxsize - self._subscribeSent = False - self._unsubscribeSent = False - self._connectdisconnectTimeout = 30 # Default connect/disconnect timeout set to 30 second - self._mqttOperationTimeout = 5 # Default MQTT operation timeout set to 5 second - # Use Websocket - self._useWebsocket = srcUseWebsocket - # Subscribe record - self._subscribePool = dict() - self._resubscribeCount = -1 # Ensure that initial value for _resubscribeCount does not trigger draining on each SUBACK - # Broker information - self._host = "" - self._port = -1 - self._cafile = "" - self._key = "" - self._cert = "" - self._stsToken = "" - # Operation mutex - self._publishLock = Lock() - self._subscribeLock = Lock() - self._unsubscribeLock = Lock() - # OfflinePublishQueue - self._offlinePublishQueueLock = Lock() - self._offlinePublishQueue = offlinePublishQueue.offlinePublishQueue(20, 1) - # Draining interval in seconds - self._drainingIntervalSecond = 0.5 - # Is Draining complete - self._drainingComplete = True - self._log.debug("mqttCore init.") - - def configEndpoint(self, srcHost, srcPort): - if srcHost is None or srcPort is None: - self._log.error("configEndpoint: None type inputs detected.") - raise TypeError("None type inputs detected.") - self._host = srcHost - self._port = srcPort - - def configCredentials(self, srcCAFile, srcKey, srcCert): - if srcCAFile is None or srcKey is None or srcCert is None: - self._log.error("configCredentials: None type inputs detected.") - raise TypeError("None type inputs detected.") - self._cafile = srcCAFile - self._key = srcKey - self._cert = srcCert - self._log.debug("Load CAFile from: " + self._cafile) - self._log.debug("Load Key from: " + self._key) - self._log.debug("Load Cert from: " + self._cert) - - def configIAMCredentials(self, srcAWSAccessKeyID, srcAWSSecretAccessKey, srcAWSSessionToken): - if srcAWSSecretAccessKey is None or 
srcAWSSecretAccessKey is None or srcAWSSessionToken is None: - self._log.error("configIAMCredentials: None type inputs detected.") - raise TypeError("None type inputs detected.") - self._pahoClient.configIAMCredentials(srcAWSAccessKeyID, srcAWSSecretAccessKey, srcAWSSessionToken) - - def setLastWill(self, srcTopic, srcPayload, srcQos): - if srcTopic is None or srcPayload is None or srcQos is None: - self._log.error("setLastWill: None type inputs detected.") - raise TypeError("None type inputs detected.") - self._pahoClient.will_set(srcTopic, srcPayload, srcQos, False) - - def clearLastWill(self): - self._pahoClient.will_clear() - - def setBackoffTime(self, srcBaseReconnectTimeSecond, srcMaximumReconnectTimeSecond, srcMinimumConnectTimeSecond): - if srcBaseReconnectTimeSecond is None or srcMaximumReconnectTimeSecond is None or srcMinimumConnectTimeSecond is None: - self._log.error("setBackoffTime: None type inputs detected.") - raise TypeError("None type inputs detected.") - # Below line could raise ValueError if input params are not properly selected - self._pahoClient.setBackoffTiming(srcBaseReconnectTimeSecond, srcMaximumReconnectTimeSecond, srcMinimumConnectTimeSecond) - self._log.debug("Custom setting for backoff timing: baseReconnectTime = " + str(srcBaseReconnectTimeSecond) + " sec") - self._log.debug("Custom setting for backoff timing: maximumReconnectTime = " + str(srcMaximumReconnectTimeSecond) + " sec") - self._log.debug("Custom setting for backoff timing: minimumConnectTime = " + str(srcMinimumConnectTimeSecond) + " sec") - - def setOfflinePublishQueueing(self, srcQueueSize, srcDropBehavior=mqtt.MSG_QUEUEING_DROP_NEWEST): - if srcQueueSize is None or srcDropBehavior is None: - self._log.error("setOfflinePublishQueueing: None type inputs detected.") - raise TypeError("None type inputs detected.") - self._offlinePublishQueue = offlinePublishQueue.offlinePublishQueue(srcQueueSize, srcDropBehavior) - self._log.debug("Custom setting for publish queueing: 
queueSize = " + str(srcQueueSize)) - dropBehavior_word = "Drop Oldest" - if srcDropBehavior == 1: - dropBehavior_word = "Drop Newest" - self._log.debug("Custom setting for publish queueing: dropBehavior = " + dropBehavior_word) - - def setDrainingIntervalSecond(self, srcDrainingIntervalSecond): - if srcDrainingIntervalSecond is None: - self._log.error("setDrainingIntervalSecond: None type inputs detected.") - raise TypeError("None type inputs detected.") - if srcDrainingIntervalSecond < 0: - self._log.error("setDrainingIntervalSecond: Draining interval should not be negative.") - raise ValueError("Draining interval should not be negative.") - self._drainingIntervalSecond = srcDrainingIntervalSecond - self._log.debug("Custom setting for draining interval: " + str(srcDrainingIntervalSecond) + " sec") - - # MQTT connection - def connect(self, keepAliveInterval=30): - if keepAliveInterval is None : - self._log.error("connect: None type inputs detected.") - raise TypeError("None type inputs detected.") - if not isinstance(keepAliveInterval, int): - self._log.error("connect: Wrong input type detected. KeepAliveInterval must be an integer.") - raise TypeError("Non-integer type inputs detected.") - # Return connect succeeded/failed - ret = False - # TLS configuration - if self._useWebsocket: - # History issue from Yun SDK where AR9331 embedded Linux only have Python 2.7.3 - # pre-installed. In this version, TLSv1_2 is not even an option. - # SSLv23 is a work-around which selects the highest TLS version between the client - # and service. If user installs opensslv1.0.1+, this option will work fine for Mutal - # Auth. - # Note that we cannot force TLSv1.2 for Mutual Auth. in Python 2.7.3 and TLS support - # in Python only starts from Python2.7. 
- # See also: https://docs.python.org/2/library/ssl.html#ssl.PROTOCOL_SSLv23 - self._pahoClient.tls_set(ca_certs=self._cafile, cert_reqs=ssl.CERT_REQUIRED, tls_version=ssl.PROTOCOL_SSLv23) - self._log.info("Connection type: Websocket") - else: - self._pahoClient.tls_set(self._cafile, self._cert, self._key, ssl.CERT_REQUIRED, ssl.PROTOCOL_SSLv23) # Throw exception... - self._log.info("Connection type: TLSv1.2 Mutual Authentication") - # Connect - self._pahoClient.connect(self._host, self._port, keepAliveInterval) # Throw exception... - self._pahoClient.loop_start() - TenmsCount = 0 - while(TenmsCount != self._connectdisconnectTimeout * 100 and self._connectResultCode == sys.maxsize): - TenmsCount += 1 - time.sleep(0.01) - if(self._connectResultCode == sys.maxsize): - self._log.error("Connect timeout.") - self._pahoClient.loop_stop() - raise connectTimeoutException() - elif(self._connectResultCode == 0): - ret = True - self._log.info("Connected to AWS IoT.") - self._log.debug("Connect time consumption: " + str(float(TenmsCount) * 10) + "ms.") - else: - self._log.error("A connect error happened: " + str(self._connectResultCode)) - self._pahoClient.loop_stop() - raise connectError(self._connectResultCode) - return ret - - def disconnect(self): - # Return disconnect succeeded/failed - ret = False - # Disconnect - self._pahoClient.disconnect() # Throw exception... 
- TenmsCount = 0 - while(TenmsCount != self._connectdisconnectTimeout * 100 and self._disconnectResultCode == sys.maxsize): - TenmsCount += 1 - time.sleep(0.01) - if(self._disconnectResultCode == sys.maxsize): - self._log.error("Disconnect timeout.") - raise disconnectTimeoutException() - elif(self._disconnectResultCode == 0): - ret = True - self._log.info("Disconnected.") - self._log.debug("Disconnect time consumption: " + str(float(TenmsCount) * 10) + "ms.") - self._pahoClient.loop_stop() # Do NOT maintain a background thread for socket communication since it is a successful disconnect - else: - self._log.error("A disconnect error happened: " + str(self._disconnectResultCode)) - raise disconnectError(self._disconnectResultCode) - return ret - - def publish(self, topic, payload, qos, retain): - if(topic is None or payload is None or qos is None or retain is None): - self._log.error("publish: None type inputs detected.") - raise TypeError("None type inputs detected.") - # Return publish succeeded/failed - ret = False - # Queueing should happen when disconnected or draining is in progress - self._offlinePublishQueueLock.acquire() - queuedPublishCondition = not self._drainingComplete or self._connectResultCode == sys.maxsize - if queuedPublishCondition: - if self._connectResultCode == sys.maxsize: - self._log.info("Offline publish request detected.") - # If the client is connected but draining is not completed... - elif not self._drainingComplete: - self._log.info("Drainging is still on-going.") - self._log.info("Try queueing up this request...") - # Publish to the queue and report error (raise Exception) - currentQueuedPublishRequest = _publishRequest(topic, payload, qos, retain) - # Try to append the element... - appendResult = self._offlinePublishQueue.append(currentQueuedPublishRequest) - # When the queue is full... 
- if appendResult == self._offlinePublishQueue.APPEND_FAILURE_QUEUE_FULL: - self._offlinePublishQueueLock.release() - raise publishQueueFullException() - # When the queue is disabled... - elif appendResult == self._offlinePublishQueue.APPEND_FAILURE_QUEUE_DISABLED: - self._offlinePublishQueueLock.release() - raise publishQueueDisabledException() - # When the queue is good... - else: - self._offlinePublishQueueLock.release() - # Publish to Paho - else: - self._offlinePublishQueueLock.release() - self._publishLock.acquire() - # Publish - (rc, mid) = self._pahoClient.publish(topic, payload, qos, retain) # Throw exception... - self._log.debug("Try to put a publish request " + str(mid) + " in the TCP stack.") - ret = rc == 0 - if(ret): - self._log.debug("Publish request " + str(mid) + " succeeded.") - else: - self._log.error("Publish request " + str(mid) + " failed with code: " + str(rc)) - self._publishLock.release() # Release the lock when exception is raised - raise publishError(rc) - self._publishLock.release() - return ret - - def subscribe(self, topic, qos, callback): - if(topic is None or qos is None): - self._log.error("subscribe: None type inputs detected.") - raise TypeError("None type inputs detected.") - # Return subscribe succeeded/failed - ret = False - self._subscribeLock.acquire() - # Subscribe - # Register callback - if(callback is not None): - self._pahoClient.message_callback_add(topic, callback) - (rc, mid) = self._pahoClient.subscribe(topic, qos) # Throw exception... - self._log.debug("Started a subscribe request " + str(mid)) - TenmsCount = 0 - while(TenmsCount != self._mqttOperationTimeout * 100 and not self._subscribeSent): - TenmsCount += 1 - time.sleep(0.01) - if(self._subscribeSent): - ret = rc == 0 - if(ret): - self._subscribePool[topic] = (qos, callback) - self._log.debug("Subscribe request " + str(mid) + " succeeded. 
Time consumption: " + str(float(TenmsCount) * 10) + "ms.") - else: - if(callback is not None): - self._pahoClient.message_callback_remove(topic) - self._log.error("Subscribe request " + str(mid) + " failed with code: " + str(rc)) - self._log.debug("Callback cleaned up.") - self._subscribeLock.release() # Release the lock when exception is raised - raise subscribeError(rc) - else: - # Subscribe timeout - if(callback is not None): - self._pahoClient.message_callback_remove(topic) - self._log.error("No feedback detected for subscribe request " + str(mid) + ". Timeout and failed.") - self._log.debug("Callback cleaned up.") - self._subscribeLock.release() # Release the lock when exception is raised - raise subscribeTimeoutException() - self._subscribeSent = False - self._log.debug("Recover subscribe context for the next request: subscribeSent: " + str(self._subscribeSent)) - self._subscribeLock.release() - return ret - - def unsubscribe(self, topic): - if(topic is None): - self._log.error("unsubscribe: None type inputs detected.") - raise TypeError("None type inputs detected.") - self._log.debug("unsubscribe from: " + topic) - # Return unsubscribe succeeded/failed - ret = False - self._unsubscribeLock.acquire() - # Unsubscribe - (rc, mid) = self._pahoClient.unsubscribe(topic) # Throw exception... - self._log.debug("Started an unsubscribe request " + str(mid)) - TenmsCount = 0 - while(TenmsCount != self._mqttOperationTimeout * 100 and not self._unsubscribeSent): - TenmsCount += 1 - time.sleep(0.01) - if(self._unsubscribeSent): - ret = rc == 0 - if(ret): - try: - del self._subscribePool[topic] - except KeyError: - pass # Ignore topics that are never subscribed to - self._log.debug("Unsubscribe request " + str(mid) + " succeeded. 
Time consumption: " + str(float(TenmsCount) * 10) + "ms.") - self._pahoClient.message_callback_remove(topic) - self._log.debug("Remove the callback.") - else: - self._log.error("Unsubscribe request " + str(mid) + " failed with code: " + str(rc)) - self._unsubscribeLock.release() # Release the lock when exception is raised - raise unsubscribeError(rc) - else: - # Unsubscribe timeout - self._log.error("No feedback detected for unsubscribe request " + str(mid) + ". Timeout and failed.") - self._unsubscribeLock.release() # Release the lock when exception is raised - raise unsubscribeTimeoutException() - self._unsubscribeSent = False - self._log.debug("Recover unsubscribe context for the next request: unsubscribeSent: " + str(self._unsubscribeSent)) - self._unsubscribeLock.release() - return ret diff --git a/AWSIoTPythonSDK/core/protocol/mqtt_core.py b/AWSIoTPythonSDK/core/protocol/mqtt_core.py new file mode 100644 index 0000000..b8524d4 --- /dev/null +++ b/AWSIoTPythonSDK/core/protocol/mqtt_core.py @@ -0,0 +1,321 @@ +# /* +# * Copyright 2010-2017 Amazon.com, Inc. or its affiliates. All Rights Reserved. +# * +# * Licensed under the Apache License, Version 2.0 (the "License"). +# * You may not use this file except in compliance with the License. +# * A copy of the License is located at +# * +# * http://aws.amazon.com/apache2.0 +# * +# * or in the "license" file accompanying this file. This file is distributed +# * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either +# * express or implied. See the License for the specific language governing +# * permissions and limitations under the License. 
+# */ + +from AWSIoTPythonSDK.core.protocol.internal.clients import InternalAsyncMqttClient +from AWSIoTPythonSDK.core.protocol.internal.clients import ClientStatusContainer +from AWSIoTPythonSDK.core.protocol.internal.clients import ClientStatus +from AWSIoTPythonSDK.core.protocol.internal.workers import EventProducer +from AWSIoTPythonSDK.core.protocol.internal.workers import EventConsumer +from AWSIoTPythonSDK.core.protocol.internal.workers import SubscriptionManager +from AWSIoTPythonSDK.core.protocol.internal.workers import OfflineRequestsManager +from AWSIoTPythonSDK.core.protocol.internal.requests import RequestTypes +from AWSIoTPythonSDK.core.protocol.internal.requests import QueueableRequest +from AWSIoTPythonSDK.core.protocol.internal.defaults import DEFAULT_CONNECT_DISCONNECT_TIMEOUT_SEC +from AWSIoTPythonSDK.core.protocol.internal.defaults import DEFAULT_OPERATION_TIMEOUT_SEC +from AWSIoTPythonSDK.core.protocol.internal.events import FixedEventMids +from AWSIoTPythonSDK.core.protocol.paho.client import MQTT_ERR_SUCCESS +from AWSIoTPythonSDK.exception.AWSIoTExceptions import connectError +from AWSIoTPythonSDK.exception.AWSIoTExceptions import connectTimeoutException +from AWSIoTPythonSDK.exception.AWSIoTExceptions import disconnectError +from AWSIoTPythonSDK.exception.AWSIoTExceptions import disconnectTimeoutException +from AWSIoTPythonSDK.exception.AWSIoTExceptions import publishError +from AWSIoTPythonSDK.exception.AWSIoTExceptions import publishTimeoutException +from AWSIoTPythonSDK.exception.AWSIoTExceptions import publishQueueFullException +from AWSIoTPythonSDK.exception.AWSIoTExceptions import publishQueueDisabledException +from AWSIoTPythonSDK.exception.AWSIoTExceptions import subscribeQueueFullException +from AWSIoTPythonSDK.exception.AWSIoTExceptions import subscribeQueueDisabledException +from AWSIoTPythonSDK.exception.AWSIoTExceptions import unsubscribeQueueFullException +from AWSIoTPythonSDK.exception.AWSIoTExceptions import 
unsubscribeQueueDisabledException +from AWSIoTPythonSDK.exception.AWSIoTExceptions import subscribeError +from AWSIoTPythonSDK.exception.AWSIoTExceptions import subscribeTimeoutException +from AWSIoTPythonSDK.exception.AWSIoTExceptions import unsubscribeError +from AWSIoTPythonSDK.exception.AWSIoTExceptions import unsubscribeTimeoutException +from AWSIoTPythonSDK.core.protocol.internal.queues import AppendResults +from AWSIoTPythonSDK.core.util.enums import DropBehaviorTypes +from AWSIoTPythonSDK.core.protocol.paho.client import MQTTv31 +from threading import Condition +from threading import Event +import logging +import sys +if sys.version_info[0] < 3: + from Queue import Queue +else: + from queue import Queue + + +class MqttCore(object): + + _logger = logging.getLogger(__name__) + + def __init__(self, client_id, clean_session, protocol, use_wss): + self._event_queue = Queue() + self._event_cv = Condition() + self._event_producer = EventProducer(self._event_cv, self._event_queue) + self._client_status = ClientStatusContainer() + self._internal_async_client = InternalAsyncMqttClient(client_id, clean_session, protocol, use_wss) + self._subscription_manager = SubscriptionManager() + self._offline_requests_manager = OfflineRequestsManager(-1, DropBehaviorTypes.DROP_NEWEST) # Infinite queue + self._event_consumer = EventConsumer(self._event_cv, + self._event_queue, + self._internal_async_client, + self._subscription_manager, + self._offline_requests_manager, + self._client_status) + self._connect_disconnect_timeout_sec = DEFAULT_CONNECT_DISCONNECT_TIMEOUT_SEC + self._operation_timeout_sec = DEFAULT_OPERATION_TIMEOUT_SEC + self._init_offline_request_exceptions() + self._init_workers() + self._start_workers() + self._logger.info("MqttCore initialized") + self._logger.info("Client id: %s" % client_id) + self._logger.info("Protocol version: %s" % ("MQTTv3.1" if protocol == MQTTv31 else "MQTTv3.1.1")) + self._logger.info("Authentication type: %s" % ("SigV4 WebSocket" if 
use_wss else "TLSv1.2 certificate based Mutual Auth.")) + + def _init_offline_request_exceptions(self): + self._offline_request_queue_disabled_exceptions = { + RequestTypes.PUBLISH : publishQueueDisabledException(), + RequestTypes.SUBSCRIBE : subscribeQueueDisabledException(), + RequestTypes.UNSUBSCRIBE : unsubscribeQueueDisabledException() + } + self._offline_request_queue_full_exceptions = { + RequestTypes.PUBLISH : publishQueueFullException(), + RequestTypes.SUBSCRIBE : subscribeQueueFullException(), + RequestTypes.UNSUBSCRIBE : unsubscribeQueueFullException() + } + + def _init_workers(self): + self._internal_async_client.register_internal_event_callbacks(self._event_producer.on_connect, + self._event_producer.on_disconnect, + self._event_producer.on_publish, + self._event_producer.on_subscribe, + self._event_producer.on_unsubscribe, + self._event_producer.on_message) + + def _start_workers(self): + self._event_consumer.start() + + # Used for general message event reception + def on_message(self, message): + pass + + # Used for general online event notification + def on_online(self): + pass + + # Used for general offline event notification + def on_offline(self): + pass + + def configure_cert_credentials(self, cert_credentials_provider): + self._logger.info("Configuring certificates...") + self._internal_async_client.set_cert_credentials_provider(cert_credentials_provider) + + def configure_iam_credentials(self, iam_credentials_provider): + self._logger.info("Configuring custom IAM credentials...") + self._internal_async_client.set_iam_credentials_provider(iam_credentials_provider) + + def configure_endpoint(self, endpoint_provider): + self._logger.info("Configuring endpoint...") + self._internal_async_client.set_endpoint_provider(endpoint_provider) + + def configure_connect_disconnect_timeout_sec(self, connect_disconnect_timeout_sec): + self._logger.info("Configuring connect/disconnect time out: %f sec" % connect_disconnect_timeout_sec) + 
self._connect_disconnect_timeout_sec = connect_disconnect_timeout_sec + + def configure_operation_timeout_sec(self, operation_timeout_sec): + self._logger.info("Configuring MQTT operation time out: %f sec" % operation_timeout_sec) + self._operation_timeout_sec = operation_timeout_sec + + def configure_reconnect_back_off(self, base_reconnect_quiet_sec, max_reconnect_quiet_sec, stable_connection_sec): + self._logger.info("Configuring reconnect back off timing...") + self._logger.info("Base quiet time: %f sec" % base_reconnect_quiet_sec) + self._logger.info("Max quiet time: %f sec" % max_reconnect_quiet_sec) + self._logger.info("Stable connection time: %f sec" % stable_connection_sec) + self._internal_async_client.configure_reconnect_back_off(base_reconnect_quiet_sec, max_reconnect_quiet_sec, stable_connection_sec) + + def configure_last_will(self, topic, payload, qos, retain=False): + self._logger.info("Configuring last will...") + self._internal_async_client.configure_last_will(topic, payload, qos, retain) + + def clear_last_will(self): + self._logger.info("Clearing last will...") + self._internal_async_client.clear_last_will() + + def configure_offline_requests_queue(self, max_size, drop_behavior): + self._logger.info("Configuring offline requests queueing: max queue size: %d", max_size) + self._offline_requests_manager = OfflineRequestsManager(max_size, drop_behavior) + self._event_consumer.update_offline_requests_manager(self._offline_requests_manager) + + def configure_draining_interval_sec(self, draining_interval_sec): + self._logger.info("Configuring offline requests queue draining interval: %f sec", draining_interval_sec) + self._event_consumer.update_draining_interval_sec(draining_interval_sec) + + def connect(self, keep_alive_sec): + self._logger.info("Performing sync connect...") + event = Event() + self.connect_async(keep_alive_sec, self._create_blocking_ack_callback(event)) + if not event.wait(self._connect_disconnect_timeout_sec): + 
self._logger.error("Connect timed out") + raise connectTimeoutException() + return True + + def connect_async(self, keep_alive_sec, ack_callback=None): + self._logger.info("Performing async connect...") + self._logger.info("Keep-alive: %f sec" % keep_alive_sec) + self._load_callbacks() + self._client_status.set_status(ClientStatus.CONNECT) + rc = self._internal_async_client.connect(keep_alive_sec, ack_callback) + if MQTT_ERR_SUCCESS != rc: + self._logger.error("Connect error: %d", rc) + raise connectError(rc) + return FixedEventMids.CONNACK_MID + + def _load_callbacks(self): + self._logger.debug("Passing in general notification callbacks to internal client...") + self._internal_async_client.on_online = self.on_online + self._internal_async_client.on_offline = self.on_offline + self._internal_async_client.on_message = self.on_message + + def disconnect(self): + self._logger.info("Performing sync disconnect...") + event = Event() + self.disconnect_async(self._create_blocking_ack_callback(event)) + if not event.wait(self._connect_disconnect_timeout_sec): + self._logger.error("Disconnect timed out") + raise disconnectTimeoutException() + return True + + def disconnect_async(self, ack_callback=None): + self._logger.info("Performing async disconnect...") + self._client_status.set_status(ClientStatus.USER_DISCONNECT) + rc = self._internal_async_client.disconnect(ack_callback) + if MQTT_ERR_SUCCESS != rc: + self._logger.error("Disconnect error: %d", rc) + raise disconnectError(rc) + return FixedEventMids.DISCONNECT_MID + + def publish(self, topic, payload, qos, retain=False): + self._logger.info("Performing sync publish...") + ret = False + if ClientStatus.STABLE != self._client_status.get_status(): + self._handle_offline_request(RequestTypes.PUBLISH, (topic, payload, qos, retain)) + else: + if qos > 0: + event = Event() + rc, mid = self._publish_async(topic, payload, qos, retain, self._create_blocking_ack_callback(event)) + if not event.wait(self._operation_timeout_sec): 
+ self._internal_async_client.remove_event_callback(mid) + self._logger.error("Publish timed out") + raise publishTimeoutException() + else: + self._publish_async(topic, payload, qos, retain) + ret = True + return ret + + def publish_async(self, topic, payload, qos, retain=False, ack_callback=None): + self._logger.info("Performing async publish...") + if ClientStatus.STABLE != self._client_status.get_status(): + self._handle_offline_request(RequestTypes.PUBLISH, (topic, payload, qos, retain)) + return FixedEventMids.QUEUED_MID + else: + rc, mid = self._publish_async(topic, payload, qos, retain, ack_callback) + return mid + + def _publish_async(self, topic, payload, qos, retain=False, ack_callback=None): + rc, mid = self._internal_async_client.publish(topic, payload, qos, retain, ack_callback) + if MQTT_ERR_SUCCESS != rc: + self._logger.error("Publish error: %d", rc) + raise publishError(rc) + return rc, mid + + def subscribe(self, topic, qos, message_callback=None): + self._logger.info("Performing sync subscribe...") + ret = False + if ClientStatus.STABLE != self._client_status.get_status(): + self._handle_offline_request(RequestTypes.SUBSCRIBE, (topic, qos, message_callback)) + else: + event = Event() + rc, mid = self._subscribe_async(topic, qos, self._create_blocking_ack_callback(event), message_callback) + if not event.wait(self._operation_timeout_sec): + self._internal_async_client.remove_event_callback(mid) + self._logger.error("Subscribe timed out") + raise subscribeTimeoutException() + ret = True + return ret + + def subscribe_async(self, topic, qos, ack_callback=None, message_callback=None): + self._logger.info("Performing async subscribe...") + if ClientStatus.STABLE != self._client_status.get_status(): + self._handle_offline_request(RequestTypes.SUBSCRIBE, (topic, qos, message_callback)) + return FixedEventMids.QUEUED_MID + else: + rc, mid = self._subscribe_async(topic, qos, ack_callback, message_callback) + return mid + + def _subscribe_async(self, 
topic, qos, ack_callback=None, message_callback=None): + self._subscription_manager.add_record(topic, qos, message_callback) + rc, mid = self._internal_async_client.subscribe(topic, qos, ack_callback) + if MQTT_ERR_SUCCESS != rc: + self._logger.error("Subscribe error: %d", rc) + raise subscribeError(rc) + return rc, mid + + def unsubscribe(self, topic): + self._logger.info("Performing sync unsubscribe...") + ret = False + if ClientStatus.STABLE != self._client_status.get_status(): + self._handle_offline_request(RequestTypes.UNSUBSCRIBE, topic) + else: + event = Event() + rc, mid = self._unsubscribe_async(topic, self._create_blocking_ack_callback(event)) + if not event.wait(self._operation_timeout_sec): + self._internal_async_client.remove_event_callback(mid) + self._logger.error("Unsubscribe timed out") + raise unsubscribeTimeoutException() + ret = True + return ret + + def unsubscribe_async(self, topic, ack_callback=None): + self._logger.info("Performing async unsubscribe...") + if ClientStatus.STABLE != self._client_status.get_status(): + self._handle_offline_request(RequestTypes.UNSUBSCRIBE, topic) + return FixedEventMids.QUEUED_MID + else: + rc, mid = self._unsubscribe_async(topic, ack_callback) + return mid + + def _unsubscribe_async(self, topic, ack_callback=None): + self._subscription_manager.remove_record(topic) + rc, mid = self._internal_async_client.unsubscribe(topic, ack_callback) + if MQTT_ERR_SUCCESS != rc: + self._logger.error("Unsubscribe error: %d", rc) + raise unsubscribeError(rc) + return rc, mid + + def _create_blocking_ack_callback(self, event): + def ack_callback(mid, data=None): + event.set() + return ack_callback + + def _handle_offline_request(self, type, data): + self._logger.info("Offline request detected!") + offline_request = QueueableRequest(type, data) + append_result = self._offline_requests_manager.add_one(offline_request) + if AppendResults.APPEND_FAILURE_QUEUE_DISABLED == append_result: + self._logger.error("Offline request queue 
has been disabled") + raise self._offline_request_queue_disabled_exceptions[type] + if AppendResults.APPEND_FAILURE_QUEUE_FULL == append_result: + self._logger.error("Offline request queue is full") + raise self._offline_request_queue_full_exceptions[type] diff --git a/AWSIoTPythonSDK/core/protocol/paho/client.py b/AWSIoTPythonSDK/core/protocol/paho/client.py index 6096aa4..9787984 100755 --- a/AWSIoTPythonSDK/core/protocol/paho/client.py +++ b/AWSIoTPythonSDK/core/protocol/paho/client.py @@ -44,10 +44,9 @@ EAGAIN = errno.WSAEWOULDBLOCK else: EAGAIN = errno.EAGAIN -# AWS WSS implementation -import AWSIoTPythonSDK.core.protocol.paho.securedWebsocket.securedWebsocketCore as wssCore -import AWSIoTPythonSDK.core.util.progressiveBackoffCore as backoffCore -import AWSIoTPythonSDK.core.util.offlinePublishQueue as offlinePublishQueue + +from AWSIoTPythonSDK.core.protocol.connection.cores import ProgressiveBackOffCore +from AWSIoTPythonSDK.core.protocol.connection.cores import SecuredWebSocketCore VERSION_MAJOR=1 VERSION_MINOR=0 @@ -503,7 +502,7 @@ def __init__(self, client_id="", clean_session=True, userdata=None, protocol=MQT self._tls_version = tls_version self._tls_insecure = False self._useSecuredWebsocket = useSecuredWebsocket # Do we enable secured websocket - self._backoffCore = backoffCore.progressiveBackoffCore() # Init the backoffCore using default configuration + self._backoffCore = ProgressiveBackOffCore() # Init the backoffCore using default configuration self._AWSAccessKeyIDCustomConfig = "" self._AWSSecretAccessKeyCustomConfig = "" self._AWSSessionTokenCustomConfig = "" @@ -517,7 +516,7 @@ def setBackoffTiming(self, srcBaseReconnectTimeSecond, srcMaximumReconnectTimeSe Make custom settings for backoff timing for reconnect logic srcBaseReconnectTimeSecond - The base reconnection time in seconds srcMaximumReconnectTimeSecond - The maximum reconnection time in seconds - srcMinimumConnectTimeSecond - The minimum time in milliseconds that a connection must be 
maintained in order to be considered stable + srcMinimumConnectTimeSecond - The minimum time in seconds that a connection must be maintained in order to be considered stable * Raise ValueError if input params are malformed """ self._backoffCore.configTime(srcBaseReconnectTimeSecond, srcMaximumReconnectTimeSecond, srcMinimumConnectTimeSecond) @@ -785,7 +784,7 @@ def reconnect(self): # Non-None value for ._ssl will allow ops before wss-MQTT connection is established rawSSL = ssl.wrap_socket(sock, ca_certs=self._tls_ca_certs, cert_reqs=ssl.CERT_REQUIRED) # Add server certificate verification rawSSL.setblocking(0) # Non-blocking socket - self._ssl = wssCore.securedWebsocketCore(rawSSL, self._host, self._port, self._AWSAccessKeyIDCustomConfig, self._AWSSecretAccessKeyCustomConfig, self._AWSSessionTokenCustomConfig) # Overeride the _ssl socket + self._ssl = SecuredWebSocketCore(rawSSL, self._host, self._port, self._AWSAccessKeyIDCustomConfig, self._AWSSecretAccessKeyCustomConfig, self._AWSSessionTokenCustomConfig) # Override the _ssl socket # self._ssl.enableDebug() else: self._ssl = ssl.wrap_socket( @@ -798,7 +797,7 @@ def reconnect(self): ciphers=self._tls_ciphers) if self._tls_insecure is False: - if sys.version_info[0] < 3 or (sys.version_info[0] == 3 and sys.version_info[1] < 2): + if sys.version_info[0] < 3 or (sys.version_info[0] == 3 and sys.version_info[1] < 5): # No IP host match before 3.5.x self._tls_match_hostname() else: ssl.match_hostname(self._ssl.getpeercert(), self._host) diff --git a/AWSIoTPythonSDK/core/shadow/deviceShadow.py b/AWSIoTPythonSDK/core/shadow/deviceShadow.py index 4404aa8..8b49cca 100755 --- a/AWSIoTPythonSDK/core/shadow/deviceShadow.py +++ b/AWSIoTPythonSDK/core/shadow/deviceShadow.py @@ -1,5 +1,5 @@ # /* -# * Copyright 2010-2016 Amazon.com, Inc. or its affiliates. All Rights Reserved. +# * Copyright 2010-2017 Amazon.com, Inc. or its affiliates. All Rights Reserved. # * # * Licensed under the Apache License, Version 2.0 (the "License").
# * You may not use this file except in compliance with the License. @@ -23,10 +23,6 @@ class _shadowRequestToken: URN_PREFIX_LENGTH = 9 - def __init__(self, srcShadowName, srcClientID): - self._shadowName = srcShadowName - self._clientID = srcClientID - def getNextToken(self): return uuid.uuid4().urn[self.URN_PREFIX_LENGTH:] # We only need the uuid digits, not the urn prefix @@ -86,7 +82,7 @@ def __init__(self, srcShadowName, srcIsPersistentSubscribe, srcShadowManager): # Tool handler self._shadowManagerHandler = srcShadowManager self._basicJSONParserHandler = _basicJSONParser() - self._tokenHandler = _shadowRequestToken(self._shadowName, self._shadowManagerHandler.getClientID()) + self._tokenHandler = _shadowRequestToken() # Properties self._isPersistentSubscribe = srcIsPersistentSubscribe self._lastVersionInSync = -1 # -1 means not initialized @@ -109,60 +105,59 @@ def _doNonPersistentUnsubscribe(self, currentAction): self._shadowManagerHandler.basicShadowUnsubscribe(self._shadowName, currentAction) self._logger.info("Unsubscribed to " + currentAction + " accepted/rejected topics for deviceShadow: " + self._shadowName) - def _generalCallback(self, client, userdata, message): + def generalCallback(self, client, userdata, message): # In Py3.x, message.payload comes in as a bytes(string) # json.loads needs a string input - self._dataStructureLock.acquire() - currentTopic = message.topic - currentAction = self._parseTopicAction(currentTopic) # get/delete/update/delta - currentType = self._parseTopicType(currentTopic) # accepted/rejected/delta - payloadUTF8String = message.payload.decode('utf-8') - # get/delete/update: Need to deal with token, timer and unsubscribe - if currentAction in ["get", "delete", "update"]: - # Check for token - self._basicJSONParserHandler.setString(payloadUTF8String) - if self._basicJSONParserHandler.validateJSON(): # Filter out invalid JSON - currentToken = self._basicJSONParserHandler.getAttributeValue(u"clientToken") - if currentToken is 
not None: - self._logger.debug("shadow message clientToken: " + currentToken) - if currentToken is not None and currentToken in self._tokenPool.keys(): # Filter out JSON without the desired token - # Sync local version when it is an accepted response - self._logger.debug("Token is in the pool. Type: " + currentType) - if currentType == "accepted": - incomingVersion = self._basicJSONParserHandler.getAttributeValue(u"version") - # If it is get/update accepted response, we need to sync the local version - if incomingVersion is not None and incomingVersion > self._lastVersionInSync and currentAction != "delete": - self._lastVersionInSync = incomingVersion - # If it is a delete accepted, we need to reset the version - else: - self._lastVersionInSync = -1 # The version will always be synced for the next incoming delta/GU-accepted response - # Cancel the timer and clear the token - self._tokenPool[currentToken].cancel() - del self._tokenPool[currentToken] - # Need to unsubscribe? - self._shadowSubscribeStatusTable[currentAction] -= 1 - if not self._isPersistentSubscribe and self._shadowSubscribeStatusTable.get(currentAction) <= 0: - self._shadowSubscribeStatusTable[currentAction] = 0 - processNonPersistentUnsubscribe = Thread(target=self._doNonPersistentUnsubscribe, args=[currentAction]) - processNonPersistentUnsubscribe.start() - # Custom callback - if self._shadowSubscribeCallbackTable.get(currentAction) is not None: - processCustomCallback = Thread(target=self._shadowSubscribeCallbackTable[currentAction], args=[payloadUTF8String, currentType, currentToken]) - processCustomCallback.start() - # delta: Watch for version - else: - currentType += "/" + self._parseTopicShadowName(currentTopic) - # Sync local version - self._basicJSONParserHandler.setString(payloadUTF8String) - if self._basicJSONParserHandler.validateJSON(): # Filter out JSON without version - incomingVersion = self._basicJSONParserHandler.getAttributeValue(u"version") - if incomingVersion is not None and 
incomingVersion > self._lastVersionInSync: - self._lastVersionInSync = incomingVersion - # Custom callback - if self._shadowSubscribeCallbackTable.get(currentAction) is not None: - processCustomCallback = Thread(target=self._shadowSubscribeCallbackTable[currentAction], args=[payloadUTF8String, currentType, None]) - processCustomCallback.start() - self._dataStructureLock.release() + with self._dataStructureLock: + currentTopic = message.topic + currentAction = self._parseTopicAction(currentTopic) # get/delete/update/delta + currentType = self._parseTopicType(currentTopic) # accepted/rejected/delta + payloadUTF8String = message.payload.decode('utf-8') + # get/delete/update: Need to deal with token, timer and unsubscribe + if currentAction in ["get", "delete", "update"]: + # Check for token + self._basicJSONParserHandler.setString(payloadUTF8String) + if self._basicJSONParserHandler.validateJSON(): # Filter out invalid JSON + currentToken = self._basicJSONParserHandler.getAttributeValue(u"clientToken") + if currentToken is not None: + self._logger.debug("shadow message clientToken: " + currentToken) + if currentToken is not None and currentToken in self._tokenPool.keys(): # Filter out JSON without the desired token + # Sync local version when it is an accepted response + self._logger.debug("Token is in the pool. 
Type: " + currentType) + if currentType == "accepted": + incomingVersion = self._basicJSONParserHandler.getAttributeValue(u"version") + # If it is get/update accepted response, we need to sync the local version + if incomingVersion is not None and incomingVersion > self._lastVersionInSync and currentAction != "delete": + self._lastVersionInSync = incomingVersion + # If it is a delete accepted, we need to reset the version + else: + self._lastVersionInSync = -1 # The version will always be synced for the next incoming delta/GU-accepted response + # Cancel the timer and clear the token + self._tokenPool[currentToken].cancel() + del self._tokenPool[currentToken] + # Need to unsubscribe? + self._shadowSubscribeStatusTable[currentAction] -= 1 + if not self._isPersistentSubscribe and self._shadowSubscribeStatusTable.get(currentAction) <= 0: + self._shadowSubscribeStatusTable[currentAction] = 0 + processNonPersistentUnsubscribe = Thread(target=self._doNonPersistentUnsubscribe, args=[currentAction]) + processNonPersistentUnsubscribe.start() + # Custom callback + if self._shadowSubscribeCallbackTable.get(currentAction) is not None: + processCustomCallback = Thread(target=self._shadowSubscribeCallbackTable[currentAction], args=[payloadUTF8String, currentType, currentToken]) + processCustomCallback.start() + # delta: Watch for version + else: + currentType += "/" + self._parseTopicShadowName(currentTopic) + # Sync local version + self._basicJSONParserHandler.setString(payloadUTF8String) + if self._basicJSONParserHandler.validateJSON(): # Filter out JSON without version + incomingVersion = self._basicJSONParserHandler.getAttributeValue(u"version") + if incomingVersion is not None and incomingVersion > self._lastVersionInSync: + self._lastVersionInSync = incomingVersion + # Custom callback + if self._shadowSubscribeCallbackTable.get(currentAction) is not None: + processCustomCallback = Thread(target=self._shadowSubscribeCallbackTable[currentAction], args=[payloadUTF8String, 
currentType, None]) + processCustomCallback.start() def _parseTopicAction(self, srcTopic): ret = None @@ -182,19 +177,18 @@ def _parseTopicShadowName(self, srcTopic): return fragments[2] def _timerHandler(self, srcActionName, srcToken): - self._dataStructureLock.acquire() - # Remove the token - del self._tokenPool[srcToken] - # Need to unsubscribe? - self._shadowSubscribeStatusTable[srcActionName] -= 1 - if not self._isPersistentSubscribe and self._shadowSubscribeStatusTable.get(srcActionName) <= 0: - self._shadowSubscribeStatusTable[srcActionName] = 0 - self._shadowManagerHandler.basicShadowUnsubscribe(self._shadowName, srcActionName) - # Notify time-out issue - if self._shadowSubscribeCallbackTable.get(srcActionName) is not None: - self._logger.info("Shadow request with token: " + str(srcToken) + " has timed out.") - self._shadowSubscribeCallbackTable[srcActionName]("REQUEST TIME OUT", "timeout", srcToken) - self._dataStructureLock.release() + with self._dataStructureLock: + # Remove the token + del self._tokenPool[srcToken] + # Need to unsubscribe? + self._shadowSubscribeStatusTable[srcActionName] -= 1 + if not self._isPersistentSubscribe and self._shadowSubscribeStatusTable.get(srcActionName) <= 0: + self._shadowSubscribeStatusTable[srcActionName] = 0 + self._shadowManagerHandler.basicShadowUnsubscribe(self._shadowName, srcActionName) + # Notify time-out issue + if self._shadowSubscribeCallbackTable.get(srcActionName) is not None: + self._logger.info("Shadow request with token: " + str(srcToken) + " has timed out.") + self._shadowSubscribeCallbackTable[srcActionName]("REQUEST TIME OUT", "timeout", srcToken) def shadowGet(self, srcCallback, srcTimeout): """ @@ -228,22 +222,21 @@ def shadowGet(self, srcCallback, srcTimeout): The token used for tracing in this shadow request. 
""" - self._dataStructureLock.acquire() - # Update callback data structure - self._shadowSubscribeCallbackTable["get"] = srcCallback - # Update number of pending feedback - self._shadowSubscribeStatusTable["get"] += 1 - # clientToken - currentToken = self._tokenHandler.getNextToken() - self._tokenPool[currentToken] = Timer(srcTimeout, self._timerHandler, ["get", currentToken]) - self._basicJSONParserHandler.setString("{}") - self._basicJSONParserHandler.validateJSON() - self._basicJSONParserHandler.setAttributeValue("clientToken", currentToken) - currentPayload = self._basicJSONParserHandler.regenerateString() - self._dataStructureLock.release() + with self._dataStructureLock: + # Update callback data structure + self._shadowSubscribeCallbackTable["get"] = srcCallback + # Update number of pending feedback + self._shadowSubscribeStatusTable["get"] += 1 + # clientToken + currentToken = self._tokenHandler.getNextToken() + self._tokenPool[currentToken] = Timer(srcTimeout, self._timerHandler, ["get", currentToken]) + self._basicJSONParserHandler.setString("{}") + self._basicJSONParserHandler.validateJSON() + self._basicJSONParserHandler.setAttributeValue("clientToken", currentToken) + currentPayload = self._basicJSONParserHandler.regenerateString() # Two subscriptions if not self._isPersistentSubscribe or not self._isGetSubscribed: - self._shadowManagerHandler.basicShadowSubscribe(self._shadowName, "get", self._generalCallback) + self._shadowManagerHandler.basicShadowSubscribe(self._shadowName, "get", self.generalCallback) self._isGetSubscribed = True self._logger.info("Subscribed to get accepted/rejected topics for deviceShadow: " + self._shadowName) # One publish @@ -284,22 +277,21 @@ def shadowDelete(self, srcCallback, srcTimeout): The token used for tracing in this shadow request. 
""" - self._dataStructureLock.acquire() - # Update callback data structure - self._shadowSubscribeCallbackTable["delete"] = srcCallback - # Update number of pending feedback - self._shadowSubscribeStatusTable["delete"] += 1 - # clientToken - currentToken = self._tokenHandler.getNextToken() - self._tokenPool[currentToken] = Timer(srcTimeout, self._timerHandler, ["delete", currentToken]) - self._basicJSONParserHandler.setString("{}") - self._basicJSONParserHandler.validateJSON() - self._basicJSONParserHandler.setAttributeValue("clientToken", currentToken) - currentPayload = self._basicJSONParserHandler.regenerateString() - self._dataStructureLock.release() + with self._dataStructureLock: + # Update callback data structure + self._shadowSubscribeCallbackTable["delete"] = srcCallback + # Update number of pending feedback + self._shadowSubscribeStatusTable["delete"] += 1 + # clientToken + currentToken = self._tokenHandler.getNextToken() + self._tokenPool[currentToken] = Timer(srcTimeout, self._timerHandler, ["delete", currentToken]) + self._basicJSONParserHandler.setString("{}") + self._basicJSONParserHandler.validateJSON() + self._basicJSONParserHandler.setAttributeValue("clientToken", currentToken) + currentPayload = self._basicJSONParserHandler.regenerateString() # Two subscriptions if not self._isPersistentSubscribe or not self._isDeleteSubscribed: - self._shadowManagerHandler.basicShadowSubscribe(self._shadowName, "delete", self._generalCallback) + self._shadowManagerHandler.basicShadowSubscribe(self._shadowName, "delete", self.generalCallback) self._isDeleteSubscribed = True self._logger.info("Subscribed to delete accepted/rejected topics for deviceShadow: " + self._shadowName) # One publish @@ -343,24 +335,21 @@ def shadowUpdate(self, srcJSONPayload, srcCallback, srcTimeout): """ # Validate JSON - JSONPayloadWithToken = None - currentToken = None self._basicJSONParserHandler.setString(srcJSONPayload) if self._basicJSONParserHandler.validateJSON(): - 
self._dataStructureLock.acquire() - # clientToken - currentToken = self._tokenHandler.getNextToken() - self._tokenPool[currentToken] = Timer(srcTimeout, self._timerHandler, ["update", currentToken]) - self._basicJSONParserHandler.setAttributeValue("clientToken", currentToken) - JSONPayloadWithToken = self._basicJSONParserHandler.regenerateString() - # Update callback data structure - self._shadowSubscribeCallbackTable["update"] = srcCallback - # Update number of pending feedback - self._shadowSubscribeStatusTable["update"] += 1 - self._dataStructureLock.release() + with self._dataStructureLock: + # clientToken + currentToken = self._tokenHandler.getNextToken() + self._tokenPool[currentToken] = Timer(srcTimeout, self._timerHandler, ["update", currentToken]) + self._basicJSONParserHandler.setAttributeValue("clientToken", currentToken) + JSONPayloadWithToken = self._basicJSONParserHandler.regenerateString() + # Update callback data structure + self._shadowSubscribeCallbackTable["update"] = srcCallback + # Update number of pending feedback + self._shadowSubscribeStatusTable["update"] += 1 # Two subscriptions if not self._isPersistentSubscribe or not self._isUpdateSubscribed: - self._shadowManagerHandler.basicShadowSubscribe(self._shadowName, "update", self._generalCallback) + self._shadowManagerHandler.basicShadowSubscribe(self._shadowName, "update", self.generalCallback) self._isUpdateSubscribed = True self._logger.info("Subscribed to update accepted/rejected topics for deviceShadow: " + self._shadowName) # One publish @@ -398,12 +387,11 @@ def shadowRegisterDeltaCallback(self, srcCallback): None """ - self._dataStructureLock.acquire() - # Update callback data structure - self._shadowSubscribeCallbackTable["delta"] = srcCallback - self._dataStructureLock.release() + with self._dataStructureLock: + # Update callback data structure + self._shadowSubscribeCallbackTable["delta"] = srcCallback # One subscription - 
self._shadowManagerHandler.basicShadowSubscribe(self._shadowName, "delta", self._generalCallback) + self._shadowManagerHandler.basicShadowSubscribe(self._shadowName, "delta", self.generalCallback) self._logger.info("Subscribed to delta topic for deviceShadow: " + self._shadowName) def shadowUnregisterDeltaCallback(self): @@ -430,10 +418,9 @@ def shadowUnregisterDeltaCallback(self): None """ - self._dataStructureLock.acquire() - # Update callback data structure - del self._shadowSubscribeCallbackTable["delta"] - self._dataStructureLock.release() + with self._dataStructureLock: + # Update callback data structure + del self._shadowSubscribeCallbackTable["delta"] # One unsubscription self._shadowManagerHandler.basicShadowUnsubscribe(self._shadowName, "delta") self._logger.info("Unsubscribed to delta topics for deviceShadow: " + self._shadowName) diff --git a/AWSIoTPythonSDK/core/shadow/shadowManager.py b/AWSIoTPythonSDK/core/shadow/shadowManager.py index 2572aef..3dafa74 100755 --- a/AWSIoTPythonSDK/core/shadow/shadowManager.py +++ b/AWSIoTPythonSDK/core/shadow/shadowManager.py @@ -1,5 +1,5 @@ # /* -# * Copyright 2010-2016 Amazon.com, Inc. or its affiliates. All Rights Reserved. +# * Copyright 2010-2017 Amazon.com, Inc. or its affiliates. All Rights Reserved. # * # * Licensed under the Apache License, Version 2.0 (the "License"). # * You may not use this file except in compliance with the License. 
@@ -57,32 +57,27 @@ def __init__(self, srcMQTTCore): self._mqttCoreHandler = srcMQTTCore self._shadowSubUnsubOperationLock = Lock() - def getClientID(self): - return self._mqttCoreHandler.getClientID() - def basicShadowPublish(self, srcShadowName, srcShadowAction, srcPayload): currentShadowAction = _shadowAction(srcShadowName, srcShadowAction) self._mqttCoreHandler.publish(currentShadowAction.getTopicGeneral(), srcPayload, 0, False) def basicShadowSubscribe(self, srcShadowName, srcShadowAction, srcCallback): - self._shadowSubUnsubOperationLock.acquire() - currentShadowAction = _shadowAction(srcShadowName, srcShadowAction) - if currentShadowAction.isDelta: - self._mqttCoreHandler.subscribe(currentShadowAction.getTopicDelta(), 0, srcCallback) - else: - self._mqttCoreHandler.subscribe(currentShadowAction.getTopicAccept(), 0, srcCallback) - self._mqttCoreHandler.subscribe(currentShadowAction.getTopicReject(), 0, srcCallback) - time.sleep(2) - self._shadowSubUnsubOperationLock.release() + with self._shadowSubUnsubOperationLock: + currentShadowAction = _shadowAction(srcShadowName, srcShadowAction) + if currentShadowAction.isDelta: + self._mqttCoreHandler.subscribe(currentShadowAction.getTopicDelta(), 0, srcCallback) + else: + self._mqttCoreHandler.subscribe(currentShadowAction.getTopicAccept(), 0, srcCallback) + self._mqttCoreHandler.subscribe(currentShadowAction.getTopicReject(), 0, srcCallback) + time.sleep(2) def basicShadowUnsubscribe(self, srcShadowName, srcShadowAction): - self._shadowSubUnsubOperationLock.acquire() - currentShadowAction = _shadowAction(srcShadowName, srcShadowAction) - if currentShadowAction.isDelta: - self._mqttCoreHandler.unsubscribe(currentShadowAction.getTopicDelta()) - else: - self._logger.debug(currentShadowAction.getTopicAccept()) - self._mqttCoreHandler.unsubscribe(currentShadowAction.getTopicAccept()) - self._logger.debug(currentShadowAction.getTopicReject()) - self._mqttCoreHandler.unsubscribe(currentShadowAction.getTopicReject()) - 
self._shadowSubUnsubOperationLock.release() + with self._shadowSubUnsubOperationLock: + currentShadowAction = _shadowAction(srcShadowName, srcShadowAction) + if currentShadowAction.isDelta: + self._mqttCoreHandler.unsubscribe(currentShadowAction.getTopicDelta()) + else: + self._logger.debug(currentShadowAction.getTopicAccept()) + self._mqttCoreHandler.unsubscribe(currentShadowAction.getTopicAccept()) + self._logger.debug(currentShadowAction.getTopicReject()) + self._mqttCoreHandler.unsubscribe(currentShadowAction.getTopicReject()) diff --git a/AWSIoTPythonSDK/core/util/enums.py b/AWSIoTPythonSDK/core/util/enums.py new file mode 100644 index 0000000..3aa3d2f --- /dev/null +++ b/AWSIoTPythonSDK/core/util/enums.py @@ -0,0 +1,19 @@ +# /* +# * Copyright 2010-2017 Amazon.com, Inc. or its affiliates. All Rights Reserved. +# * +# * Licensed under the Apache License, Version 2.0 (the "License"). +# * You may not use this file except in compliance with the License. +# * A copy of the License is located at +# * +# * http://aws.amazon.com/apache2.0 +# * +# * or in the "license" file accompanying this file. This file is distributed +# * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either +# * express or implied. See the License for the specific language governing +# * permissions and limitations under the License. +# */ + + +class DropBehaviorTypes(object): + DROP_OLDEST = 0 + DROP_NEWEST = 1 diff --git a/AWSIoTPythonSDK/core/util/offlinePublishQueue.py b/AWSIoTPythonSDK/core/util/offlinePublishQueue.py deleted file mode 100755 index 8ba2d44..0000000 --- a/AWSIoTPythonSDK/core/util/offlinePublishQueue.py +++ /dev/null @@ -1,92 +0,0 @@ -# /* -# * Copyright 2010-2016 Amazon.com, Inc. or its affiliates. All Rights Reserved. -# * -# * Licensed under the Apache License, Version 2.0 (the "License"). -# * You may not use this file except in compliance with the License. 
-# * A copy of the License is located at -# * -# * http://aws.amazon.com/apache2.0 -# * -# * or in the "license" file accompanying this file. This file is distributed -# * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either -# * express or implied. See the License for the specific language governing -# * permissions and limitations under the License. -# */ - -# This class implements the offline Publish Queue, with configurable length and drop behaviors. -# This queue will be used as the offline Publish Queue for all message outside Paho as an option -# to publish to when the client is offline. -# DROP_OLDEST: Drop the head of the queue when the size limit is reached. -# DROP_NEWEST: Drop the new incoming elements when the size limit is reached. - -import logging - -class offlinePublishQueue(list): - - _DROPBEHAVIOR_OLDEST = 0 - _DROPBEHAVIOR_NEWEST = 1 - - APPEND_FAILURE_QUEUE_FULL = -1 - APPEND_FAILURE_QUEUE_DISABLED = -2 - APPEND_SUCCESS = 0 - - _logger = logging.getLogger(__name__) - - def __init__(self, srcMaximumSize, srcDropBehavior=1): - if not isinstance(srcMaximumSize, int) or not isinstance(srcDropBehavior, int): - self._logger.error("init: MaximumSize/DropBehavior must be integer.") - raise TypeError("MaximumSize/DropBehavior must be integer.") - if srcDropBehavior != self._DROPBEHAVIOR_OLDEST and srcDropBehavior != self._DROPBEHAVIOR_NEWEST: - self._logger.error("init: Drop behavior not supported.") - raise ValueError("Drop behavior not supported.") - list.__init__([]) - self._dropBehavior = srcDropBehavior - # When self._maximumSize > 0, queue is limited - # When self._maximumSize == 0, queue is disabled - # When self._maximumSize < 0. queue is infinite - self._maximumSize = srcMaximumSize - - def _isEnabled(self): - return self._maximumSize != 0 - - def _needDropMessages(self): - # Need to drop messages when: - # 1. Queue is limited and full - # 2. 
Queue is disabled - isQueueFull = len(self) >= self._maximumSize - isQueueLimited = self._maximumSize > 0 - isQueueDisabled = not self._isEnabled() - return (isQueueFull and isQueueLimited) or isQueueDisabled - - def setQueueBehaviorDropNewest(self): - self._dropBehavior = self._DROPBEHAVIOR_NEWEST - - def setQueueBehaviorDropOldest(self): - self._dropBehavior = self._DROPBEHAVIOR_OLDEST - - # Override - # Append to a queue with a limited size. - # Return APPEND_SUCCESS if the append is successful - # Return APPEND_FAILURE_QUEUE_FULL if the append failed because the queue is full - # Return APPEND_FAILURE_QUEUE_DISABLED if the append failed because the queue is disabled - def append(self, srcData): - ret = self.APPEND_SUCCESS - if self._isEnabled(): - if self._needDropMessages(): - # We should drop the newest - if self._dropBehavior == self._DROPBEHAVIOR_NEWEST: - self._logger.warn("append: Full queue. Drop the newest: " + str(srcData)) - ret = self.APPEND_FAILURE_QUEUE_FULL - # We should drop the oldest - else: - currentOldest = super(offlinePublishQueue, self).pop(0) - self._logger.warn("append: Full queue. Drop the oldest: " + str(currentOldest)) - super(offlinePublishQueue, self).append(srcData) - ret = self.APPEND_FAILURE_QUEUE_FULL - else: - self._logger.debug("append: Add new element: " + str(srcData)) - super(offlinePublishQueue, self).append(srcData) - else: - self._logger.debug("append: Queue is disabled. Drop the message: " + str(srcData)) - ret = self.APPEND_FAILURE_QUEUE_DISABLED - return ret diff --git a/AWSIoTPythonSDK/core/util/progressiveBackoffCore.py b/AWSIoTPythonSDK/core/util/progressiveBackoffCore.py deleted file mode 100755 index cc56533..0000000 --- a/AWSIoTPythonSDK/core/util/progressiveBackoffCore.py +++ /dev/null @@ -1,91 +0,0 @@ -# /* -# * Copyright 2010-2016 Amazon.com, Inc. or its affiliates. All Rights Reserved. -# * -# * Licensed under the Apache License, Version 2.0 (the "License"). 
-# * You may not use this file except in compliance with the License. -# * A copy of the License is located at -# * -# * http://aws.amazon.com/apache2.0 -# * -# * or in the "license" file accompanying this file. This file is distributed -# * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either -# * express or implied. See the License for the specific language governing -# * permissions and limitations under the License. -# */ - - # This class implements the progressive backoff logic for auto-reconnect. - # It manages the reconnect wait time for the current reconnect, controling - # when to increase it and when to reset it. - -import time -import threading -import logging - - -class progressiveBackoffCore: - - # Logger - _logger = logging.getLogger(__name__) - - def __init__(self, srcBaseReconnectTimeSecond=1, srcMaximumReconnectTimeSecond=32, srcMinimumConnectTimeSecond=20): - # The base reconnection time in seconds, default 1 - self._baseReconnectTimeSecond = srcBaseReconnectTimeSecond - # The maximum reconnection time in seconds, default 32 - self._maximumReconnectTimeSecond = srcMaximumReconnectTimeSecond - # The minimum time in milliseconds that a connection must be maintained in order to be considered stable - # Default 20 - self._minimumConnectTimeSecond = srcMinimumConnectTimeSecond - # Current backOff time in seconds, init to equal to 0 - self._currentBackoffTimeSecond = 1 - # Handler for timer - self._resetBackoffTimer = None - - # For custom progressiveBackoff timing configuration - def configTime(self, srcBaseReconnectTimeSecond, srcMaximumReconnectTimeSecond, srcMinimumConnectTimeSecond): - if srcBaseReconnectTimeSecond < 0 or srcMaximumReconnectTimeSecond < 0 or srcMinimumConnectTimeSecond < 0: - self._logger.error("init: Negative time configuration detected.") - raise ValueError("Negative time configuration detected.") - if srcBaseReconnectTimeSecond >= srcMinimumConnectTimeSecond: - self._logger.error("init: Min connect time 
should be bigger than base reconnect time.") - raise ValueError("Min connect time should be bigger than base reconnect time.") - self._baseReconnectTimeSecond = srcBaseReconnectTimeSecond - self._maximumReconnectTimeSecond = srcMaximumReconnectTimeSecond - self._minimumConnectTimeSecond = srcMinimumConnectTimeSecond - self._currentBackoffTimeSecond = 1 - - # Block the reconnect logic for _currentBackoffTimeSecond - # Update the currentBackoffTimeSecond for the next reconnect - # Cancel the in-waiting timer for resetting backOff time - # This should get called only when a disconnect/reconnect happens - def backOff(self): - self._logger.debug("backOff: current backoff time is: " + str(self._currentBackoffTimeSecond) + " sec.") - if self._resetBackoffTimer is not None: - # Cancel the timer - self._resetBackoffTimer.cancel() - # Block the reconnect logic - time.sleep(self._currentBackoffTimeSecond) - # Update the backoff time - if self._currentBackoffTimeSecond == 0: - # This is the first attempt to connect, set it to base - self._currentBackoffTimeSecond = self._baseReconnectTimeSecond - else: - # r_cur = min(2^n*r_base, r_max) - self._currentBackoffTimeSecond = min(self._maximumReconnectTimeSecond, self._currentBackoffTimeSecond * 2) - - # Start the timer for resetting _currentBackoffTimeSecond - # Will be cancelled upon calling backOff - def startStableConnectionTimer(self): - self._resetBackoffTimer = threading.Timer(self._minimumConnectTimeSecond, self._connectionStableThenResetBackoffTime) - self._resetBackoffTimer.start() - - def stopStableConnectionTimer(self): - if self._resetBackoffTimer is not None: - # Cancel the timer - self._resetBackoffTimer.cancel() - - # Timer callback to reset _currentBackoffTimeSecond - # If the connection is stable for longer than _minimumConnectTimeSecond, - # reset the currentBackoffTimeSecond to _baseReconnectTimeSecond - def _connectionStableThenResetBackoffTime(self): - self._logger.debug("stableConnection: Resetting the 
backoff time to: " + str(self._baseReconnectTimeSecond) + " sec.") - self._currentBackoffTimeSecond = self._baseReconnectTimeSecond diff --git a/AWSIoTPythonSDK/core/util/providers.py b/AWSIoTPythonSDK/core/util/providers.py new file mode 100644 index 0000000..d90789a --- /dev/null +++ b/AWSIoTPythonSDK/core/util/providers.py @@ -0,0 +1,92 @@ +# /* +# * Copyright 2010-2017 Amazon.com, Inc. or its affiliates. All Rights Reserved. +# * +# * Licensed under the Apache License, Version 2.0 (the "License"). +# * You may not use this file except in compliance with the License. +# * A copy of the License is located at +# * +# * http://aws.amazon.com/apache2.0 +# * +# * or in the "license" file accompanying this file. This file is distributed +# * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either +# * express or implied. See the License for the specific language governing +# * permissions and limitations under the License. +# */ + + +class CredentialsProvider(object): + + def __init__(self): + self._ca_path = "" + + def set_ca_path(self, ca_path): + self._ca_path = ca_path + + def get_ca_path(self): + return self._ca_path + + +class CertificateCredentialsProvider(CredentialsProvider): + + def __init__(self): + CredentialsProvider.__init__(self) + self._cert_path = "" + self._key_path = "" + + def set_cert_path(self,cert_path): + self._cert_path = cert_path + + def set_key_path(self, key_path): + self._key_path = key_path + + def get_cert_path(self): + return self._cert_path + + def get_key_path(self): + return self._key_path + + +class IAMCredentialsProvider(CredentialsProvider): + + def __init__(self): + CredentialsProvider.__init__(self) + self._aws_access_key_id = "" + self._aws_secret_access_key = "" + self._aws_session_token = "" + + def set_access_key_id(self, access_key_id): + self._aws_access_key_id = access_key_id + + def set_secret_access_key(self, secret_access_key): + self._aws_secret_access_key = secret_access_key + + def 
set_session_token(self, session_token): + self._aws_session_token = session_token + + def get_access_key_id(self): + return self._aws_access_key_id + + def get_secret_access_key(self): + return self._aws_secret_access_key + + def get_session_token(self): + return self._aws_session_token + + +class EndpointProvider(object): + + def __init__(self): + self._host = "" + self._port = -1 + + def set_host(self, host): + self._host = host + + def set_port(self, port): + self._port = port + + def get_host(self): + return self._host + + def get_port(self): + return self._port diff --git a/AWSIoTPythonSDK/core/util/sigV4Core.py b/AWSIoTPythonSDK/core/util/sigV4Core.py deleted file mode 100755 index 0b22dab..0000000 --- a/AWSIoTPythonSDK/core/util/sigV4Core.py +++ /dev/null @@ -1,187 +0,0 @@ -# /* -# * Copyright 2010-2016 Amazon.com, Inc. or its affiliates. All Rights Reserved. -# * -# * Licensed under the Apache License, Version 2.0 (the "License"). -# * You may not use this file except in compliance with the License. -# * A copy of the License is located at -# * -# * http://aws.amazon.com/apache2.0 -# * -# * or in the "license" file accompanying this file. This file is distributed -# * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either -# * express or implied. See the License for the specific language governing -# * permissions and limitations under the License. 
-# */ - -# This class implements the sigV4 signing process and return the signed URL for connection - -import os -import datetime -import hashlib -import hmac -try: - from urllib.parse import quote # Python 3+ -except ImportError: - from urllib import quote -import logging -# INI config file handling -try: - from configparser import ConfigParser # Python 3+ - from configparser import NoOptionError - from configparser import NoSectionError -except ImportError: - from ConfigParser import ConfigParser - from ConfigParser import NoOptionError - from ConfigParser import NoSectionError - -class sigV4Core: - - _logger = logging.getLogger(__name__) - - def __init__(self): - self._aws_access_key_id = "" - self._aws_secret_access_key = "" - self._aws_session_token = "" - self._credentialConfigFilePath = "~/.aws/credentials" - - def setIAMCredentials(self, srcAWSAccessKeyID, srcAWSSecretAccessKey, srcAWSSessionToken): - self._aws_access_key_id = srcAWSAccessKeyID - self._aws_secret_access_key = srcAWSSecretAccessKey - self._aws_session_token = srcAWSSessionToken - - def _createAmazonDate(self): - # Returned as a unicode string in Py3.x - amazonDate = [] - currentTime = datetime.datetime.utcnow() - YMDHMS = currentTime.strftime('%Y%m%dT%H%M%SZ') - YMD = YMDHMS[0:YMDHMS.index('T')] - amazonDate.append(YMD) - amazonDate.append(YMDHMS) - return amazonDate - - def _sign(self, key, message): - # Returned as a utf-8 byte string in Py3.x - return hmac.new(key, message.encode('utf-8'), hashlib.sha256).digest() - - def _getSignatureKey(self, key, dateStamp, regionName, serviceName): - # Returned as a utf-8 byte string in Py3.x - kDate = self._sign(('AWS4' + key).encode('utf-8'), dateStamp) - kRegion = self._sign(kDate, regionName) - kService = self._sign(kRegion, serviceName) - kSigning = self._sign(kService, 'aws4_request') - return kSigning - - def _checkIAMCredentials(self): - # Check custom config - ret = self._checkKeyInCustomConfig() - # Check environment variables - if not ret: 
- ret = self._checkKeyInEnv() - # Check files - if not ret: - ret = self._checkKeyInFiles() - # All credentials returned as unicode strings in Py3.x - return ret - - def _checkKeyInEnv(self): - ret = dict() - self._aws_access_key_id = os.environ.get('AWS_ACCESS_KEY_ID') - self._aws_secret_access_key = os.environ.get('AWS_SECRET_ACCESS_KEY') - self._aws_session_token = os.environ.get('AWS_SESSION_TOKEN') - if self._aws_access_key_id is not None and self._aws_secret_access_key is not None: - ret["aws_access_key_id"] = self._aws_access_key_id - ret["aws_secret_access_key"] = self._aws_secret_access_key - # We do not necessarily need session token... - if self._aws_session_token is not None: - ret["aws_session_token"] = self._aws_session_token - self._logger.debug("IAM credentials from env var.") - return ret - - def _checkKeyInINIDefault(self, srcConfigParser, sectionName): - ret = dict() - # Check aws_access_key_id and aws_secret_access_key - try: - ret["aws_access_key_id"] = srcConfigParser.get(sectionName, "aws_access_key_id") - ret["aws_secret_access_key"] = srcConfigParser.get(sectionName, "aws_secret_access_key") - except NoOptionError: - self._logger.warn("Cannot find IAM keyID/secretKey in credential file.") - # We do not continue searching if we cannot even get IAM id/secret right - if len(ret) == 2: - # Check aws_session_token, optional - try: - ret["aws_session_token"] = srcConfigParser.get(sectionName, "aws_session_token") - except NoOptionError: - self._logger.debug("No AWS Session Token found.") - return ret - - def _checkKeyInFiles(self): - credentialFile = None - credentialConfig = None - ret = dict() - # Should be compatible with aws cli default credential configuration - # *NIX/Windows - try: - # See if we get the file - credentialConfig = ConfigParser() - credentialFilePath = os.path.expanduser(self._credentialConfigFilePath) # Is it compatible with windows? 
\/ - credentialConfig.read(credentialFilePath) - # Now we have the file, start looking for credentials... - # 'default' section - ret = self._checkKeyInINIDefault(credentialConfig, "default") - if not ret: - # 'DEFAULT' section - ret = self._checkKeyInINIDefault(credentialConfig, "DEFAULT") - self._logger.debug("IAM credentials from file.") - except IOError: - self._logger.debug("No IAM credential configuration file in " + credentialFilePath) - except NoSectionError: - self._logger.error("Cannot find IAM 'default' section.") - return ret - - def _checkKeyInCustomConfig(self): - ret = dict() - if self._aws_access_key_id != "" and self._aws_secret_access_key != "": - ret["aws_access_key_id"] = self._aws_access_key_id - ret["aws_secret_access_key"] = self._aws_secret_access_key - # We do not necessarily need session token... - if self._aws_session_token != "": - ret["aws_session_token"] = self._aws_session_token - self._logger.debug("IAM credentials from custom config.") - return ret - - def createWebsocketEndpoint(self, host, port, region, method, awsServiceName, path): - # Return the endpoint as unicode string in 3.x - # Gather all the facts - amazonDate = self._createAmazonDate() - amazonDateSimple = amazonDate[0] # Unicode in 3.x - amazonDateComplex = amazonDate[1] # Unicode in 3.x - allKeys = self._checkIAMCredentials() # Unicode in 3.x - hasCredentialsNecessaryForWebsocket = "aws_access_key_id" in allKeys.keys() and "aws_secret_access_key" in allKeys.keys() - if not hasCredentialsNecessaryForWebsocket: - return "" - else: - keyID = allKeys["aws_access_key_id"] - secretKey = allKeys["aws_secret_access_key"] - queryParameters = "X-Amz-Algorithm=AWS4-HMAC-SHA256" + \ - "&X-Amz-Credential=" + keyID + "%2F" + amazonDateSimple + "%2F" + region + "%2F" + awsServiceName + "%2Faws4_request" + \ - "&X-Amz-Date=" + amazonDateComplex + \ - "&X-Amz-Expires=86400" + \ - "&X-Amz-SignedHeaders=host" # Unicode in 3.x - hashedPayload = 
hashlib.sha256(str("").encode('utf-8')).hexdigest() # Unicode in 3.x - # Create the string to sign - signedHeaders = "host" - canonicalHeaders = "host:" + host + "\n" - canonicalRequest = method + "\n" + path + "\n" + queryParameters + "\n" + canonicalHeaders + "\n" + signedHeaders + "\n" + hashedPayload # Unicode in 3.x - hashedCanonicalRequest = hashlib.sha256(str(canonicalRequest).encode('utf-8')).hexdigest() # Unicoede in 3.x - stringToSign = "AWS4-HMAC-SHA256\n" + amazonDateComplex + "\n" + amazonDateSimple + "/" + region + "/" + awsServiceName + "/aws4_request\n" + hashedCanonicalRequest # Unicode in 3.x - # Sign it - signingKey = self._getSignatureKey(secretKey, amazonDateSimple, region, awsServiceName) - signature = hmac.new(signingKey, (stringToSign).encode("utf-8"), hashlib.sha256).hexdigest() - # generate url - url = "wss://" + host + ":" + str(port) + path + '?' + queryParameters + "&X-Amz-Signature=" + signature - # See if we have STS token, if we do, add it - if "aws_session_token" in allKeys.keys(): - aws_session_token = allKeys["aws_session_token"] - url += "&X-Amz-Security-Token=" + quote(aws_session_token.encode("utf-8")) # Unicode in 3.x - self._logger.debug("createWebsocketEndpoint: Websocket URL: " + url) - return url diff --git a/AWSIoTPythonSDK/exception/AWSIoTExceptions.py b/AWSIoTPythonSDK/exception/AWSIoTExceptions.py index 0ddfa73..3bfc6e2 100755 --- a/AWSIoTPythonSDK/exception/AWSIoTExceptions.py +++ b/AWSIoTPythonSDK/exception/AWSIoTExceptions.py @@ -1,5 +1,5 @@ # /* -# * Copyright 2010-2016 Amazon.com, Inc. or its affiliates. All Rights Reserved. +# * Copyright 2010-2017 Amazon.com, Inc. or its affiliates. All Rights Reserved. # * # * Licensed under the Apache License, Version 2.0 (the "License"). # * You may not use this file except in compliance with the License. 
@@ -80,11 +80,31 @@ def __init__(self, errorCode): self.message = "Subscribe Error: " + str(errorCode) +class subscribeQueueFullException(operationError.operationError): + def __init__(self): + self.message = "Internal Subscribe Queue Full" + + +class subscribeQueueDisabledException(operationError.operationError): + def __init__(self): + self.message = "Offline subscribe request dropped because queueing is disabled" + + class unsubscribeError(operationError.operationError): def __init__(self, errorCode): self.message = "Unsubscribe Error: " + str(errorCode) +class unsubscribeQueueFullException(operationError.operationError): + def __init__(self): + self.message = "Internal Unsubscribe Queue Full" + + +class unsubscribeQueueDisabledException(operationError.operationError): + def __init__(self): + self.message = "Offline unsubscribe request dropped because queueing is disabled" + + # Websocket Error class wssNoKeyInEnvironmentError(operationError.operationError): def __init__(self): @@ -94,3 +114,34 @@ def __init__(self): class wssHandShakeError(operationError.operationError): def __init__(self): self.message = "Error in WSS handshake." 
+ + +# Greengrass Discovery Error +class DiscoveryDataNotFoundException(operationError.operationError): + def __init__(self): + self.message = "No discovery data found" + + +class DiscoveryTimeoutException(operationTimeoutException.operationTimeoutException): + def __init__(self, message="Discovery request timed out"): + self.message = message + + +class DiscoveryInvalidRequestException(operationError.operationError): + def __init__(self): + self.message = "Invalid discovery request" + + +class DiscoveryUnauthorizedException(operationError.operationError): + def __init__(self): + self.message = "Discovery request not authorized" + + +class DiscoveryThrottlingException(operationError.operationError): + def __init__(self): + self.message = "Too many discovery requests" + + +class DiscoveryFailure(operationError.operationError): + def __init__(self, message): + self.message = message diff --git a/AWSIoTPythonSDK/exception/operationError.py b/AWSIoTPythonSDK/exception/operationError.py index efbb399..1c86dfc 100755 --- a/AWSIoTPythonSDK/exception/operationError.py +++ b/AWSIoTPythonSDK/exception/operationError.py @@ -1,5 +1,5 @@ # /* -# * Copyright 2010-2016 Amazon.com, Inc. or its affiliates. All Rights Reserved. +# * Copyright 2010-2017 Amazon.com, Inc. or its affiliates. All Rights Reserved. # * # * Licensed under the Apache License, Version 2.0 (the "License"). # * You may not use this file except in compliance with the License. diff --git a/AWSIoTPythonSDK/exception/operationTimeoutException.py b/AWSIoTPythonSDK/exception/operationTimeoutException.py index 48d4f15..737154e 100755 --- a/AWSIoTPythonSDK/exception/operationTimeoutException.py +++ b/AWSIoTPythonSDK/exception/operationTimeoutException.py @@ -1,5 +1,5 @@ # /* -# * Copyright 2010-2016 Amazon.com, Inc. or its affiliates. All Rights Reserved. +# * Copyright 2010-2017 Amazon.com, Inc. or its affiliates. All Rights Reserved. # * # * Licensed under the Apache License, Version 2.0 (the "License"). 
# * You may not use this file except in compliance with the License. diff --git a/CHANGELOG.rst b/CHANGELOG.rst index 9c48d50..8138996 100644 --- a/CHANGELOG.rst +++ b/CHANGELOG.rst @@ -2,6 +2,18 @@ CHANGELOG ========= +1.2.0 +===== +* improvement:AWSIoTMQTTClient:Improved synchronous API backend for ACK tracking +* feature:AWSIoTMQTTClient:New API for asynchronous API +* feature:AWSIoTMQTTClient:Expose general notification callbacks for online, offline and message arrival +* feature:AWSIoTMQTTShadowClient:Expose general notification callbacks for online, offline and message arrival +* feature:AWSIoTMQTTClient:Extend offline queueing to include offline subscribe/unsubscribe requests +* feature:DiscoveryInfoProvider:Support for Greengrass discovery +* bugfix:Pull request:`#50 `__ +* bugfix:Pull request:`#51 `__ +* bugfix:Issue:`#52 `__ + 1.1.2 ===== * bugfix:Issue:`#28 `__ diff --git a/README.rst b/README.rst index 6a7eaf1..ea11aa7 100755 --- a/README.rst +++ b/README.rst @@ -292,13 +292,51 @@ MQTT operations along with shadow operations: myMQTTClient = myShadowClient.getMQTTConnection() myMQTTClient.publish("plainMQTTTopic", "Payload", 1) +DiscoveryInfoProvider +_____________________ + +This is the client class for device discovery process with AWS IoT Greengrass. +You can initialize and configure the client like this: + +.. code-block:: python + + from AWSIoTPythonSDK.core.greengrass.discovery.providers import DiscoveryInfoProvider + + discoveryInfoProvider = DiscoveryInfoProvider() + discoveryInfoProvider.configureEndpoint("YOUR.IOT.ENDPOINT") + discoveryInfoProvider.configureCredentials("YOUR/ROOT/CA/PATH", "CERTIFICATE/PATH", "PRIVATE/KEY/PATH") + discoveryInfoProvider.configureTimeout(10) # 10 sec + +To perform the discovery process for a Greengrass Aware Device (GGAD) that belongs to a deployed group, your script +should look like this: + +.. 
code-block:: python + + discoveryInfo = discoveryInfoProvider.discover("myGGADThingName") + # I know nothing about the group/core I want to connect to. I want to iterate through all cores and find out. + coreList = discoveryInfo.getAllCores() + groupIdCAList = discoveryInfo.getAllCas() # list([(groupId, ca), ...]) + # I know nothing about the group/core I want to connect to. I want to iterate through all groups and find out. + groupList = discoveryInfo.getAllGroups() + # I know exactly which group, which core and which connectivity info I need to connect. + connectivityInfo = discoveryInfo.toObjectAtGroupLevel()["YOUR_GROUP_ID"] + .getCoreConnectivityInfo("YOUR_CORE_THING_ARN") + .getConnectivityInfo("YOUR_CONNECTIVITY_ID") + # Connecting logic follows... + ... + +For more information about discovery information access at group/core/connectivity info set level, please refer to the +API documentation for ``AWSIoTPythonSDK.core.greengrass.discovery.models``, +`Greengrass Discovery documentation `__ +or `Greengrass overall documentation `__. + .. _Key_Features: Key Features ~~~~~~~~~~~~ -Progressive Reconnect Backoff -_____________________________ +Progressive Reconnect Back Off +______________________________ When a non-client-side disconnect occurs, the SDK will reconnect automatically. The following APIs are provided for configuration: @@ -332,11 +370,11 @@ default configuration for backoff timing will be performed on initialization: maxReconnectQuietTimeSecond = 32 stableConnectionTimeSecond = 20 -Offline Publish Requests Queueing with Draining -_______________________________________________ +Offline Requests Queueing with Draining +_______________________________________ If the client is temporarily offline and disconnected due to -network failure, publish requests will be added to an internal +network failure, publish/subscribe/unsubscribe requests will be added to an internal queue until the number of queued-up requests reaches the size limit of the queue. 
This functionality is for plain MQTT operations. Shadow client contains time-sensitive data and is therefore not supported. @@ -347,7 +385,7 @@ The following API is provided for configuration: AWSIoTPythonSDK.MQTTLib.AWSIoTMQTTClient.configureOfflinePublishQueueing(queueSize, dropBehavior) -After the queue is full, offline publish requests will be discarded or +After the queue is full, offline publish/subscribe/unsubscribe requests will be discarded or replaced according to the configuration of the drop behavior: .. code-block:: python @@ -406,7 +444,7 @@ Because the queue is already full, the newest requests ``pub_req6`` and When the client is back online, connected, and resubscribed to all topics it has previously subscribed to, the draining starts. All requests -in the offline publish queue will be resent at the configured draining +in the offline request queue will be resent at the configured draining rate: .. code-block:: python @@ -414,7 +452,7 @@ rate: AWSIoTPythonSDK.MQTTLib.AWSIoTMQTTClient.configureDrainingFrequency(frequencyInHz) If no ``configOfflinePublishQueue`` or ``configureDrainingFrequency`` is -called, the following default configuration for offline publish queueing +called, the following default configuration for offline request queueing and draining will be performed on the initialization: .. code-block:: python @@ -423,16 +461,16 @@ and draining will be performed on the initialization: dropBehavior = DROP_NEWEST drainingFrequency = 2Hz -Before the draining process is complete, any new publish request +Before the draining process is complete, any new publish/subscribe/unsubscribe request within this time period will be added to the queue. Therefore, the draining rate -should be higher than the normal publish rate to avoid an endless +should be higher than the normal request rate to avoid an endless draining process after reconnect. The disconnect event is detected based on PINGRESP MQTT -packet loss. 
Offline publish queueing will not be triggered until the +packet loss. Offline request queueing will not be triggered until the disconnect event is detected. Configuring a shorter keep-alive interval allows the client to detect disconnects more quickly. Any QoS0 -publish requests issued after the network failure and before the +publish, subscribe and unsubscribe requests issued after the network failure and before the detection of the PINGRESP loss will be lost. Persistent/Non-Persistent Subscription @@ -551,6 +589,37 @@ Source The example is available in ``samples/basicPubSub/``. +BasicPubSub Asynchronous version +________________________________ + +This example demonstrates a simple MQTT publish/subscribe with asynchronous APIs using AWS IoT. +It first registers general notification callbacks for CONNACK reception, disconnect reception and message arrival. +It then registers ACK callbacks for subscribe and publish requests to print out received ack packet ids. +It subscribes to a topic with no specific callback and then publishes to the same topic in a loop. +New messages are printed upon reception by the general message arrival callback, indicating +the callback function has been called. +New ack packet ids are printed upon reception of PUBACK and SUBACK through ACK callbacks registered with asynchronous +API calls, indicating that the the client received ACKs for the corresponding asynchronous API calls. + +Instructions +************ + +Run the example like this: + +.. code-block:: python + + # Certificate based mutual authentication + python basicPubSubAsync.py -e -r -c -k + # MQTT over WebSocket + python basicPubSubAsync.py -e -r -w + # Customize client id and topic + python basicPubSubAsync.py -e -r -c -k -id -t + +Source +****** + +The example is available in ``samples/basicPubSub/``. + BasicShadow ___________ @@ -638,6 +707,37 @@ Source The example is available in ``samples/ThingShadowEcho/``. 
+BasicDiscovery +______________ + +This example demonstrates how to perform a discovery process from a Greengrass Aware Device (GGAD) to obtain the required +connectivity/identity information to connect to the Greengrass Core (GGC) deployed within the same group. It uses the +discovery information provider to invoke discover call for a certain GGAD with its thing name. After it gets back a +success response, it picks up the first GGC and the first set of identity information (CA) for the first group, persists \ +it locally and iterates through all connectivity info sets for this GGC to establish a MQTT connection to the designated +GGC. It then publishes messages to the topic, which, on the GGC side, is configured to route the messages back to the +same GGAD. Therefore, it receives the published messages and invokes the corresponding message callbacks. + +Note that in order to get the sample up and running correctly, you need: + +1. Have a successfully deployed Greengrass group. + +2. Use the certificate and private key that have been deployed with the group for the GGAD to perform discovery process. + +3. The subscription records for that deployed group should contain a route that routes messages from the targeted GGAD to itself via a dedicated MQTT topic. + +4. The deployed GGAD thing name, the deployed GGAD certificate/private key and the dedicated MQTT topic should be used as the inputs for this sample. + + +Run the sample like this: + +.. code-block:: python + + python basicDiscovery.py -e -r -c -k -n -t + +If the group, GGC, GGAD and group subscription/routes are set up correctly, you should be able to see the sample running +on your GGAD, receiving messages that get published to GGC by itself. + .. 
_API_Documentation: API Documentation diff --git a/samples/ThingShadowEcho/ThingShadowEcho.py b/samples/ThingShadowEcho/ThingShadowEcho.py index a026ca9..ba229b3 100755 --- a/samples/ThingShadowEcho/ThingShadowEcho.py +++ b/samples/ThingShadowEcho/ThingShadowEcho.py @@ -1,6 +1,6 @@ ''' /* - * Copyright 2010-2016 Amazon.com, Inc. or its affiliates. All Rights Reserved. + * Copyright 2010-2017 Amazon.com, Inc. or its affiliates. All Rights Reserved. * * Licensed under the Apache License, Version 2.0 (the "License"). * You may not use this file except in compliance with the License. @@ -16,28 +16,29 @@ ''' from AWSIoTPythonSDK.MQTTLib import AWSIoTMQTTShadowClient -import sys import logging import time import json import argparse + class shadowCallbackContainer: - def __init__(self, deviceShadowInstance): - self.deviceShadowInstance = deviceShadowInstance - - # Custom Shadow callback - def customShadowCallback_Delta(self, payload, responseStatus, token): - # payload is a JSON string ready to be parsed using json.loads(...) - # in both Py2.x and Py3.x - print("Received a delta message:") - payloadDict = json.loads(payload) - deltaMessage = json.dumps(payloadDict["state"]) - print(deltaMessage) - print("Request to update the reported state...") - newPayload = '{"state":{"reported":' + deltaMessage + '}}' - self.deviceShadowInstance.shadowUpdate(newPayload, None, 5) - print("Sent.") + def __init__(self, deviceShadowInstance): + self.deviceShadowInstance = deviceShadowInstance + + # Custom Shadow callback + def customShadowCallback_Delta(self, payload, responseStatus, token): + # payload is a JSON string ready to be parsed using json.loads(...) 
+ # in both Py2.x and Py3.x + print("Received a delta message:") + payloadDict = json.loads(payload) + deltaMessage = json.dumps(payloadDict["state"]) + print(deltaMessage) + print("Request to update the reported state...") + newPayload = '{"state":{"reported":' + deltaMessage + '}}' + self.deviceShadowInstance.shadowUpdate(newPayload, None, 5) + print("Sent.") + # Read in command-line parameters parser = argparse.ArgumentParser() @@ -48,7 +49,8 @@ def customShadowCallback_Delta(self, payload, responseStatus, token): parser.add_argument("-w", "--websocket", action="store_true", dest="useWebsocket", default=False, help="Use MQTT over WebSocket") parser.add_argument("-n", "--thingName", action="store", dest="thingName", default="Bot", help="Targeted thing name") -parser.add_argument("-id", "--clientId", action="store", dest="clientId", default="ThingShadowEcho", help="Targeted client id") +parser.add_argument("-id", "--clientId", action="store", dest="clientId", default="ThingShadowEcho", + help="Targeted client id") args = parser.parse_args() host = args.host @@ -60,12 +62,12 @@ def customShadowCallback_Delta(self, payload, responseStatus, token): clientId = args.clientId if args.useWebsocket and args.certificatePath and args.privateKeyPath: - parser.error("X.509 cert authentication and WebSocket are mutual exclusive. Please pick one.") - exit(2) + parser.error("X.509 cert authentication and WebSocket are mutual exclusive. 
Please pick one.") + exit(2) if not args.useWebsocket and (not args.certificatePath or not args.privateKeyPath): - parser.error("Missing credentials for authentication.") - exit(2) + parser.error("Missing credentials for authentication.") + exit(2) # Configure logging logger = logging.getLogger("AWSIoTPythonSDK.core") @@ -78,13 +80,13 @@ def customShadowCallback_Delta(self, payload, responseStatus, token): # Init AWSIoTMQTTShadowClient myAWSIoTMQTTShadowClient = None if useWebsocket: - myAWSIoTMQTTShadowClient = AWSIoTMQTTShadowClient(clientId, useWebsocket=True) - myAWSIoTMQTTShadowClient.configureEndpoint(host, 443) - myAWSIoTMQTTShadowClient.configureCredentials(rootCAPath) + myAWSIoTMQTTShadowClient = AWSIoTMQTTShadowClient(clientId, useWebsocket=True) + myAWSIoTMQTTShadowClient.configureEndpoint(host, 443) + myAWSIoTMQTTShadowClient.configureCredentials(rootCAPath) else: - myAWSIoTMQTTShadowClient = AWSIoTMQTTShadowClient(clientId) - myAWSIoTMQTTShadowClient.configureEndpoint(host, 8883) - myAWSIoTMQTTShadowClient.configureCredentials(rootCAPath, privateKeyPath, certificatePath) + myAWSIoTMQTTShadowClient = AWSIoTMQTTShadowClient(clientId) + myAWSIoTMQTTShadowClient.configureEndpoint(host, 8883) + myAWSIoTMQTTShadowClient.configureCredentials(rootCAPath, privateKeyPath, certificatePath) # AWSIoTMQTTShadowClient configuration myAWSIoTMQTTShadowClient.configureAutoReconnectBackoffTime(1, 32, 20) @@ -103,4 +105,4 @@ def customShadowCallback_Delta(self, payload, responseStatus, token): # Loop forever while True: - time.sleep(1) + time.sleep(1) diff --git a/samples/basicPubSub/basicPubSub.py b/samples/basicPubSub/basicPubSub.py index 1ef4e84..18c4af0 100755 --- a/samples/basicPubSub/basicPubSub.py +++ b/samples/basicPubSub/basicPubSub.py @@ -1,6 +1,6 @@ ''' /* - * Copyright 2010-2016 Amazon.com, Inc. or its affiliates. All Rights Reserved. + * Copyright 2010-2017 Amazon.com, Inc. or its affiliates. All Rights Reserved. 
* * Licensed under the Apache License, Version 2.0 (the "License"). * You may not use this file except in compliance with the License. @@ -16,18 +16,19 @@ ''' from AWSIoTPythonSDK.MQTTLib import AWSIoTMQTTClient -import sys import logging import time import argparse + # Custom MQTT message callback def customCallback(client, userdata, message): - print("Received a new message: ") - print(message.payload) - print("from topic: ") - print(message.topic) - print("--------------\n\n") + print("Received a new message: ") + print(message.payload) + print("from topic: ") + print(message.topic) + print("--------------\n\n") + # Read in command-line parameters parser = argparse.ArgumentParser() @@ -37,7 +38,8 @@ def customCallback(client, userdata, message): parser.add_argument("-k", "--key", action="store", dest="privateKeyPath", help="Private key file path") parser.add_argument("-w", "--websocket", action="store_true", dest="useWebsocket", default=False, help="Use MQTT over WebSocket") -parser.add_argument("-id", "--clientId", action="store", dest="clientId", default="basicPubSub", help="Targeted client id") +parser.add_argument("-id", "--clientId", action="store", dest="clientId", default="basicPubSub", + help="Targeted client id") parser.add_argument("-t", "--topic", action="store", dest="topic", default="sdk/test/Python", help="Targeted topic") args = parser.parse_args() @@ -50,12 +52,12 @@ def customCallback(client, userdata, message): topic = args.topic if args.useWebsocket and args.certificatePath and args.privateKeyPath: - parser.error("X.509 cert authentication and WebSocket are mutual exclusive. Please pick one.") - exit(2) + parser.error("X.509 cert authentication and WebSocket are mutual exclusive. 
Please pick one.") + exit(2) if not args.useWebsocket and (not args.certificatePath or not args.privateKeyPath): - parser.error("Missing credentials for authentication.") - exit(2) + parser.error("Missing credentials for authentication.") + exit(2) # Configure logging logger = logging.getLogger("AWSIoTPythonSDK.core") @@ -68,13 +70,13 @@ def customCallback(client, userdata, message): # Init AWSIoTMQTTClient myAWSIoTMQTTClient = None if useWebsocket: - myAWSIoTMQTTClient = AWSIoTMQTTClient(clientId, useWebsocket=True) - myAWSIoTMQTTClient.configureEndpoint(host, 443) - myAWSIoTMQTTClient.configureCredentials(rootCAPath) + myAWSIoTMQTTClient = AWSIoTMQTTClient(clientId, useWebsocket=True) + myAWSIoTMQTTClient.configureEndpoint(host, 443) + myAWSIoTMQTTClient.configureCredentials(rootCAPath) else: - myAWSIoTMQTTClient = AWSIoTMQTTClient(clientId) - myAWSIoTMQTTClient.configureEndpoint(host, 8883) - myAWSIoTMQTTClient.configureCredentials(rootCAPath, privateKeyPath, certificatePath) + myAWSIoTMQTTClient = AWSIoTMQTTClient(clientId) + myAWSIoTMQTTClient.configureEndpoint(host, 8883) + myAWSIoTMQTTClient.configureCredentials(rootCAPath, privateKeyPath, certificatePath) # AWSIoTMQTTClient connection configuration myAWSIoTMQTTClient.configureAutoReconnectBackoffTime(1, 32, 20) @@ -91,6 +93,6 @@ def customCallback(client, userdata, message): # Publish to the same topic in a loop forever loopCount = 0 while True: - myAWSIoTMQTTClient.publish(topic, "New Message " + str(loopCount), 1) - loopCount += 1 - time.sleep(1) + myAWSIoTMQTTClient.publish(topic, "New Message " + str(loopCount), 1) + loopCount += 1 + time.sleep(1) diff --git a/samples/basicPubSub/basicPubSubAsync.py b/samples/basicPubSub/basicPubSubAsync.py new file mode 100644 index 0000000..d8f6ded --- /dev/null +++ b/samples/basicPubSub/basicPubSubAsync.py @@ -0,0 +1,116 @@ +''' +/* + * Copyright 2010-2017 Amazon.com, Inc. or its affiliates. All Rights Reserved. 
+ * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://aws.amazon.com/apache2.0 + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. + */ + ''' + +from AWSIoTPythonSDK.MQTTLib import AWSIoTMQTTClient +import logging +import time +import argparse + + +# General message notification callback +def customOnMessage(message): + print("Received a new message: ") + print(message.payload) + print("from topic: ") + print(message.topic) + print("--------------\n\n") + + +# Suback callback +def customSubackCallback(mid, data): + print("Received SUBACK packet id: ") + print(mid) + print("Granted QoS: ") + print(data) + print("++++++++++++++\n\n") + + +# Puback callback +def customPubackCallback(mid): + print("Received PUBACK packet id: ") + print(mid) + print("++++++++++++++\n\n") + + +# Read in command-line parameters +parser = argparse.ArgumentParser() +parser.add_argument("-e", "--endpoint", action="store", required=True, dest="host", help="Your AWS IoT custom endpoint") +parser.add_argument("-r", "--rootCA", action="store", required=True, dest="rootCAPath", help="Root CA file path") +parser.add_argument("-c", "--cert", action="store", dest="certificatePath", help="Certificate file path") +parser.add_argument("-k", "--key", action="store", dest="privateKeyPath", help="Private key file path") +parser.add_argument("-w", "--websocket", action="store_true", dest="useWebsocket", default=False, + help="Use MQTT over WebSocket") +parser.add_argument("-id", "--clientId", action="store", dest="clientId", default="basicPubSub", + help="Targeted client id") +parser.add_argument("-t", "--topic", action="store", 
dest="topic", default="sdk/test/Python", help="Targeted topic") + +args = parser.parse_args() +host = args.host +rootCAPath = args.rootCAPath +certificatePath = args.certificatePath +privateKeyPath = args.privateKeyPath +useWebsocket = args.useWebsocket +clientId = args.clientId +topic = args.topic + +if args.useWebsocket and args.certificatePath and args.privateKeyPath: + parser.error("X.509 cert authentication and WebSocket are mutual exclusive. Please pick one.") + exit(2) + +if not args.useWebsocket and (not args.certificatePath or not args.privateKeyPath): + parser.error("Missing credentials for authentication.") + exit(2) + +# Configure logging +logger = logging.getLogger("AWSIoTPythonSDK.core") +logger.setLevel(logging.DEBUG) +streamHandler = logging.StreamHandler() +formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s') +streamHandler.setFormatter(formatter) +logger.addHandler(streamHandler) + +# Init AWSIoTMQTTClient +myAWSIoTMQTTClient = None +if useWebsocket: + myAWSIoTMQTTClient = AWSIoTMQTTClient(clientId, useWebsocket=True) + myAWSIoTMQTTClient.configureEndpoint(host, 443) + myAWSIoTMQTTClient.configureCredentials(rootCAPath) +else: + myAWSIoTMQTTClient = AWSIoTMQTTClient(clientId) + myAWSIoTMQTTClient.configureEndpoint(host, 8883) + myAWSIoTMQTTClient.configureCredentials(rootCAPath, privateKeyPath, certificatePath) + +# AWSIoTMQTTClient connection configuration +myAWSIoTMQTTClient.configureAutoReconnectBackoffTime(1, 32, 20) +myAWSIoTMQTTClient.configureOfflinePublishQueueing(-1) # Infinite offline Publish queueing +myAWSIoTMQTTClient.configureDrainingFrequency(2) # Draining: 2 Hz +myAWSIoTMQTTClient.configureConnectDisconnectTimeout(10) # 10 sec +myAWSIoTMQTTClient.configureMQTTOperationTimeout(5) # 5 sec +myAWSIoTMQTTClient.onMessage = customOnMessage + +# Connect and subscribe to AWS IoT +myAWSIoTMQTTClient.connect() +# Note that we are not putting a message callback here. 
We are using the general message notification callback. +myAWSIoTMQTTClient.subscribeAsync(topic, 1, ackCallback=customSubackCallback) +time.sleep(2) + +# Publish to the same topic in a loop forever +loopCount = 0 +while True: + myAWSIoTMQTTClient.publishAsync(topic, "New Message " + str(loopCount), 1, ackCallback=customPubackCallback) + loopCount += 1 + time.sleep(1) diff --git a/samples/basicPubSub/basicPubSub_CognitoSTS.py b/samples/basicPubSub/basicPubSub_CognitoSTS.py index f6d20e7..d67e624 100755 --- a/samples/basicPubSub/basicPubSub_CognitoSTS.py +++ b/samples/basicPubSub/basicPubSub_CognitoSTS.py @@ -1,6 +1,6 @@ ''' /* - * Copyright 2010-2016 Amazon.com, Inc. or its affiliates. All Rights Reserved. + * Copyright 2010-2017 Amazon.com, Inc. or its affiliates. All Rights Reserved. * * Licensed under the Apache License, Version 2.0 (the "License"). * You may not use this file except in compliance with the License. @@ -17,25 +17,28 @@ import boto3 from AWSIoTPythonSDK.MQTTLib import AWSIoTMQTTClient -import sys import logging import time import argparse + # Custom MQTT message callback def customCallback(client, userdata, message): - print("Received a new message: ") - print(message.payload) - print("from topic: ") - print(message.topic) - print("--------------\n\n") + print("Received a new message: ") + print(message.payload) + print("from topic: ") + print(message.topic) + print("--------------\n\n") + # Read in command-line parameters parser = argparse.ArgumentParser() parser.add_argument("-e", "--endpoint", action="store", required=True, dest="host", help="Your AWS IoT custom endpoint") parser.add_argument("-r", "--rootCA", action="store", required=True, dest="rootCAPath", help="Root CA file path") -parser.add_argument("-C", "--CognitoIdentityPoolID", action="store", required=True, dest="cognitoIdentityPoolID", help="Your AWS Cognito Identity Pool ID") -parser.add_argument("-id", "--clientId", action="store", dest="clientId", 
default="basicPubSub_CognitoSTS", help="Targeted client id") +parser.add_argument("-C", "--CognitoIdentityPoolID", action="store", required=True, dest="cognitoIdentityPoolID", + help="Your AWS Cognito Identity Pool ID") +parser.add_argument("-id", "--clientId", action="store", dest="clientId", default="basicPubSub_CognitoSTS", + help="Targeted client id") parser.add_argument("-t", "--topic", action="store", dest="topic", default="sdk/test/Python", help="Targeted topic") args = parser.parse_args() @@ -89,6 +92,6 @@ def customCallback(client, userdata, message): # Publish to the same topic in a loop forever loopCount = 0 while True: - myAWSIoTMQTTClient.publish(topic, "New Message " + str(loopCount), 1) - loopCount += 1 - time.sleep(1) + myAWSIoTMQTTClient.publish(topic, "New Message " + str(loopCount), 1) + loopCount += 1 + time.sleep(1) diff --git a/samples/basicShadow/basicShadowDeltaListener.py b/samples/basicShadow/basicShadowDeltaListener.py index 86d2b5c..b1b7a44 100755 --- a/samples/basicShadow/basicShadowDeltaListener.py +++ b/samples/basicShadow/basicShadowDeltaListener.py @@ -1,6 +1,6 @@ ''' /* - * Copyright 2010-2016 Amazon.com, Inc. or its affiliates. All Rights Reserved. + * Copyright 2010-2017 Amazon.com, Inc. or its affiliates. All Rights Reserved. * * Licensed under the Apache License, Version 2.0 (the "License"). * You may not use this file except in compliance with the License. @@ -16,12 +16,12 @@ ''' from AWSIoTPythonSDK.MQTTLib import AWSIoTMQTTShadowClient -import sys import logging import time import json import argparse + # Shadow JSON schema: # # Name: Bot @@ -31,18 +31,19 @@ # "property": # } # } -#} +# } # Custom Shadow callback def customShadowCallback_Delta(payload, responseStatus, token): - # payload is a JSON string ready to be parsed using json.loads(...) 
- # in both Py2.x and Py3.x - print(responseStatus) - payloadDict = json.loads(payload) - print("++++++++DELTA++++++++++") - print("property: " + str(payloadDict["state"]["property"])) - print("version: " + str(payloadDict["version"])) - print("+++++++++++++++++++++++\n\n") + # payload is a JSON string ready to be parsed using json.loads(...) + # in both Py2.x and Py3.x + print(responseStatus) + payloadDict = json.loads(payload) + print("++++++++DELTA++++++++++") + print("property: " + str(payloadDict["state"]["property"])) + print("version: " + str(payloadDict["version"])) + print("+++++++++++++++++++++++\n\n") + # Read in command-line parameters parser = argparse.ArgumentParser() @@ -53,7 +54,8 @@ def customShadowCallback_Delta(payload, responseStatus, token): parser.add_argument("-w", "--websocket", action="store_true", dest="useWebsocket", default=False, help="Use MQTT over WebSocket") parser.add_argument("-n", "--thingName", action="store", dest="thingName", default="Bot", help="Targeted thing name") -parser.add_argument("-id", "--clientId", action="store", dest="clientId", default="basicShadowDeltaListener", help="Targeted client id") +parser.add_argument("-id", "--clientId", action="store", dest="clientId", default="basicShadowDeltaListener", + help="Targeted client id") args = parser.parse_args() host = args.host @@ -65,12 +67,12 @@ def customShadowCallback_Delta(payload, responseStatus, token): clientId = args.clientId if args.useWebsocket and args.certificatePath and args.privateKeyPath: - parser.error("X.509 cert authentication and WebSocket are mutual exclusive. Please pick one.") - exit(2) + parser.error("X.509 cert authentication and WebSocket are mutual exclusive. 
Please pick one.") + exit(2) if not args.useWebsocket and (not args.certificatePath or not args.privateKeyPath): - parser.error("Missing credentials for authentication.") - exit(2) + parser.error("Missing credentials for authentication.") + exit(2) # Configure logging logger = logging.getLogger("AWSIoTPythonSDK.core") @@ -83,13 +85,13 @@ def customShadowCallback_Delta(payload, responseStatus, token): # Init AWSIoTMQTTShadowClient myAWSIoTMQTTShadowClient = None if useWebsocket: - myAWSIoTMQTTShadowClient = AWSIoTMQTTShadowClient(clientId, useWebsocket=True) - myAWSIoTMQTTShadowClient.configureEndpoint(host, 443) - myAWSIoTMQTTShadowClient.configureCredentials(rootCAPath) + myAWSIoTMQTTShadowClient = AWSIoTMQTTShadowClient(clientId, useWebsocket=True) + myAWSIoTMQTTShadowClient.configureEndpoint(host, 443) + myAWSIoTMQTTShadowClient.configureCredentials(rootCAPath) else: - myAWSIoTMQTTShadowClient = AWSIoTMQTTShadowClient(clientId) - myAWSIoTMQTTShadowClient.configureEndpoint(host, 8883) - myAWSIoTMQTTShadowClient.configureCredentials(rootCAPath, privateKeyPath, certificatePath) + myAWSIoTMQTTShadowClient = AWSIoTMQTTShadowClient(clientId) + myAWSIoTMQTTShadowClient.configureEndpoint(host, 8883) + myAWSIoTMQTTShadowClient.configureCredentials(rootCAPath, privateKeyPath, certificatePath) # AWSIoTMQTTShadowClient configuration myAWSIoTMQTTShadowClient.configureAutoReconnectBackoffTime(1, 32, 20) @@ -107,4 +109,4 @@ def customShadowCallback_Delta(payload, responseStatus, token): # Loop forever while True: - time.sleep(1) + time.sleep(1) diff --git a/samples/basicShadow/basicShadowUpdater.py b/samples/basicShadow/basicShadowUpdater.py index 8b7d39f..c2e202c 100755 --- a/samples/basicShadow/basicShadowUpdater.py +++ b/samples/basicShadow/basicShadowUpdater.py @@ -1,6 +1,6 @@ ''' /* - * Copyright 2010-2016 Amazon.com, Inc. or its affiliates. All Rights Reserved. + * Copyright 2010-2017 Amazon.com, Inc. or its affiliates. All Rights Reserved. 
* * Licensed under the Apache License, Version 2.0 (the "License"). * You may not use this file except in compliance with the License. @@ -16,7 +16,6 @@ ''' from AWSIoTPythonSDK.MQTTLib import AWSIoTMQTTShadowClient -import sys import logging import time import json diff --git a/samples/greengrass/basicDiscovery.py b/samples/greengrass/basicDiscovery.py new file mode 100644 index 0000000..446bd32 --- /dev/null +++ b/samples/greengrass/basicDiscovery.py @@ -0,0 +1,157 @@ +# /* +# * Copyright 2010-2017 Amazon.com, Inc. or its affiliates. All Rights Reserved. +# * +# * Licensed under the Apache License, Version 2.0 (the "License"). +# * You may not use this file except in compliance with the License. +# * A copy of the License is located at +# * +# * http://aws.amazon.com/apache2.0 +# * +# * or in the "license" file accompanying this file. This file is distributed +# * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either +# * express or implied. See the License for the specific language governing +# * permissions and limitations under the License. 
+# */ + + +import os +import sys +import time +import uuid +import logging +import argparse +from AWSIoTPythonSDK.core.greengrass.discovery.providers import DiscoveryInfoProvider +from AWSIoTPythonSDK.core.protocol.connection.cores import ProgressiveBackOffCore +from AWSIoTPythonSDK.MQTTLib import AWSIoTMQTTClient +from AWSIoTPythonSDK.exception.AWSIoTExceptions import DiscoveryInvalidRequestException + + +# General message notification callback +def customOnMessage(message): + print("Received a new message: ") + print(message.payload) + print("from topic: ") + print(message.topic) + print("--------------\n\n") + + +MAX_DISCOVERY_RETRIES = 10 +GROUP_CA_PATH = "./groupCA/" + +# Read in command-line parameters +parser = argparse.ArgumentParser() +parser.add_argument("-e", "--endpoint", action="store", required=True, dest="host", help="Your AWS IoT custom endpoint") +parser.add_argument("-r", "--rootCA", action="store", required=True, dest="rootCAPath", help="Root CA file path") +parser.add_argument("-c", "--cert", action="store", dest="certificatePath", help="Certificate file path") +parser.add_argument("-k", "--key", action="store", dest="privateKeyPath", help="Private key file path") +parser.add_argument("-n", "--thingName", action="store", dest="thingName", default="Bot", help="Targeted thing name") +parser.add_argument("-t", "--topic", action="store", dest="topic", default="sdk/test/Python", help="Targeted topic") + +args = parser.parse_args() +host = args.host +rootCAPath = args.rootCAPath +certificatePath = args.certificatePath +privateKeyPath = args.privateKeyPath +clientId = args.thingName +thingName = args.thingName +topic = args.topic + +if not args.certificatePath or not args.privateKeyPath: + parser.error("Missing credentials for authentication.") + exit(2) + +# Configure logging +logger = logging.getLogger("AWSIoTPythonSDK.core") +logger.setLevel(logging.DEBUG) +streamHandler = logging.StreamHandler() +formatter = logging.Formatter('%(asctime)s - 
%(name)s - %(levelname)s - %(message)s') +streamHandler.setFormatter(formatter) +logger.addHandler(streamHandler) + +# Progressive back off core +backOffCore = ProgressiveBackOffCore() + +# Discover GGCs +discoveryInfoProvider = DiscoveryInfoProvider() +discoveryInfoProvider.configureEndpoint(host) +discoveryInfoProvider.configureCredentials(rootCAPath, certificatePath, privateKeyPath) +discoveryInfoProvider.configureTimeout(10) # 10 sec + +retryCount = MAX_DISCOVERY_RETRIES +discovered = False +groupCA = None +coreInfo = None +while retryCount != 0: + try: + discoveryInfo = discoveryInfoProvider.discover(thingName) + caList = discoveryInfo.getAllCas() + coreList = discoveryInfo.getAllCores() + + # We only pick the first ca and core info + groupId, ca = caList[0] + coreInfo = coreList[0] + print("Discovered GGC: %s from Group: %s" % (coreInfo.coreThingArn, groupId)) + + print("Now we persist the connectivity/identity information...") + groupCA = GROUP_CA_PATH + groupId + "_CA_" + str(uuid.uuid4()) + ".crt" + if not os.path.exists(GROUP_CA_PATH): + os.makedirs(GROUP_CA_PATH) + groupCAFile = open(groupCA, "w") + groupCAFile.write(ca) + groupCAFile.close() + + discovered = True + print("Now proceed to the connecting flow...") + break + except DiscoveryInvalidRequestException as e: + print("Invalid discovery request detected!") + print("Type: %s" % str(type(e))) + print("Error message: %s" % e.message) + print("Stopping...") + break + except BaseException as e: + print("Error in discovery!") + print("Type: %s" % str(type(e))) + print("Error message: %s" % e.message) + retryCount -= 1 + print("\n%d/%d retries left\n" % (retryCount, MAX_DISCOVERY_RETRIES)) + print("Backing off...\n") + backOffCore.backOff() + +if not discovered: + print("Discovery failed after %d retries. 
Exiting...\n" % (MAX_DISCOVERY_RETRIES)) + sys.exit(-1) + +# Iterate through all connection options for the core and use the first successful one +myAWSIoTMQTTClient = AWSIoTMQTTClient(clientId) +myAWSIoTMQTTClient.configureCredentials(groupCA, privateKeyPath, certificatePath) +myAWSIoTMQTTClient.onMessage = customOnMessage + +connected = False +for connectivityInfo in coreInfo.connectivityInfoList: + currentHost = connectivityInfo.host + currentPort = connectivityInfo.port + print("Trying to connect to core at %s:%d" % (currentHost, currentPort)) + myAWSIoTMQTTClient.configureEndpoint(currentHost, currentPort) + try: + myAWSIoTMQTTClient.connect() + connected = True + break + except BaseException as e: + print("Error in connect!") + print("Type: %s" % str(type(e))) + print("Error message: %s" % e.message) + +if not connected: + print("Cannot connect to core %s. Exiting..." % coreInfo.coreThingArn) + sys.exit(-2) + +# Successfully connected to the core +myAWSIoTMQTTClient.subscribe(topic, 0, None) +time.sleep(2) + +loopCount = 0 +while True: + myAWSIoTMQTTClient.publish(topic, "New Message " + str(loopCount), 0) + loopCount += 1 + time.sleep(1) diff --git a/setup.py b/setup.py index 86ba48a..e149243 100644 --- a/setup.py +++ b/setup.py @@ -6,11 +6,11 @@ from distutils.core import setup setup( name = 'AWSIoTPythonSDK', - packages = ['AWSIoTPythonSDK', "AWSIoTPythonSDK.core", \ - "AWSIoTPythonSDK.exception", "AWSIoTPythonSDK.core.shadow", \ - "AWSIoTPythonSDK.core.util", \ - "AWSIoTPythonSDK.core.protocol", "AWSIoTPythonSDK.core.protocol.paho", \ - "AWSIoTPythonSDK.core.protocol.paho.securedWebsocket"], + packages=['AWSIoTPythonSDK', 'AWSIoTPythonSDK.core', + 'AWSIoTPythonSDK.core.util', 'AWSIoTPythonSDK.core.shadow', 'AWSIoTPythonSDK.core.protocol', + 'AWSIoTPythonSDK.core.protocol.paho', 'AWSIoTPythonSDK.core.protocol.internal', + 'AWSIoTPythonSDK.core.protocol.connection', 'AWSIoTPythonSDK.core.greengrass', + 'AWSIoTPythonSDK.core.greengrass.discovery', 
'AWSIoTPythonSDK.exception'], version = currentVersion, description = 'SDK for connecting to AWS IoT using Python.', author = 'Amazon Web Service', @@ -19,15 +19,15 @@ download_url = 'https://s3.amazonaws.com/aws-iot-device-sdk-python/aws-iot-device-sdk-python-latest.zip', keywords = ['aws', 'iot', 'mqtt'], classifiers = [ - "Development Status :: 5 - Production/Stable", \ - "Intended Audience :: Developers", \ - "Natural Language :: English", \ - "License :: OSI Approved :: Apache Software License", \ - "Programming Language :: Python", \ - "Programming Language :: Python :: 2.7", \ - "Programming Language :: Python :: 3", \ - "Programming Language :: Python :: 3.3", \ - "Programming Language :: Python :: 3.4", \ + "Development Status :: 5 - Production/Stable", + "Intended Audience :: Developers", + "Natural Language :: English", + "License :: OSI Approved :: Apache Software License", + "Programming Language :: Python", + "Programming Language :: Python :: 2.7", + "Programming Language :: Python :: 3", + "Programming Language :: Python :: 3.3", + "Programming Language :: Python :: 3.4", "Programming Language :: Python :: 3.5" ] )