From 8a9cc89dabc77a059812e3a80b2f0ee94ee8d18f Mon Sep 17 00:00:00 2001 From: Levko Kravets Date: Wed, 7 Feb 2024 19:12:26 +0200 Subject: [PATCH] Prepare release 1.8.0 Signed-off-by: Levko Kravets --- CHANGELOG.md | 41 +++++++++++++++++++++++++++++++++++++++++ package-lock.json | 4 ++-- package.json | 2 +- 3 files changed, 44 insertions(+), 3 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index ff7fd0a8..07c08e20 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,5 +1,46 @@ # Release History +## 1.8.0 + +### Highlights + +- Retry failed CloudFetch requests (databricks/databricks-sql-nodejs#211) +- Fixed compatibility issues with Node@14 (databricks/databricks-sql-nodejs#219) +- Support Databricks OAuth on Azure (databricks/databricks-sql-nodejs#223) +- Support Databricks OAuth on GCP (databricks/databricks-sql-nodejs#224) +- Support LZ4 compression for Arrow and CloudFetch results (databricks/databricks-sql-nodejs#216) +- Fix OAuth M2M flow on Azure (databricks/databricks-sql-nodejs#228) + +### OAuth on Azure + +Some Azure instances now support Databricks native OAuth flow (in addition to AAD OAuth). For backward +compatibility, the library will continue using the AAD OAuth flow by default. To use Databricks native OAuth, +pass `useDatabricksOAuthInAzure: true` to `client.connect()`: + +```ts +client.connect({ + // other options - host, port, etc. + authType: 'databricks-oauth', + useDatabricksOAuthInAzure: true, + // other OAuth options if needed +}); +``` + +Also, we fixed an issue with AAD OAuth where wrong scopes were passed for the M2M flow. + +### OAuth on GCP + +We enabled OAuth support on GCP instances. Since it uses Databricks native OAuth, +all the options are the same as for OAuth on AWS instances. + +### CloudFetch improvements + +Now the library will automatically attempt to retry failed CloudFetch requests. Currently, the retry strategy is quite basic, +but it is going to be improved in the future.
+ +Also, we implemented support for LZ4-compressed results (Arrow- and CloudFetch-based). It is enabled by default, +and compression will be used if the server supports it. + ## 1.7.1 - Fix "Premature close" error which happened due to socket limit when intensively using library diff --git a/package-lock.json b/package-lock.json index 5aff2a58..113cccab 100644 --- a/package-lock.json +++ b/package-lock.json @@ -1,12 +1,12 @@ { "name": "@databricks/sql", - "version": "1.7.1", + "version": "1.8.0", "lockfileVersion": 2, "requires": true, "packages": { "": { "name": "@databricks/sql", - "version": "1.7.1", + "version": "1.8.0", "license": "Apache 2.0", "dependencies": { "apache-arrow": "^13.0.0", diff --git a/package.json b/package.json index 0ae4b5ed..0c2b1c49 100644 --- a/package.json +++ b/package.json @@ -1,6 +1,6 @@ { "name": "@databricks/sql", - "version": "1.7.1", + "version": "1.8.0", "description": "Driver for connection to Databricks SQL via Thrift API.", "main": "dist/index.js", "types": "dist/index.d.ts",