diff --git a/Dockerfile b/Dockerfile
index 8945326db..2aaed1ffb 100644
--- a/Dockerfile
+++ b/Dockerfile
@@ -12,11 +12,11 @@
# See the License for the specific language governing permissions and
# limitations under the License.
-ARG BASE_IMAGE=nvcr.io/nvidia/tritonserver:23.08-py3
-ARG TRITONSDK_BASE_IMAGE=nvcr.io/nvidia/tritonserver:23.08-py3-sdk
+ARG BASE_IMAGE=nvcr.io/nvidia/tritonserver:23.09-py3
+ARG TRITONSDK_BASE_IMAGE=nvcr.io/nvidia/tritonserver:23.09-py3-sdk
-ARG MODEL_ANALYZER_VERSION=1.32.0dev
-ARG MODEL_ANALYZER_CONTAINER_VERSION=23.09dev
+ARG MODEL_ANALYZER_VERSION=1.32.0
+ARG MODEL_ANALYZER_CONTAINER_VERSION=23.09
FROM ${TRITONSDK_BASE_IMAGE} as sdk
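The `ARG` lines above parameterize the image build; a minimal sketch of overriding them with `docker build` (the `model-analyzer` tag is an illustrative name, not one the Dockerfile defines):

```
# Build against the 23.09 Triton images; --build-arg overrides the
# Dockerfile's ARG defaults and is shown here only for illustration.
docker build \
  --build-arg BASE_IMAGE=nvcr.io/nvidia/tritonserver:23.09-py3 \
  --build-arg TRITONSDK_BASE_IMAGE=nvcr.io/nvidia/tritonserver:23.09-py3-sdk \
  -t model-analyzer .
```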
diff --git a/README.md b/README.md
index 778888abe..0ee96e8d8 100644
--- a/README.md
+++ b/README.md
@@ -21,9 +21,9 @@ limitations under the License.
>**LATEST RELEASE:**
You are currently on the `main` branch which tracks
under-development progress towards the next release.
The latest
-release of the Triton Model Analyzer is 1.31.0 and is available on
+release of the Triton Model Analyzer is 1.32.0 and is available on
branch
-[r23.08](https://github.com/triton-inference-server/model_analyzer/tree/r23.08).
+[r23.09](https://github.com/triton-inference-server/model_analyzer/tree/r23.09).
Triton Model Analyzer is a CLI tool which can help you find a more optimal configuration, on a given piece of hardware, for single, multiple, ensemble, or BLS models running on a [Triton Inference Server](https://github.com/triton-inference-server/server/). Model Analyzer will also generate reports to help you better understand the trade-offs of the different configurations along with their compute and memory requirements.
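Since the paragraph above mentions generated reports, here is a hedged sketch of requesting detailed reports after a profiling run; the config names `add_sub_config_0`/`add_sub_config_1` are hypothetical examples of names a search produces:

```
# Generate detailed reports for two model configs found by a prior
# `model-analyzer profile` run; config names and export path are placeholders.
model-analyzer report \
  --report-model-configs add_sub_config_0,add_sub_config_1 \
  --export-path $(pwd)/analysis_results
```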
diff --git a/VERSION b/VERSION
index d45493b04..359c41089 100644
--- a/VERSION
+++ b/VERSION
@@ -1 +1 @@
-1.32.0dev
+1.32.0
diff --git a/docs/config.md b/docs/config.md
index 0cdedb546..ca6db1608 100644
--- a/docs/config.md
+++ b/docs/config.md
@@ -153,7 +153,7 @@ cpu_only_composing_models:
[ reload_model_disable: <bool> | default: false]
# Triton Docker image tag used when launching using Docker mode
-[ triton_docker_image: <string> | default: nvcr.io/nvidia/tritonserver:23.08-py3 ]
+[ triton_docker_image: <string> | default: nvcr.io/nvidia/tritonserver:23.09-py3 ]
# Triton Server HTTP endpoint URL used by Model Analyzer client
[ triton_http_endpoint: <string> | default: localhost:8000 ]
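The options in the hunk above belong to Model Analyzer's YAML config file; a minimal sketch of a config that pins the bumped image, assuming illustrative paths and model names:

```
# Write a minimal config that pins the 23.09 Triton image, then pass it
# with -f. The model_repository and profile_models values are placeholders.
cat > config.yaml <<'EOF'
model_repository: /workspace/examples/quick-start
profile_models:
  - add_sub
triton_docker_image: nvcr.io/nvidia/tritonserver:23.09-py3
triton_http_endpoint: localhost:8000
EOF
model-analyzer profile -f config.yaml
```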
diff --git a/docs/kubernetes_deploy.md b/docs/kubernetes_deploy.md
index 0d76e2e9c..0ef7067cd 100644
--- a/docs/kubernetes_deploy.md
+++ b/docs/kubernetes_deploy.md
@@ -79,7 +79,7 @@ images:
triton:
image: nvcr.io/nvidia/tritonserver
- tag: 23.08-py3
+ tag: 23.09-py3
```
The model analyzer executable uses the config file defined in `helm-chart/templates/config-map.yaml`. This config can be modified to supply arguments to model analyzer. Only the content under the `config.yaml` section of the file should be modified.
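As a hedged sketch of how the bumped tag reaches a cluster (the release name is illustrative, the chart path assumes the repo checkout layout, and `--set` simply mirrors the `images.triton.tag` value above):

```
# Install the chart with the 23.09 Triton image; "model-analyzer" is an
# example release name chosen for this sketch.
helm install model-analyzer helm-chart/ \
  --set images.triton.tag=23.09-py3
```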
diff --git a/docs/mm_quick_start.md b/docs/mm_quick_start.md
index 3ac7ff7ff..628894014 100644
--- a/docs/mm_quick_start.md
+++ b/docs/mm_quick_start.md
@@ -49,7 +49,7 @@ git pull origin main
**1. Pull the SDK container:**
```
-docker pull nvcr.io/nvidia/tritonserver:23.08-py3-sdk
+docker pull nvcr.io/nvidia/tritonserver:23.09-py3-sdk
```
**2. Run the SDK container**
@@ -58,7 +58,7 @@ docker pull nvcr.io/nvidia/tritonserver:23.08-py3-sdk
docker run -it --gpus all \
-v /var/run/docker.sock:/var/run/docker.sock \
-v $(pwd)/examples/quick-start:$(pwd)/examples/quick-start \
- --net=host nvcr.io/nvidia/tritonserver:23.08-py3-sdk
+ --net=host nvcr.io/nvidia/tritonserver:23.09-py3-sdk
```
## `Step 3:` Profile both models concurrently
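The step heading above leads into a profiling command; a minimal sketch, assuming the quick-start models are named `add` and `sub` and that the flag mirrors `DEFAULT_RUN_CONFIG_PROFILE_MODELS_CONCURRENTLY_ENABLE` from `config_defaults.py` below:

```
# Profile both quick-start models in one run; the path is a placeholder
# and the concurrency flag is inferred from the config default of the
# same name.
model-analyzer profile \
  --model-repository $(pwd)/examples/quick-start \
  --profile-models add,sub \
  --run-config-profile-models-concurrently-enable
```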
diff --git a/docs/quick_start.md b/docs/quick_start.md
index e934727d5..a2a54ed8c 100644
--- a/docs/quick_start.md
+++ b/docs/quick_start.md
@@ -49,7 +49,7 @@ git pull origin main
**1. Pull the SDK container:**
```
-docker pull nvcr.io/nvidia/tritonserver:23.08-py3-sdk
+docker pull nvcr.io/nvidia/tritonserver:23.09-py3-sdk
```
**2. Run the SDK container**
@@ -58,7 +58,7 @@ docker pull nvcr.io/nvidia/tritonserver:23.08-py3-sdk
docker run -it --gpus all \
-v /var/run/docker.sock:/var/run/docker.sock \
-v $(pwd)/examples/quick-start:$(pwd)/examples/quick-start \
- --net=host nvcr.io/nvidia/tritonserver:23.08-py3-sdk
+ --net=host nvcr.io/nvidia/tritonserver:23.09-py3-sdk
```
## `Step 3:` Profile the `add_sub` model
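The heading above opens the single-model step; a minimal sketch of the profiling command it leads into, with the repository path as a placeholder:

```
# Profile the add_sub quick-start model from inside the SDK container.
model-analyzer profile \
  --model-repository $(pwd)/examples/quick-start \
  --profile-models add_sub
```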
diff --git a/helm-chart/values.yaml b/helm-chart/values.yaml
index d87127d1b..a95f80ae8 100644
--- a/helm-chart/values.yaml
+++ b/helm-chart/values.yaml
@@ -41,4 +41,4 @@ images:
triton:
image: nvcr.io/nvidia/tritonserver
- tag: 23.08-py3
+ tag: 23.09-py3
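A quick way to confirm the new tag actually lands in the rendered manifests; `helm template` does a local dry-run render, so no cluster is needed (a sketch assuming the chart lives under `helm-chart/`):

```
# Render the chart locally and check that the bumped image tag appears.
helm template helm-chart/ | grep 'tritonserver:23.09-py3'
```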
diff --git a/model_analyzer/config/input/config_defaults.py b/model_analyzer/config/input/config_defaults.py
index 22f0c9821..67c62dca9 100755
--- a/model_analyzer/config/input/config_defaults.py
+++ b/model_analyzer/config/input/config_defaults.py
@@ -56,7 +56,7 @@
DEFAULT_RUN_CONFIG_PROFILE_MODELS_CONCURRENTLY_ENABLE = False
DEFAULT_REQUEST_RATE_SEARCH_ENABLE = False
DEFAULT_TRITON_LAUNCH_MODE = "local"
-DEFAULT_TRITON_DOCKER_IMAGE = "nvcr.io/nvidia/tritonserver:23.08-py3"
+DEFAULT_TRITON_DOCKER_IMAGE = "nvcr.io/nvidia/tritonserver:23.09-py3"
DEFAULT_TRITON_HTTP_ENDPOINT = "localhost:8000"
DEFAULT_TRITON_GRPC_ENDPOINT = "localhost:8001"
DEFAULT_TRITON_METRICS_URL = "http://localhost:8002/metrics"
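Each `DEFAULT_*` constant above corresponds to a CLI option; a minimal sketch of overriding the pinned image at run time (paths and model name are placeholders):

```
# Launch Triton in Docker mode with an explicit image, overriding
# DEFAULT_TRITON_DOCKER_IMAGE; the other defaults are left untouched.
model-analyzer profile \
  --model-repository $(pwd)/examples/quick-start \
  --profile-models add_sub \
  --triton-launch-mode docker \
  --triton-docker-image nvcr.io/nvidia/tritonserver:23.09-py3
```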