diff --git a/docs/install/install-script.md b/docs/install/install-script.md
index 513d5daf..c129444b 100644
--- a/docs/install/install-script.md
+++ b/docs/install/install-script.md
@@ -14,7 +14,7 @@ We provide an install script to quickly run ModelMesh Serving with a provisioned
 
 The install script has a `--quickstart` option for setting up a self-contained ModelMesh Serving instance. This will deploy and configure local etcd and MinIO servers in the same Kubernetes namespace. Note that this is only for experimentation and/or development use - in particular the connections to these datastores are not secure and the etcd cluster is a single member which is not highly available. Use of `--quickstart` also configures the `storage-config` secret to be able to pull from the [ModelMesh Serving example models bucket](../example-models.md) which contains the model data for the sample `InferenceService`s. For complete details on the manifests applied with `--quickstart` see [config/dependencies/quickstart.yaml](https://github.com/kserve/modelmesh-serving/blob/main/config/dependencies/quickstart.yaml).
 
-## Setup the etcd connection information
+## Set up the etcd connection information
 
 If the `--quickstart` install option is **not** being used, details of an existing etcd cluster must be specified prior to installation. Otherwise, please skip this step and proceed to [Installation](#installation).
 
diff --git a/docs/quickstart.md b/docs/quickstart.md
index 759b3ea5..37578516 100644
--- a/docs/quickstart.md
+++ b/docs/quickstart.md
@@ -22,7 +22,7 @@ cd modelmesh-serving
 
 ```shell
 kubectl create namespace modelmesh-serving
-./scripts/install.sh --namespace modelmesh-serving --quickstart
+./scripts/install.sh --namespace-scope-mode --namespace modelmesh-serving --quickstart
 ```
 
 This will install ModelMesh Serving in the `modelmesh-serving` namespace, along with an etcd and MinIO instances.
@@ -53,6 +53,7 @@ kubectl get servingruntimes
 NAME             DISABLED   MODELTYPE     CONTAINERS   AGE
 mlserver-0.x                sklearn       mlserver     5m
 ovms-1.x                    openvino_ir   ovms         5m
+torchserve-0.x              pytorch-mar   torchserve   5m
 triton-2.x                  tensorflow    triton       5m
 ```
@@ -62,9 +63,10 @@ are:
 
 | ServingRuntime | Supported Frameworks                |
 | -------------- | ----------------------------------- |
-| triton-2.x     | tensorflow, pytorch, onnx, tensorrt |
 | mlserver-0.x   | sklearn, xgboost, lightgbm          |
 | ovms-1.x       | openvino_ir, onnx                   |
+| torchserve-0.x | pytorch-mar                         |
+| triton-2.x     | tensorflow, pytorch, onnx, tensorrt |
 
 ## 2. Deploy a model
 
diff --git a/scripts/install.sh b/scripts/install.sh
index 34b31556..a73ac028 100755
--- a/scripts/install.sh
+++ b/scripts/install.sh
@@ -283,7 +283,8 @@ info "Installing ModelMesh Serving RBACs (namespace_scope_mode=$namespace_scope_
 if [[ $namespace_scope_mode == "true" ]]; then
   kustomize build rbac/namespace-scope | kubectl apply -f -
   # We don't install the ClusterServingRuntime CRD when in namespace scope mode, so comment it out first in the CRD manifest file
-  sed -i 's/- bases\/serving.kserve.io_clusterservingruntimes.yaml/#- bases\/serving.kserve.io_clusterservingruntimes.yaml/g' crd/kustomization.yaml
+  sed -i.bak 's/- bases\/serving.kserve.io_clusterservingruntimes.yaml/#- bases\/serving.kserve.io_clusterservingruntimes.yaml/g' crd/kustomization.yaml
+  rm crd/kustomization.yaml.bak
 else
   kustomize build rbac/cluster-scope | kubectl apply -f -
 fi
@@ -299,8 +300,9 @@ fi
 if [[ $namespace_scope_mode == "true" ]]; then
   info "Enabling namespace scope mode"
   kubectl set env deploy/modelmesh-controller NAMESPACE_SCOPE=true
-  # Reset crd/kustomization.yaml back to CSR crd since we used the same file for namespace scope mode installation
-  sed -i 's/#- bases\/serving.kserve.io_clusterservingruntimes.yaml/- bases\/serving.kserve.io_clusterservingruntimes.yaml/g' crd/kustomization.yaml
+  # Reset crd/kustomization.yaml back to CSR crd since we used the same file for namespace scope mode installation
+  sed -i.bak 's/#- bases\/serving.kserve.io_clusterservingruntimes.yaml/- bases\/serving.kserve.io_clusterservingruntimes.yaml/g' crd/kustomization.yaml
+  rm crd/kustomization.yaml.bak
 fi
 
 info "Waiting for ModelMesh Serving controller pod to be up..."