From 9164d9a6526eeaefbf9c7b4ac78a7fb9ad31bf93 Mon Sep 17 00:00:00 2001 From: jiqing-feng Date: Wed, 27 Dec 2023 10:28:12 -0500 Subject: [PATCH] add ipex --- examples/textual_inversion/README.md | 7 +++++++ examples/textual_inversion/textual_inversion.py | 2 +- 2 files changed, 8 insertions(+), 1 deletion(-) diff --git a/examples/textual_inversion/README.md b/examples/textual_inversion/README.md index 0a2723f0982f..3d60ecc7459c 100644 --- a/examples/textual_inversion/README.md +++ b/examples/textual_inversion/README.md @@ -94,6 +94,13 @@ to a number larger than one, *e.g.*: --num_vectors 5 ``` +**CPU**: If you run on an Intel 4th Gen Xeon (or later) CPU, using IPEX with bf16 precision will give a significant speedup. +You need to add `--mixed_precision="bf16"` and `--use_ipex` to the command and install the following packages: +``` +pip install torch==2.0.1 torchvision==0.15.2 torchaudio==2.0.2 --index-url https://download.pytorch.org/whl/cpu +pip install intel-extension-for-pytorch==2.0.0 +``` + The saved textual inversion vectors will then be larger in size compared to the default case. ### Inference diff --git a/examples/textual_inversion/textual_inversion.py b/examples/textual_inversion/textual_inversion.py index b3acb2ba7e98..af1bf3dcdf6d 100644 --- a/examples/textual_inversion/textual_inversion.py +++ b/examples/textual_inversion/textual_inversion.py @@ -349,7 +349,7 @@ def parse_args(): action="store_true", help=( "Whether or not to use ipex to accelerate the training process," - "requires Intel Gen 3rd Xeon (and later) or Intel XPU (PVC)" + "requires Intel Gen 3rd Xeon (and later)" ), ) parser.add_argument(