diff --git a/DEVELOPER_GUIDE.md b/DEVELOPER_GUIDE.md
index 698d3a2..f281a70 100644
--- a/DEVELOPER_GUIDE.md
+++ b/DEVELOPER_GUIDE.md
@@ -32,12 +32,13 @@ hatch run pre-commit install
 
 On contribution:
 
 1. Add desired example under `examples/`
-2. Update `tests/ui/user.py` to include your new example
+2. Update `tests/ui/inputs.py` to include your new example
 3. Then run the following commands, updating `<example_name>` and `<branch_name>`
 
 ```bash
 hatch run pytest -s -m ui --screenshot on --video on --headed -k <example_name>
 hatch run docs-build
+hatch run docs-serve # to make sure everything looks correct
 git checkout -b <branch_name>
 git add <changed_files>
 git commit
 git push origin <branch_name>
diff --git a/docs/applicable_recipes.md b/docs/applicable_recipes.md
index 3888be0..49789b1 100644
--- a/docs/applicable_recipes.md
+++ b/docs/applicable_recipes.md
@@ -1,6 +1,105 @@
 # Applicable Recipes
 
 Demonstrates how to use Panel's chat components to achieve specific tasks with popular LLM packages.
 
+## Openai Images Dall E
+
+
+
+
+
+<details>
+
+<summary>Source code for openai_images_dall_e.py</summary>
+
+```python
+import panel as pn
+from openai import AsyncOpenAI
+
+pn.extension()
+
+
+async def callback(contents: str, user: str, instance: pn.chat.ChatInterface):
+    if api_key_input.value:
+        # use api_key_input.value if set, otherwise use OPENAI_API_KEY
+        aclient.api_key = api_key_input.value
+
+    response = await aclient.images.generate(
+        model=model_buttons.value,
+        prompt=contents,
+        n=n_images_slider.value,
+        size=size_buttons.value,
+    )
+
+    image_panes = [(str(i), pn.pane.Image(data.url)) for i, data in enumerate(response.data)]
+    return pn.Tabs(*image_panes) if len(image_panes) > 1 else image_panes[0][1]
+
+
+def update_model_params(model):
+    if model == "dall-e-2":
+        size_buttons.param.update(
+            options=["256x256", "512x512", "1024x1024"],
+            value="256x256",
+        )
+        n_images_slider.param.update(
+            start=1,
+            end=10,
+            value=1,
+        )
+    else:
+        size_buttons.param.update(
+            options=["1024x1024", "1024x1792", "1792x1024"],
+            value="1024x1024",
+        )
+        n_images_slider.param.update(
+            start=1,
+            end=1,
+            value=1,
+        )
+
+
+aclient = AsyncOpenAI()
+api_key_input = pn.widgets.PasswordInput(
+    placeholder="sk-... uses $OPENAI_API_KEY if not set",
+    sizing_mode="stretch_width",
+    styles={"color": "black"},
+)
+model_buttons = pn.widgets.RadioButtonGroup(
+    options=["dall-e-2", "dall-e-3"],
+    value="dall-e-2",
+    name="Model",
+    sizing_mode="stretch_width",
+)
+size_buttons = pn.widgets.RadioButtonGroup(
+    options=["256x256", "512x512", "1024x1024"],
+    name="Size",
+    sizing_mode="stretch_width",
+)
+n_images_slider = pn.widgets.IntSlider(
+    start=1, end=10, value=1, name="Number of images"
+)
+pn.bind(update_model_params, model_buttons, watch=True)
+chat_interface = pn.chat.ChatInterface(
+    callback=callback,
+    callback_user="DALL·E",
+    help_text="Send a message to get a reply from DALL·E!",
+)
+template = pn.template.BootstrapTemplate(
+    title="OpenAI DALL·E",
+    header_background="#212121",
+    main=[chat_interface],
+    header=[api_key_input],
+    sidebar=[model_buttons, size_buttons, n_images_slider],
+)
+template.servable()
+```
+</details>
+
+
 ## Langchain Chat With Pdf
 
 Demonstrates how to use the `ChatInterface` to chat about a PDF using
diff --git a/docs/assets/thumbnails/openai_images_dall_e.png b/docs/assets/thumbnails/openai_images_dall_e.png
new file mode 100644
index 0000000..6c0531b
Binary files /dev/null and b/docs/assets/thumbnails/openai_images_dall_e.png differ
diff --git a/docs/assets/videos/openai_images_dall_e.mp4 b/docs/assets/videos/openai_images_dall_e.mp4
new file mode 100644
index 0000000..7311703
Binary files /dev/null and b/docs/assets/videos/openai_images_dall_e.mp4 differ
diff --git a/docs/examples/applicable_recipes/openai_images_dall_e.py b/docs/examples/applicable_recipes/openai_images_dall_e.py
new file mode 100644
index 0000000..ead9686
--- /dev/null
+++ b/docs/examples/applicable_recipes/openai_images_dall_e.py
@@ -0,0 +1,85 @@
+"""
+Use the OpenAI API to generate images using the DALL·E model.
+"""
+
+import panel as pn
+from openai import AsyncOpenAI
+
+pn.extension()
+
+
+async def callback(contents: str, user: str, instance: pn.chat.ChatInterface):
+    if api_key_input.value:
+        # use api_key_input.value if set, otherwise use OPENAI_API_KEY
+        aclient.api_key = api_key_input.value
+
+    response = await aclient.images.generate(
+        model=model_buttons.value,
+        prompt=contents,
+        n=n_images_slider.value,
+        size=size_buttons.value,
+    )
+
+    image_panes = [
+        (str(i), pn.pane.Image(data.url)) for i, data in enumerate(response.data)
+    ]
+    return pn.Tabs(*image_panes) if len(image_panes) > 1 else image_panes[0][1]
+
+
+def update_model_params(model):
+    if model == "dall-e-2":
+        size_buttons.param.update(
+            options=["256x256", "512x512", "1024x1024"],
+            value="256x256",
+        )
+        n_images_slider.param.update(
+            start=1,
+            end=10,
+            value=1,
+        )
+    else:
+        size_buttons.param.update(
+            options=["1024x1024", "1024x1792", "1792x1024"],
+            value="1024x1024",
+        )
+        n_images_slider.param.update(
+            start=1,
+            end=1,
+            value=1,
+        )
+
+
+aclient = AsyncOpenAI()
+api_key_input = pn.widgets.PasswordInput(
+    placeholder="sk-... uses $OPENAI_API_KEY if not set",
+    sizing_mode="stretch_width",
+    styles={"color": "black"},
+)
+model_buttons = pn.widgets.RadioButtonGroup(
+    options=["dall-e-2", "dall-e-3"],
+    value="dall-e-2",
+    name="Model",
+    sizing_mode="stretch_width",
+)
+size_buttons = pn.widgets.RadioButtonGroup(
+    options=["256x256", "512x512", "1024x1024"],
+    name="Size",
+    sizing_mode="stretch_width",
+)
+n_images_slider = pn.widgets.IntSlider(
+    start=1, end=10, value=1, name="Number of images"
+)
+pn.bind(update_model_params, model_buttons, watch=True)
+chat_interface = pn.chat.ChatInterface(
+    callback=callback,
+    callback_user="DALL·E",
+    help_text="Send a message to get a reply from DALL·E!",
+)
+template = pn.template.BootstrapTemplate(
+    title="OpenAI DALL·E",
+    header_background="#212121",
+    main=[chat_interface],
+    header=[api_key_input],
+    sidebar=[model_buttons, size_buttons, n_images_slider],
+)
+template.servable()
diff --git a/docs/kickstart_snippets.md b/docs/kickstart_snippets.md
index aa4fbdb..ed4146c 100644
--- a/docs/kickstart_snippets.md
+++ b/docs/kickstart_snippets.md
@@ -391,6 +391,14 @@ Highlights:
 
 - Uses `serialize` to get chat history from the `ChatInterface`.
 - Uses `yield` to continuously concatenate the parts of the response
+
+
+
+
 <details>
 
 <summary>Source code for llama_index_.py</summary>
diff --git a/pyproject.toml b/pyproject.toml
index 1cf27f3..f93bdb7 100644
--- a/pyproject.toml
+++ b/pyproject.toml
@@ -47,12 +47,12 @@ test-cov-xml = "pytest --cov-report=xml"
 lint = [
   "isort .",
   "black .",
-  "ruff --fix .",
+  "ruff check --fix .",
 ]
 lint-check = [
   "isort . --check-only",
   "black --check .",
-  "ruff .",
+  "ruff check .",
 ]
 docs-serve = "python scripts/generate_gallery.py;mkdocs serve"
 docs-build = "python scripts/postprocess_videos.py;python scripts/generate_gallery.py;mkdocs build"
diff --git a/tests/ui/inputs.py b/tests/ui/inputs.py
index 88cfaa6..bceba4d 100644
--- a/tests/ui/inputs.py
+++ b/tests/ui/inputs.py
@@ -116,6 +116,12 @@ def openai_chat_with_hvplot(page: Page):
     page.wait_for_timeout(4000)
 
 
+def openai_images_dall_e(page: Page):
+    chat = ChatInterface(page)
+    chat.send("Create a complex HoloViz dashboard")
+    page.wait_for_timeout(12000)
+
+
 # get all the local functions here
 # and put them in a dict
 # so we can call them by name like {"openai_two_bots.py": openai_two_bots}
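A minimal sketch of how this change can be exercised locally, following the `DEVELOPER_GUIDE.md` workflow above: the `-k` selector matches the `openai_images_dall_e` test function added to `tests/ui/inputs.py`, and `docs-build` / `docs-serve` are the hatch scripts defined in `pyproject.toml`.

```bash
# run only the new UI test for the DALL·E example (selected by function name)
hatch run pytest -s -m ui --screenshot on --video on --headed -k openai_images_dall_e

# regenerate the gallery pages and preview them locally
hatch run docs-build
hatch run docs-serve
```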