diff --git a/.github/workflows/build.yml b/.github/workflows/build.yml index 3037a6c..fb989aa 100644 --- a/.github/workflows/build.yml +++ b/.github/workflows/build.yml @@ -24,6 +24,9 @@ jobs: - name: Lint and typecheck run: | hatch run lint-check + - name: Install Playwright browser(s) + run: | + hatch run playwright install chromium - name: Test run: | hatch run test-cov-xml diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml index 0131ab4..ed43308 100644 --- a/.pre-commit-config.yaml +++ b/.pre-commit-config.yaml @@ -4,6 +4,14 @@ exclude: (\.min\.js$|\.svg$|\.html$) default_stages: [commit] repos: + - repo: https://github.com/pycqa/isort + rev: 5.12.0 + hooks: + - id: isort + - repo: https://github.com/psf/black-pre-commit-mirror + rev: 23.9.1 + hooks: + - id: black - repo: https://github.com/pre-commit/pre-commit-hooks rev: v4.4.0 hooks: diff --git a/DEVELOPER_GUIDE.md b/DEVELOPER_GUIDE.md new file mode 100644 index 0000000..c8b408c --- /dev/null +++ b/DEVELOPER_GUIDE.md @@ -0,0 +1,141 @@ +# Development + +**Welcome. Thanks for your interest in Panel-Chat-Examples ❤️** + +You can contribute in many ways, for example, by + +- Giving our project a ⭐ on [Github](https://github.com/holoviz-topics/panel-chat-examples). +- Sharing knowledge about Panel-Chat-Examples on social media. +- Contributing very clear and easily reproducible [Bug Reports or Feature Requests](https://github.com/holoviz-topics/panel-chat-examples/issues). +- Improving our README, docs and developer infrastructure. +- Improving our collection of [examples](docs/examples). + +Before you start contributing to our code base or documentation, please make sure your contribution is well described and discussed in a [Github Issue](https://github.com/holoviz-topics/panel-chat-examples/issues). + +If you need help to get started, please reach out via [Discord](https://discord.gg/rb6gPXbdAr). 
+ +## Getting Installed + +Start by cloning the repository + +```bash +git clone https://github.com/holoviz-topics/panel-chat-examples +cd panel-chat-examples +``` + +If you are not a core contributor you will have to work with your own fork too. See the Github [Fork a Repo](https://docs.github.com/en/get-started/quickstart/fork-a-repo) guide for more details. + +We use [Hatch](https://hatch.pypa.io/latest/install/) to manage the development environment and production build. + +Please ensure it's installed on your system with + +```bash +pip install hatch +``` + +Please ensure [Playwright](https://playwright.dev/python/) browsers are installed + +```bash +hatch run playwright install chromium +``` + +The first time `hatch run ...` is run, it will install the required dependencies. + +Please ensure `pre-commit` is installed by running + +```bash +hatch run pre-commit run --all +``` + +You will also need to set the below environment variables + +```bash +export OPENAI_API_KEY=... +``` + +Please note that you will be incurring costs from OPENAI when you run the tests or serve the apps! + +## Format, lint and type check the code + +Execute the following command to apply autoformatting, linting and check typing: + +```bash +hatch run lint +``` + +## Run all tests + +You can run all the tests with: + +```bash +hatch run test +``` + +## Run UI tests + +To run the Playwright tests in *headed* mode (i.e. show the browser) you can run + +```bash +hatch run pytest -m ui --headed +``` + +You can take screenshots via + +```bash +SCREENSHOT=true hatch run pytest -m ui +``` + +The screenshots can be found in [tests/ui/screenshots](tests/ui/screenshots) + +## Run Load tests + +To ensure the apps can be deployed for example to Hugging Face spaces we need them to load fast. +We can test the loading time with [Locust](https://docs.locust.io/en/stable/index.html). 
+ +First you need to serve the examples + +```bash +hatch run panel-serve +``` + +Then you should run + +```bash +hatch run loadtest +``` + +Finally you can open [http://localhost:8089/](http://localhost:8089/) and click "Start swarming" + +You should make sure the RPS (Requests per second) stays above 1. In the image below it's 2.3. + +![Locust](assets/images/panel-chat-examples-locust.png) + +## Serve the documentation + +You can serve the Mkdocs documentation with livereload via: + +```bash +hatch run docs-serve +``` + +It'll automatically watch for changes in your code. + +## Publish a new version + +You can bump the version, create a commit and associated tag with one command: + +```bash +hatch version patch +``` + +```bash +hatch version minor +``` + +```bash +hatch version major +``` + +Your default Git text editor will open so you can add information about the release. + +When you push the tag on GitHub, the workflow will automatically publish it on PyPi and a GitHub release will be created as draft. diff --git a/README.md b/README.md index 237aae5..61d936a 100644 --- a/README.md +++ b/README.md @@ -12,82 +12,36 @@ THIS PROJECT IS IN EARLY STAGE AND WILL CHANGE! The examples are based on the next generation of chat features being developed in [PR #5333](https://github.com/holoviz/panel/pull/5333) -To run the examples: +To install and serve all examples: ```bash -hatch run panel serve docs/examples/**/*.py --static-dirs thumbnails=docs/assets/thumbnails --autoreload +git clone https://github.com/holoviz-topics/panel-chat-examples +cd panel-chat-examples +pip install hatch +# Set the OPENAI_API_KEY environment variable +hatch run panel-serve # or equivalently panel serve docs/examples/**/*.py --static-dirs thumbnails=docs/assets/thumbnails --autoreload ``` Note the default installation is not optimized for GPU usage. To enable GPU support for local models (i.e. 
not OpenAI), install `ctransformers` with the [proper backend](https://github.com/marella/ctransformers#gpu) and modify the scripts configs' accordingly, e.g. `n_gpu_layers=1` for a single GPU. CUDA: + ```bash pip install ctransformers[cuda] ``` Mac M1/2: -```bash -CT_METAL=1 pip install ctransformers --no-binary ctransformers # for m1 -``` ---- - -## Development - -### Clone repository - -`git clone https://github.com/holoviz-topics/panel-chat-examples.git` - -### Setup environment - -We use [Hatch](https://hatch.pypa.io/latest/install/) to manage the development environment and production build. Ensure it's installed on your system with `pip install hatch` - -### Run unit tests - -You can run all the tests with: ```bash -hatch run test -``` - -### Format the code - -Execute the following command to apply linting and check typing: - -```bash -hatch run lint -``` - -### Publish a new version - -You can bump the version, create a commit and associated tag with one command: - -```bash -hatch version patch -``` - -```bash -hatch version minor -``` - -```bash -hatch version major +CT_METAL=1 pip install ctransformers --no-binary ctransformers # for m1 ``` -Your default Git text editor will open so you can add information about the release. - -When you push the tag on GitHub, the workflow will automatically publish it on PyPi and a GitHub release will be created as draft. - -## Serve the documentation - -You can serve the Mkdocs documentation with: +--- -```bash -python scripts/generate_gallery.py -hatch run docs-serve -``` + -It'll automatically watch for changes in your code. ## Contributing +We would ❤️ to collaborate with you. Check out the [DEVELOPER GUIDE](DEVELOPER_GUIDE.md) to get started. 
## License diff --git a/docs/assets/images/panel-chat-examples-locust.png b/docs/assets/images/panel-chat-examples-locust.png new file mode 100644 index 0000000..0f9e763 Binary files /dev/null and b/docs/assets/images/panel-chat-examples-locust.png differ diff --git a/docs/examples/basics/echo_stream.py b/docs/examples/basics/echo_stream.py index a509996..8c0e6fe 100644 --- a/docs/examples/basics/echo_stream.py +++ b/docs/examples/basics/echo_stream.py @@ -1,7 +1,9 @@ """ -Demonstrates how to use the `ChatInterface` and a `callback` function to stream back responses. +Demonstrates how to use the `ChatInterface` and a `callback` function to stream back +responses. -The chatbot Assistant echoes back the message entered by the User in a *streaming* fashion. +The chatbot Assistant echoes back the message entered by the User in a *streaming* +fashion. """ diff --git a/docs/examples/features/delayed_placeholder.py b/docs/examples/features/delayed_placeholder.py index 13e0f0e..8b46bc3 100644 --- a/docs/examples/features/delayed_placeholder.py +++ b/docs/examples/features/delayed_placeholder.py @@ -3,7 +3,6 @@ """ from asyncio import sleep -from random import choice import panel as pn diff --git a/docs/examples/langchain/llama_and_mistral.py b/docs/examples/langchain/llama_and_mistral.py index b29e928..b45d8f4 100644 --- a/docs/examples/langchain/llama_and_mistral.py +++ b/docs/examples/langchain/llama_and_mistral.py @@ -4,7 +4,6 @@ """ import panel as pn - from langchain.chains import LLMChain from langchain.llms import CTransformers from langchain.prompts import PromptTemplate @@ -23,7 +22,8 @@ } llm_chains = {} -TEMPLATE = """[INST] You are a friendly chat bot who's willing to help answer the user: +TEMPLATE = """[INST] You are a friendly chat bot who's willing to help answer the +user: {user_input} [/INST] """ diff --git a/docs/examples/openai/authentication.py b/docs/examples/openai/authentication.py index 344790d..f83cde0 100644 --- 
a/docs/examples/openai/authentication.py +++ b/docs/examples/openai/authentication.py @@ -22,7 +22,7 @@ def add_key_to_env(key): os.environ["OPENAI_API_KEY"] = key chat_interface.send( "Your OpenAI key has been set. Feel free to minimize the sidebar.", - **SYSTEM_KWARGS + **SYSTEM_KWARGS, ) chat_interface.disabled = False diff --git a/docs/index.md b/docs/index.md index 7912148..8199521 100644 --- a/docs/index.md +++ b/docs/index.md @@ -6,7 +6,7 @@ To run all of these examples locally: git clone https://github.com/holoviz-topics/panel-chat-examples cd panel-chat-examples pip install hatch -hatch run panel serve docs/examples/**/*.py --static-dirs thumbnails=docs/assets/thumbnails --autoreload +hatch run panel-serve ``` Note the default installation is not optimized for GPU usage. To enable GPU support for local @@ -157,7 +157,6 @@ Demonstrates how to delay the display of the placeholder. """ from asyncio import sleep -from random import choice import panel as pn @@ -448,7 +447,6 @@ Llama2. """ import panel as pn - from langchain.chains import LLMChain from langchain.llms import CTransformers from langchain.prompts import PromptTemplate @@ -574,7 +572,7 @@ def add_key_to_env(key): os.environ["OPENAI_API_KEY"] = key chat_interface.send( "Your OpenAI key has been set. 
Feel free to minimize the sidebar.", - **SYSTEM_KWARGS + **SYSTEM_KWARGS, ) chat_interface.disabled = False @@ -882,3 +880,4 @@ chat_interface.send( chat_interface.servable() ``` + diff --git a/pyproject.toml b/pyproject.toml index f4e5592..576284e 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -5,6 +5,13 @@ extend-select = ["I", "TRY"] [tool.pytest.ini_options] addopts = "--cov=panel_chat_examples/ --cov-report=term-missing" asyncio_mode = "strict" +markers = [ + "slow: marks tests as slow (deselect with '-m \"not slow\"')", + "ui: marks tests as ui tests (deselect with '-m \"not ui\"')", +] + +[tool.isort] +profile = "black" [tool.hatch] @@ -17,34 +24,40 @@ commit_extra_args = ["-e"] path = "panel_chat_examples/__init__.py" [tool.hatch.envs.default] -python = "3.9" dependencies = [ "black", - "mypy", - "ruff", - "pytest", - "pytest-cov", + "isort", + "locust", "mkdocs-material", "mkdocstrings[python]", + "mypy", + "pre-commit", "pytest-asyncio", + "pytest-cov", "pytest-playwright", + "pytest", + "ruff", ] [tool.hatch.envs.default.scripts] test = "pytest" test-cov-xml = "pytest --cov-report=xml" lint = [ + "isort .", "black .", "ruff --fix .", "mypy panel_chat_examples/", ] lint-check = [ + "isort . 
--check-only", "black --check .", "ruff .", "mypy panel_chat_examples/", ] -docs-serve = "mkdocs serve" -docs-build = "mkdocs build" +docs-serve = "python scripts/generate_gallery.py;mkdocs serve" +docs-build = "python scripts/generate_gallery.py;mkdocs build" +panel-serve = "panel serve docs/examples/**/*.py --static-dirs thumbnails=docs/assets/thumbnails --autoreload" +loadtest = "locust -f tests/locustfile.py -H http://localhost:5006 --users 1 --spawn-rate 1" [build-system] requires = ["hatchling", "hatch-regex-commit"] diff --git a/requirements_dev.txt b/requirements_dev.txt deleted file mode 100644 index 056938f..0000000 --- a/requirements_dev.txt +++ /dev/null @@ -1,5 +0,0 @@ --r requirements.txt -isort -black -pytest -pytest-playwright \ No newline at end of file diff --git a/scripts/generate_gallery.py b/scripts/generate_gallery.py index 4a02975..f5b07c0 100644 --- a/scripts/generate_gallery.py +++ b/scripts/generate_gallery.py @@ -1,7 +1,7 @@ """Generates a markdown file describing the examples apps""" -from textwrap import dedent, indent from pathlib import Path +from textwrap import dedent, indent DOCS_PATH = Path(__file__).parent.parent / "docs" EXAMPLES_PATH = DOCS_PATH / "examples" @@ -15,8 +15,8 @@ def run(): Generates the text description looping the inside the EXAMPLES_PATH recursively - For each folder a header "## Folder Name" is added to the text - - For each .py file of a header is added "### File Name" to the text as well as the content of the - module docstring. + - For each .py file of a header is added "### File Name" to the text as well as the + content of the module docstring. 
""" text = dedent( @@ -28,12 +28,14 @@ def run(): git clone https://github.com/holoviz-topics/panel-chat-examples cd panel-chat-examples pip install hatch - hatch run panel serve docs/examples/**/*.py --static-dirs thumbnails=docs/assets/thumbnails --autoreload + # Set the OPENAI_API_KEY environment variable + hatch run panel-serve ``` - Note the default installation is not optimized for GPU usage. To enable GPU support for local - models (i.e. not OpenAI), install `ctransformers` with the proper backend and modify the - scripts configs' accordingly, e.g. `n_gpu_layers=1` for a single GPU. + Note the default installation is not optimized for GPU usage. To enable GPU + support for local models (i.e. not OpenAI), install `ctransformers` with the + proper backend and modify the scripts configs' accordingly, e.g. + `n_gpu_layers=1` for a single GPU. """ ) for folder in sorted(EXAMPLES_PATH.glob("**/"), key=lambda folder: folder.name): @@ -63,12 +65,14 @@ def run(): thumbnail_str = ( "\n" f'[]({source_path})\n' + f'alt="{title}" style="max-height: 400px; max-width: 100%;">]' + f"({source_path})\n" ) docstring_lines.append(thumbnail_str) docstring_lines.append( f"
\n" - f"Source code for {source_path.name}\n" + f"Source code for {source_path.name}\n" f"```python\n" f"{indent(file.read_text(), '' * 4).rstrip()}\n" f"```\n" diff --git a/tests/conftest.py b/tests/conftest.py index 199820b..7bd81a6 100644 --- a/tests/conftest.py +++ b/tests/conftest.py @@ -8,7 +8,7 @@ from panel.io.state import state # pylint: disable=protected-access -EXAMPLES_PATH = Path(__file__).parent.parent / "examples" +EXAMPLES_PATH = Path(__file__).parent.parent / "docs/examples" # The fixtures in this module are heavily inspired the Panel conftest.py file. @@ -22,6 +22,17 @@ def get_default_port(): PORT = [get_default_port()] +REQUIRES_OPENAI_API_KEY = [ + "docs/examples/langchain/math_chain.py", + "docs/examples/langchain/chat_memory.py", +] + + +def _examples_to_skip(): + if "OPENAI_API_KEY" not in os.environ: + return [Path(path).absolute() for path in REQUIRES_OPENAI_API_KEY] + return [] + @pytest.fixture def port() -> int: @@ -35,9 +46,9 @@ def module_cleanup(): """ Cleanup Panel extensions after each test. 
""" - from bokeh.core.has_props import ( + from bokeh.core.has_props import ( # pylint: disable=import-outside-toplevel _default_resolver, - ) # pylint: disable=import-outside-toplevel + ) to_reset = list(panel_extension._imports.values()) @@ -66,13 +77,15 @@ def cache_cleanup(): def _get_paths(): + skip_paths = _examples_to_skip() paths = [] for folder in sorted(EXAMPLES_PATH.glob("**/"), key=lambda folder: folder.name): if folder.name == "examples": continue for file in folder.glob("*.py"): - paths.append(str(file)) + if file not in skip_paths: + paths.append(str(file)) return paths diff --git a/tests/locustfile.py b/tests/locustfile.py new file mode 100644 index 0000000..d69e00f --- /dev/null +++ b/tests/locustfile.py @@ -0,0 +1,21 @@ +"""Locust load test file""" +from pathlib import Path +from random import choice + +from conftest import APP_PATHS # pylint: disable=import-error +from locust import HttpUser, task + + +class RandomPageUser(HttpUser): + """This User gets a random page""" + + @task(weight=len(APP_PATHS)) + def get_random_page(self): + """Gets a random page""" + app_path = choice(APP_PATHS) + self.client.get(f"/{Path(app_path).name.replace('.py', '')}") + + @task + def get_index_page(self): + """Gets the index page""" + self.client.get("/") diff --git a/tests/ui/test_all.py b/tests/ui/test_all.py index 194b039..8c7696c 100644 --- a/tests/ui/test_all.py +++ b/tests/ui/test_all.py @@ -53,18 +53,28 @@ def _take_screenshot(app_path, page): ) -def test_app(app_path, page, port): - """Test the UI of an app via Playwright""" - serve(app_path, port=port, threaded=True, show=False) - +@pytest.fixture +def server(app_path, port): + """Returns a panel server runnning the app""" + bokeh_allow_ws_origin = os.environ.get("BOKEH_ALLOW_WS_ORIGIN") + os.environ["BOKEH_ALLOW_WS_ORIGIN"] = "localhost" + server = serve(app_path, port=port, threaded=True, show=False) time.sleep(0.2) + yield server + server.stop() + if bokeh_allow_ws_origin: + 
os.environ["BOKEH_ALLOW_WS_ORIGIN"] = bokeh_allow_ws_origin + +def test_app(server, app_path, port, page): + """Test the UI of an app via Playwright""" msgs = [] - page.on("console", msgs.append) + # Without the lambda below an AttributeError will be raised + page.on("console", lambda: msgs.append) page.goto(f"http://localhost:{port}", timeout=40_000) - _take_screenshot(app_path, page) assert _bokeh_messages_have_been_logged(msgs) _expect_no_traceback(page) assert _page_not_empty(page), "The page is empty, No
element was not found" + _take_screenshot(app_path, page)