Merge branch 'main' into add_label_posts_review_reason
sunildkumar committed Nov 21, 2023
2 parents 2774447 + 3fd63d3 commit 9dad3e4
Showing 47 changed files with 2,568 additions and 785 deletions.
35 changes: 34 additions & 1 deletion .github/workflows/cicd.yaml
@@ -83,6 +83,28 @@ jobs:
- name: Build website
run: npm run build

# Checks that the API reference docs build with sphinx
test-api-reference-docs:
runs-on: ubuntu-latest
steps:
- name: get code
uses: actions/checkout@v3
- name: install python
uses: actions/setup-python@v4
with:
python-version: ${{ env.PYTHON_VERSION }}
- name: install poetry
uses: snok/install-poetry@v1
with:
version: ${{ env.POETRY_VERSION }}

- name: Install dependencies
run: make install-sphinx-deps

- name: Build API documentation
run: |
make apidocs
# Run integration tests against the API (only on the main branch, though). The comprehensive
# version runs a matrix of python versions for better coverage.
test-comprehensive:
@@ -190,22 +212,33 @@ jobs:
if: github.ref == 'refs/heads/main' || startsWith(github.ref, 'refs/tags/v')
needs:
- test-comprehensive
- test-api-reference-docs
runs-on: ubuntu-latest
defaults:
run:
working-directory: docs/
steps:
- name: Get code
uses: actions/checkout@v3
- name: Install poetry
uses: snok/install-poetry@v1
with:
version: ${{ env.POETRY_VERSION }}
- name: Setup npm
uses: actions/setup-node@v3
with:
node-version: 18
cache: npm
- name: Install dependencies
run: npm install
- name: Install sphinx dependencies
run: |
cd ..
make install-sphinx-deps
- name: Build website
run: npm run build
run: |
cd ..
make docs-comprehensive
- name: Deploy website (if on main branch)
# Docs: https://github.com/peaceiris/actions-gh-pages#%EF%B8%8F-docusaurus
uses: peaceiris/actions-gh-pages@v3
17 changes: 14 additions & 3 deletions .vscode/settings.json
@@ -3,6 +3,17 @@
"editor.codeActionsOnSave": {
"source.organizeImports": true
},
"python.analysis.extraPaths": ["./generated"],
"python.formatting.provider": "black"
}
"editor.rulers": [
100,
120
],
"python.analysis.extraPaths": [
"./generated"
],
"python.formatting.provider": "black",
"[python]": {
"editor.codeActionsOnSave": {
"source.organizeImports": false
}
}
}
53 changes: 53 additions & 0 deletions Makefile
@@ -7,6 +7,9 @@ install-extras: install ## Install the package from source with extra dependenc
install-lint: ## Only install the linter dependencies
poetry install --only lint

install-dev: ## Only install the dev dependencies
poetry install --only dev

install-pre-commit: install ## Install pre-commit hooks
poetry run pre-commit install

@@ -39,6 +42,9 @@ test-integ: install ## Run tests against the integ API server (needs GROUNDLIGH
test-docs: install ## Run the example code and tests in our docs against the prod API (needs GROUNDLIGHT_API_TOKEN)
poetry run pytest -v --markdown-docs ${TEST_ARGS} docs README.md

test-docs-integ: install ## Run the example code and tests in our docs against the integ API (needs GROUNDLIGHT_API_TOKEN)
GROUNDLIGHT_ENDPOINT="https://api.integ.groundlight.ai/" poetry run pytest -v --markdown-docs ${TEST_ARGS} docs README.md

# Adjust which paths we lint
LINT_PATHS="src test bin samples"

@@ -47,3 +53,50 @@ lint: install-lint ## Run linter to check formatting and style

format: install-lint ## Run standard python formatting
./code-quality/format ${LINT_PATHS}


# Targets for sphinx documentation

install-sphinx-deps: ## Only install the sphinx dependencies
poetry install --no-root --only sphinx-deps

# The following is auto-generated by sphinx-quickstart:
# To test out doc changes locally, run
# poetry run make html && open build/html/index.html
SPHINXOPTS ?=
SPHINXBUILD ?= sphinx-build
SOURCEDIR = sphinx_docs
BUILDDIR = build

sphinx-help:
@$(SPHINXBUILD) -M help "$(SOURCEDIR)" "$(BUILDDIR)" $(SPHINXOPTS) $(O)


# The .PHONY directive tells make that `docs-comprehensive`, `apidocs`, and `html`
# are labels for commands rather than files, so they always run when invoked.
.PHONY: docs-comprehensive apidocs html

# Start an interactive server to test docs locally.
# Before running this, make sure that you have installed the node modules
# by running `npm install` in the docs directory.
develop-docs-comprehensive: docs-comprehensive
cd docs && npm start

## Builds docs comprehensively (integrating API reference docs built
## with sphinx into the docusaurus docs).
docs-comprehensive: apidocs
rm -rf docs/static/api-reference-docs
rm -rf docs/build/api-reference-docs
mkdir docs/static/api-reference-docs
mv build/html/* docs/static/api-reference-docs/

cd docs && npm run build

apidocs:
cd docs && npm install
poetry run make html

html:
@$(SPHINXBUILD) -M $@ "$(SOURCEDIR)" "$(BUILDDIR)" $(SPHINXOPTS) $(O)
@echo "Build finished. The HTML pages are in $(BUILDDIR)/html."
3 changes: 3 additions & 0 deletions docs/.gitignore
@@ -7,6 +7,9 @@
# Generated files
.docusaurus
.cache-loader
.package-lock.json
# api-reference-docs are generated by sphinx and placed here.
/static/api-reference-docs/

# Misc
.DS_Store
7 changes: 4 additions & 3 deletions docs/README.md
@@ -9,9 +9,10 @@ The docs are included with the SDK so that we can automate testing of the code s
Doc changes are published automatically when they're merged to main. To preview changes, build and host the site locally. You'll need a reasonably modern version of `npm` and then:

```
npm install
npm run build # looks for any errors
npm start # starts interactive server
cd docs
npm install
cd ..
make develop-docs-comprehensive
```

and then open [http://localhost:3000/python-sdk](http://localhost:3000/python-sdk).
5 changes: 5 additions & 0 deletions docs/docs/api-reference/_category_.json
@@ -0,0 +1,5 @@
{
"label": "API Reference",
"position": 5,
"collapsed": false
}
7 changes: 7 additions & 0 deletions docs/docs/api-reference/api-reference.md
@@ -0,0 +1,7 @@
---
id: redirect
title: API Reference
hide_title: true
---

<meta http-equiv="refresh" content="0; url=/python-sdk/api-reference-docs/" />
29 changes: 29 additions & 0 deletions docs/docs/building-applications/1-sample-applications.md
@@ -0,0 +1,29 @@
# Sample Applications

Explore these GitHub repositories to see examples of Groundlight-powered applications:

## Groundlight Stream Processor

Repository: [https://github.com/groundlight/stream](https://github.com/groundlight/stream)

The Groundlight Stream Processor is an easy-to-use Docker container for analyzing RTSP streams or common USB-based cameras. You can run it with a single Docker command, such as:

```bash
docker run stream:local --help
```

## Arduino ESP32 Camera Sample App

Repository: [https://github.com/groundlight/esp32cam](https://github.com/groundlight/esp32cam)

This sample application allows you to build a working AI vision detector using an inexpensive WiFi camera. With a cost of under $10, you can create a powerful and affordable AI vision system.

## Raspberry Pi

Repository: [https://github.com/groundlight/raspberry-pi-door-lock](https://github.com/groundlight/raspberry-pi-door-lock)

This sample application demonstrates how to set up a Raspberry Pi-based door lock system. The application monitors a door and sends a notification if the door is observed to be unlocked during non-standard business hours.

## Industrial and Manufacturing Applications

Groundlight can be used to [apply modern natural-language-based computer vision to industrial and manufacturing applications](/docs/building-applications/industrial).
@@ -1,3 +1,7 @@
---
sidebar_position: 1
---

# Grabbing Images

Groundlight's SDK accepts images in many popular formats, including PIL, OpenCV, and numpy arrays.
@@ -1,5 +1,5 @@
---
sidebar_position: 3
sidebar_position: 2
---

# Working with Detectors
@@ -8,7 +8,9 @@ sidebar_position: 3

Typically you'll use the `get_or_create_detector(name: str, query: str)` method to find an existing detector you've already created with the same name, or create a new one if it doesn't exist. But if you'd like to force creating a new detector, you can also use the `create_detector(name: str, query: str)` method.

```python
<!-- Don't test because we don't allow reusing the same name across multiple detectors -->

```python notest
from groundlight import Groundlight

gl = Groundlight()
@@ -1,3 +1,6 @@
---
sidebar_position: 3
---
# Confidence Levels

Groundlight gives you a simple way to control the trade-off of latency against accuracy. The longer you can wait for an answer to your image query, the better accuracy you can get. In particular, if the ML models are unsure of the best response, they will escalate the image query to more intensive analysis with more complex models and real-time human monitors as needed. Your code can easily wait for this delayed response. Either way, these new results are automatically trained into your models so your next queries will get better results faster.
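
As a minimal sketch of how this trade-off shows up in code (the `confidence_threshold` and `wait` parameters to `submit_image_query` are assumptions here; see the API reference for the exact signature), a query that needs a more reliable answer can ask for higher confidence and allow more time:

```python notest
from groundlight import Groundlight
from PIL import Image

gl = Groundlight()
detector = gl.get_or_create_detector(name="your_detector_name", query="your_query")
image = Image.open("/path/to/your/image.jpg")

# Ask for a more confident answer and allow up to 60 seconds for escalation to
# stronger models or human review before the call returns.
image_query = gl.submit_image_query(
    detector=detector,
    image=image,
    confidence_threshold=0.9,  # assumed parameter: minimum confidence to accept
    wait=60,                   # assumed parameter: maximum seconds to wait for that confidence
)
print(f"Answer: {image_query.result.label} (confidence: {image_query.result.confidence})")
```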
@@ -1,3 +1,7 @@
---
sidebar_position: 4
---

# Handling Server Errors

When building applications with the Groundlight SDK, you may encounter server errors during API calls. This page covers how to handle such errors and build robust code that can gracefully handle exceptions.
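
As a minimal illustrative sketch (the retry count, delay, and broad `except Exception` are assumptions for illustration; real code should prefer catching the SDK's specific exception types), a simple retry loop keeps an application running through transient server errors:

```python notest
import time

from groundlight import Groundlight
from PIL import Image

gl = Groundlight()
detector = gl.get_or_create_detector(name="your_detector_name", query="your_query")
image = Image.open("/path/to/your/image.jpg")

MAX_RETRIES = 3  # assumed values, tune for your application
RETRY_DELAY_SECONDS = 5

for attempt in range(1, MAX_RETRIES + 1):
    try:
        image_query = gl.submit_image_query(detector=detector, image=image)
        print(f"Answer: {image_query.result.label}")
        break
    except Exception as e:  # in real code, catch the SDK's specific exception types
        if attempt == MAX_RETRIES:
            raise
        print(f"Query failed (attempt {attempt}): {e}. Retrying in {RETRY_DELAY_SECONDS}s...")
        time.sleep(RETRY_DELAY_SECONDS)
```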
73 changes: 73 additions & 0 deletions docs/docs/building-applications/6-async-queries.md
@@ -0,0 +1,73 @@
---
sidebar_position: 5
---

# Asynchronous Queries

Groundlight provides a simple interface for submitting asynchronous queries. This is useful when the thread, process, or machine that submits image queries is not the same one that retrieves and uses the results. For example, you might have a forward-deployed robot or camera that submits image queries to Groundlight, and a separate server that retrieves the results and takes action based on them. We will refer to these two machines as the **submitting machine** and the **retrieving machine**.

## Setup Submitting Machine
On the **submitting machine**, you will need to install the Groundlight Python SDK. Then you can submit image queries asynchronously using the `ask_async` interface (read the full documentation [here](pathname:///python-sdk/api-reference-docs/#groundlight.client.Groundlight.ask_async)). `ask_async` returns as soon as the query is submitted; it does not wait for an answer to become available, which minimizes the time your program spends interacting with Groundlight. As a result, the `ImageQuery` object that `ask_async` returns lacks a `result` (the `result` field will be `None`). This is acceptable here because the **submitting machine** is not interested in the result. Instead, the **submitting machine** just needs to communicate the `ImageQuery.id`s to the **retrieving machine**, which might be done via a database, a message queue, or some other mechanism. For this example, we assume you are using a database and save each `ImageQuery.id` via `db.save(image_query.id)`.

```python notest
from groundlight import Groundlight
import cv2
from time import sleep

gl = Groundlight()
detector = gl.get_or_create_detector(name="your_detector_name", query="your_query")

cam = cv2.VideoCapture(0)  # Initialize camera (0 is the default index)

while True:
    _, image = cam.read()  # Capture one frame from the camera
    image_query = gl.ask_async(detector=detector, image=image)  # Submit the frame to Groundlight
    db.save(image_query.id)  # Save the image_query.id to a database for the retrieving machine to use
    sleep(10)  # Sleep for 10 seconds before submitting the next query

cam.release()  # Release the camera
```

## Setup Retrieving Machine
On the **retrieving machine** you will need to install the Groundlight Python SDK. Then you can retrieve the results of the image queries submitted by another machine using `get_image_query`. The **retrieving machine** can then use the `ImageQuery.result` to take action based on the result for whatever application you are building. For this example, we assume your application looks up the next image query to process from a database via `db.get_next_image_query_id()` and that this function returns `None` once all `ImageQuery`s are processed.
```python notest
from groundlight import Groundlight

gl = Groundlight()
detector = gl.get_or_create_detector(name="your_detector_name", query="your_query")

image_query_id = db.get_next_image_query_id()

while image_query_id is not None:
    image_query = gl.get_image_query(id=image_query_id)  # Retrieve the image query from Groundlight
    result = image_query.result

    # Take action based on the result of the image query
    if result.label == 'YES':
        pass  # TODO: do something based on your application
    elif result.label == 'NO':
        pass  # TODO: do something based on your application
    elif result.label == 'UNCLEAR':
        pass  # TODO: do something based on your application

    # Update image_query_id for the next iteration of the loop
    image_query_id = db.get_next_image_query_id()
```

## Important Considerations
When you submit an image query asynchronously, ML prediction on your query is **not** instant. Attempting to retrieve the result immediately after submitting an async query will likely return an `UNCLEAR` result while Groundlight is still processing your query. Instead, if your code needs a `result` synchronously, we recommend using one of our methods with a polling mechanism to retrieve the result. You can see all of the interfaces available in the documentation [here](pathname:///python-sdk/api-reference-docs/#groundlight.client.Groundlight).

```python notest
from groundlight import Groundlight
from PIL import Image

gl = Groundlight()
detector = gl.get_or_create_detector(name="your_detector_name", query="your_query")
image = Image.open("/path/to/your/image.jpg")
image_query = gl.ask_async(detector=detector, image=image)  # Submit async query to Groundlight
result = image_query.result  # This will always be 'None' as you asked asynchronously

image_query = gl.get_image_query(id=image_query.id)  # Immediately retrieve the image query from Groundlight
result = image_query.result  # This will likely be 'UNCLEAR' as Groundlight is still processing your query

image_query = gl.wait_for_confident_result(id=image_query.id)  # Poll for a confident result from Groundlight
result = image_query.result
```
34 changes: 34 additions & 0 deletions docs/docs/building-applications/7-edge.md
@@ -0,0 +1,34 @@
---
sidebar_position: 6
---

# Using Groundlight on the Edge

If your account has access to edge models, you can download and install them on your edge devices.
This allows you to run your model evaluations on the edge, reducing latency, cost, network bandwidth, and energy use.

## How the Edge Endpoint works

The Edge Endpoint runs as a set of Docker containers on an "edge device". This edge device can be an NVIDIA Jetson device, a rack-mounted server, or even a Raspberry Pi. The Edge Endpoint is responsible for downloading and running the models, and for communicating with the Groundlight cloud service.

To use the edge endpoint, simply configure the Groundlight SDK to use the edge endpoint's URL instead of the cloud endpoint.
All application logic will work seamlessly and unchanged with the Groundlight Edge Endpoint, except that some ML answers will return much faster locally. The only visible difference is that image queries answered at the edge endpoint have IDs prefixed with `iqe_` instead of the `iq_` prefix used for image queries answered in the cloud (`iqe_` stands for "image query edge"). Edge-originated image queries will not appear in the cloud dashboard.
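
As a small illustrative sketch of the ID prefix described above (this is plain string matching on `ImageQuery.id`, not a dedicated SDK helper, and it assumes an Edge Endpoint is already running at the address used in the next section):

```python notest
from groundlight import Groundlight
from PIL import Image

# Assumes a local Edge Endpoint is reachable at this address (see the next section).
gl = Groundlight(endpoint="http://localhost:6717")
detector = gl.get_or_create_detector(name="your_detector_name", query="your_query")
image = Image.open("/path/to/your/image.jpg")

image_query = gl.submit_image_query(detector=detector, image=image)

# Image queries answered at the edge endpoint have IDs prefixed with "iqe_";
# cloud-answered queries use the "iq_" prefix and appear in the cloud dashboard.
if image_query.id.startswith("iqe_"):
    print("Answered locally by the edge endpoint")
else:
    print("Answered by the Groundlight cloud service")
```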

## Configuring the Edge Endpoint

To configure the Groundlight SDK to use the edge endpoint, you can either pass the endpoint URL to the Groundlight constructor like this:

```python
from groundlight import Groundlight
gl = Groundlight(endpoint="http://localhost:6717")
```

or set the `GROUNDLIGHT_ENDPOINT` environment variable like this:

```bash
export GROUNDLIGHT_ENDPOINT=http://localhost:6717
python your_app.py
```
@@ -1,3 +1,7 @@
---
sidebar_position: 7
---

# Industrial and Manufacturing Applications

Modern natural language-based computer vision is transforming industrial and manufacturing applications by enabling more intuitive interaction with automation systems. Groundlight offers cutting-edge computer vision technology that can be seamlessly integrated into various industrial processes, enhancing efficiency, productivity, and quality control.