Commit
Generate a list of urls to query
soapy1 committed Jan 23, 2025
1 parent d517a1d commit e3a7ada
Showing 1 changed file with 33 additions and 15 deletions.
@@ -26,13 +26,32 @@ def get_username_hook(spawner):
     )
 
 
+def get_total_records(url: str, token: str) -> int:
+    import urllib3
+    http = urllib3.PoolManager()
+    response = http.request(
+        "GET", url, headers={"Authorization": f"Bearer {token}"}
+    )
+    decoded_response = json.loads(response.data.decode("UTF-8"))
+    return decoded_response.get("count", 0)
+
+
+def generate_paged_urls(base_url: str, total_records: int, page_size: int) -> list[str]:
+    import math
+
+    urls = []
+    # pages start at 1
+    for page in range(1, math.ceil(total_records/page_size)+1):
+        urls.append(f"{base_url}?size={page_size}&page={page}")
+
+    return urls
+
+
 # TODO: this should get unit tests. Currently, since this is not a python module,
 # adding tests in a traditional sense is not possible. See https://github.com/soapy1/nebari/tree/try-unit-test-spawner
 # for a demo of one approach to adding tests.
 def get_conda_store_environments(user_info: dict):
     import urllib3
     import yarl
-    import math
     import os
 
     # Check for the environment variable `CONDA_STORE_API_PAGE_SIZE_LIMIT`. Fall
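To make the paging arithmetic concrete: generate_paged_urls precomputes one URL per page, with pages numbered from 1. A minimal sketch, not part of the commit, using a hypothetical base_url:

base_url = "http://conda-store-service/conda-store/api/v1/environment/"
urls = generate_paged_urls(base_url, total_records=25, page_size=10)
# math.ceil(25 / 10) == 3, so three page urls are produced:
#   http://conda-store-service/conda-store/api/v1/environment/?size=10&page=1
#   http://conda-store-service/conda-store/api/v1/environment/?size=10&page=2
#   http://conda-store-service/conda-store/api/v1/environment/?size=10&page=3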
@@ -43,26 +62,25 @@ def get_conda_store_environments(user_info: dict):
     token = z2jh.get_config("custom.conda-store-jhub-apps-token")
     endpoint = "conda-store/api/v1/environment"
 
-    url = yarl.URL(f"http://{external_url}/{endpoint}/?size={page_size}")
+    base_url = f"http://{external_url}/{endpoint}/"
     http = urllib3.PoolManager()
-    response = http.request(
-        "GET", str(url), headers={"Authorization": f"Bearer {token}"}
-    )
 
-    # parse response
-    decoded_response = json.loads(response.data.decode("UTF-8"))
-    env_data = decoded_response.get("data", [])
-    total_records = decoded_response.get("count", 0)
+    # get total number of records from the endpoint
+    total_records = get_total_records(base_url, token)
+
+    # will contain all the environment info returned from the api
+    env_data = []
 
-    # If there are more records than the specified size limit, then
-    # will need to page through to get all the available envs
-    if total_records > page_size:
-        # Already pulled the first page of results, start looping through
-        # the envs starting on the 2nd page
-        for page in range(2, math.ceil(total_records/page_size)+1):
-            url = yarl.URL(f"http://{external_url}/{endpoint}/?size={page_size}&page={page}")
+    # generate a list of urls to hit to build the response
+    urls = generate_paged_urls(base_url, total_records, page_size)
+
+    # get content from urls
+    for url in urls:
         response = http.request(
-            "GET", str(url), headers={"Authorization": f"Bearer {token}"}
+            "GET", url, headers={"Authorization": f"Bearer {token}"}
        )
         decoded_response = json.loads(response.data.decode("UTF-8"))
         env_data += decoded_response.get("data", [])
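The TODO above asks for unit tests, which the file cannot have today because it is not an importable Python module. Assuming the two helpers were importable, a pytest-style sketch with urllib3 mocked out might look like this (module name and URLs are hypothetical):

from unittest import mock

# from spawner_hooks import generate_paged_urls, get_total_records  # hypothetical module

def test_generate_paged_urls():
    urls = generate_paged_urls("http://example/api/", total_records=7, page_size=3)
    # math.ceil(7 / 3) == 3 pages
    assert urls == [
        "http://example/api/?size=3&page=1",
        "http://example/api/?size=3&page=2",
        "http://example/api/?size=3&page=3",
    ]

def test_get_total_records():
    # get_total_records does `import urllib3` inside the function body, so
    # patching urllib3.PoolManager on the module is seen by that import
    fake_response = mock.Mock(data=b'{"count": 42, "data": []}')
    with mock.patch("urllib3.PoolManager") as pool_manager:
        pool_manager.return_value.request.return_value = fake_response
        assert get_total_records("http://example/api/", "token") == 42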
