Skip to content

Commit

Permalink
Add claude, gemini, and palm LLM charts and configuration (#61)
Browse files Browse the repository at this point in the history
Update MQ deployments to support multiple mounted secrets for Google API compat.
Add base personas for added LLMs to test
Update backend deployment script to include new LLM services config

Co-authored-by: Daniel McKnight <[email protected]>
  • Loading branch information
NeonDaniel and NeonDaniel authored Jan 17, 2024
1 parent 78c1bdf commit 62d3eee
Show file tree
Hide file tree
Showing 39 changed files with 554 additions and 31 deletions.
107 changes: 103 additions & 4 deletions neon_diana_utils/configuration.py
Original file line number Diff line number Diff line change
Expand Up @@ -87,7 +87,12 @@ def make_llm_bot_config():
configuration = {"llm_bots": dict()}
if click.confirm("Configure ChatGPT Personas?"):
configuration['llm_bots']['chat_gpt'] = persona_config['chat_gpt']
# TODO: Add other LLM personas here
if click.confirm("Configure PaLM2 Personas?"):
configuration['llm_bots']['palm2'] = persona_config['palm2']
if click.confirm("Configure Gemini Personas?"):
configuration['llm_bots']['gemini'] = persona_config['gemini']
if click.confirm("Configure Claude Personas?"):
configuration['llm_bots']['claude'] = persona_config['claude']
return configuration


Expand Down Expand Up @@ -163,7 +168,7 @@ def make_keys_config(write_config: bool,
click.confirm("Is this configuration correct?")

chatgpt_config = dict()
if click.confirm("Configure ChatGPT Service?"):
if click.confirm("Configure ChatGPT LLM?"):
config_confirmed = False
while not config_confirmed:
gpt_key = click.prompt("ChatGPT API Key", type=str)
Expand All @@ -189,7 +194,7 @@ def make_keys_config(write_config: bool,
click.confirm("Is this configuration correct?")

fastchat_config = dict()
if click.confirm("Configure FastChat Service?"):
if click.confirm("Configure FastChat LLM?"):
config_confirmed = False
while not config_confirmed:
model = click.prompt("FastChat Model", type=str,
Expand All @@ -213,11 +218,93 @@ def make_keys_config(write_config: bool,
click.echo(pformat(fastchat_config))
config_confirmed = click.confirm("Is this configuration correct?")

palm2_config = dict()
if click.confirm("Configure PaLM2 LLM?"):
config_confirmed = False
while not config_confirmed:
role = click.prompt("PaLM2 Role", type=str,
default="You are trying to give a short "
"answer in less than 40 words.")
context = click.prompt("PaLM2 context depth", type=int,
default=3)
max_tokens = click.prompt("Max number of tokens in responses",
type=int, default=100)
num_processes = click.prompt(
"Number of queries to handle in parallel",
type=int, default=1)
palm2_config = {
"key_path": "/config/neon/google.json",
"role": role,
"context_depth": context,
"max_tokens": max_tokens,
"num_parallel_processes": num_processes
}
click.echo(pformat(palm2_config))
config_confirmed = click.confirm("Is this configuration correct?")

gemini_config = dict()
if click.confirm("Configure Gemini LLM?"):
config_confirmed = False
while not config_confirmed:
model = click.prompt("Gemini Model", type=str,
default="gemini-pro")
role = click.prompt("Gemini Role", type=str,
default="You are trying to give a short "
"answer in less than 40 words.")
context = click.prompt("Gemini context depth", type=int,
default=3)
max_tokens = click.prompt("Max number of tokens in responses",
type=int, default=100)
num_processes = click.prompt(
"Number of queries to handle in parallel",
type=int, default=1)
gemini_config = {
"key_path": "/config/neon/google.json",
"model": model,
"role": role,
"context_depth": context,
"max_tokens": max_tokens,
"num_parallel_processes": num_processes
}
click.echo(pformat(gemini_config))
config_confirmed = click.confirm("Is this configuration correct?")

claude_config = dict()
if click.confirm("Configure Anthropic Claude LLM?"):
config_confirmed = False
while not config_confirmed:
            anthropic_key = click.prompt("Anthropic API Key", type=str)
openai_key = click.prompt("OpenAI API Key", type=str,
default=chatgpt_config.get('key'))
model = click.prompt("Anthropic Model", type=str,
default="claude-2")
role = click.prompt("Role", type=str,
default="You are trying to give a short "
"answer in less than 40 words.")
context = click.prompt("Context depth", type=int, default=3)
max_tokens = click.prompt("Maximum tokens in responses", type=int,
default=256)
claude_config = {
"key": anthropic_key,
"openai_key": openai_key,
"model": model,
"role": role,
"context_depth": context,
"max_tokens": max_tokens
}
click.echo(pformat(claude_config))
config_confirmed = \
click.confirm("Is this configuration correct?")

config = {"keys": {"api_services": api_services,
"emails": email_config,
"track_my_brands": brands_config},
"LLM_CHAT_GPT": chatgpt_config,
"FastChat": fastchat_config
"LLM_FASTCHAT": fastchat_config,
"LLM_PALM2": palm2_config,
"LLM_GEMINI": gemini_config,
"LLM_CLAUDE": claude_config,
"FastChat": fastchat_config # TODO: Backwards-compat. only
}
if write_config:
click.echo(f"Writing configuration to {output_file}")
Expand Down Expand Up @@ -509,6 +596,18 @@ def configure_backend(username: str = None,
llm_config = make_llm_bot_config()
else:
llm_config = dict()
# Check if google.json file is expected
if any((keys_config['LLM_PALM2'], keys_config['LLM_GEMINI'])):
handled = False
while not handled:
cred = click.prompt("Path to Google credential file", type=str)
cred = expanduser(cred)
if not isfile(cred):
click.echo(f"Invalid path ({cred}). Please, try again.")
else:
shutil.copyfile(cred, join(dirname(diana_config),
"google.json"))
handled = True
config = {**{"MQ": {"users": mq_auth_config,
"server": "neon-rabbitmq",
"port": 5672}},
Expand Down
4 changes: 2 additions & 2 deletions neon_diana_utils/helm_charts/backend/diana-backend/Chart.yaml
Original file line number Diff line number Diff line change
Expand Up @@ -15,7 +15,7 @@ type: application
# This is the chart version. This version number should be incremented each time you make changes
# to the chart and its templates, including the app version.
# Versions are expected to follow Semantic Versioning (https://semver.org/)
version: 0.1.14
version: 0.1.22

# This is the version number of the application being deployed. This version number should be
# incremented each time you make changes to the application. Versions are not expected to
Expand All @@ -35,5 +35,5 @@ dependencies:
version: 0.0.11
repository: file://../http-services
- name: diana-mq
version: 0.0.9
version: 0.0.16
repository: file://../mq-services
Original file line number Diff line number Diff line change
@@ -0,0 +1,10 @@
{{/*
google_json.secret — renders a Kubernetes Secret holding the Google
service-account credential. The secret name and the (pre-encoded) file
contents both come from chart values (`backend.googleSecret` /
`backend.googleJson`), rendered through `tpl` so they may themselves
contain template expressions. The single data key is "google.json",
matching the key_path (/config/neon/google.json) expected by the
PaLM2/Gemini LLM services.
*/}}
{{- define "google_json.secret"}}
apiVersion: v1
kind: Secret
metadata:
  name: {{ tpl .Values.backend.googleSecret . }}
type: Opaque
data:
  "google.json": |-
    {{ tpl .Values.backend.googleJson . }}
{{- end -}}
Original file line number Diff line number Diff line change
Expand Up @@ -6,8 +6,10 @@
ghTokenEncoded: ''
dianaConfig: ''
rabbitMqConfig: ''
googleJson: ''

configSecret: diana-config
googleSecret: google-json

letsencrypt:
sslSecret: &ssl_secret letsencrypt-private-key
Expand Down
30 changes: 23 additions & 7 deletions neon_diana_utils/helm_charts/backend/mq-services/Chart.yaml
Original file line number Diff line number Diff line change
Expand Up @@ -3,30 +3,46 @@ name: diana-mq
description: Deploy DIANA MQ Services

type: application
version: 0.0.9
version: 0.0.16
appVersion: "1.0.1a18"
dependencies:
- name: neon-api-proxy
alias: neon-api-proxy
version: 0.0.5
version: 0.0.6
repository: file://../../mq/neon-api-proxy
- name: neon-brands-service
alias: neon-brands-service
version: 0.0.5
version: 0.0.6
repository: file://../../mq/neon-brands-service
- name: neon-email-proxy
alias: neon-email-proxy
version: 0.0.5
version: 0.0.6
repository: file://../../mq/neon-email-proxy
- name: neon-metrics-service
alias: neon-metrics-service
version: 0.0.6
version: 0.0.7
repository: file://../../mq/neon-metrics-service
- name: neon-script-parser
alias: neon-script-parser
version: 0.0.5
version: 0.0.6
repository: file://../../mq/neon-script-parser
- name: neon-llm-chatgpt
alias: neon-llm-chatgpt
version: 0.0.6
version: 0.0.7
repository: file://../../mq/neon-llm-chatgpt
- name: neon-llm-fastchat
alias: neon-llm-fastchat
version: 0.0.6
repository: file://../../mq/neon-llm-fastchat
- name: neon-llm-claude
alias: neon-llm-claude
version: 0.0.2
repository: file://../../mq/neon-llm-claude
- name: neon-llm-gemini
alias: neon-llm-gemini
version: 0.0.2
repository: file://../../mq/neon-llm-gemini
- name: neon-llm-palm
alias: neon-llm-palm
version: 0.0.6
repository: file://../../mq/neon-llm-palm
2 changes: 1 addition & 1 deletion neon_diana_utils/helm_charts/base/base-mq/Chart.yaml
Original file line number Diff line number Diff line change
Expand Up @@ -3,5 +3,5 @@ name: base-mq
description: Library chart for basic MQ Services

type: library
version: 0.0.5
version: 0.0.10
appVersion: "1.0.1a18"
Original file line number Diff line number Diff line change
Expand Up @@ -41,8 +41,15 @@ spec:
- name: config
projected:
sources:
{{- if .Values.configSecrets }}
{{- range .Values.configSecrets }}
- secret:
name: {{ . }}
{{- end }}
{{ else }}
- secret:
name: {{ .Values.configSecret }}
{{- end }}
{{- if .Values.persistentVolumeClaim }}
- name: {{ .Values.persistentVolumeClaim.name }}
persistentVolumeClaim:
Expand Down
4 changes: 2 additions & 2 deletions neon_diana_utils/helm_charts/mq/neon-api-proxy/Chart.yaml
Original file line number Diff line number Diff line change
Expand Up @@ -3,10 +3,10 @@ name: neon-api-proxy
description: Deploy an MQ API Proxy

type: application
version: 0.0.5
version: 0.0.6
appVersion: "1.0.1a18"

dependencies:
- name: base-mq
version: 0.0.5
version: 0.0.10
repository: file://../../base/base-mq
Original file line number Diff line number Diff line change
Expand Up @@ -3,10 +3,10 @@ name: neon-brands-service
description: Deploy an MQ Brands Service

type: application
version: 0.0.5
version: 0.0.6
appVersion: "1.0.1a18"

dependencies:
- name: base-mq
version: 0.0.5
version: 0.0.10
repository: file://../../base/base-mq
4 changes: 2 additions & 2 deletions neon_diana_utils/helm_charts/mq/neon-email-proxy/Chart.yaml
Original file line number Diff line number Diff line change
Expand Up @@ -3,10 +3,10 @@ name: neon-email-proxy
description: Deploy an Email Proxy Service

type: application
version: 0.0.5
version: 0.0.6
appVersion: "1.0.1a18"

dependencies:
- name: base-mq
version: 0.0.5
version: 0.0.10
repository: file://../../base/base-mq
4 changes: 2 additions & 2 deletions neon_diana_utils/helm_charts/mq/neon-llm-chatgpt/Chart.yaml
Original file line number Diff line number Diff line change
Expand Up @@ -3,10 +3,10 @@ name: neon-llm-chatgpt
description: Deploy an LLM proxy for Chat GPT

type: application
version: 0.0.6
version: 0.0.7
appVersion: "1.0.1a18"

dependencies:
- name: base-mq
version: 0.0.5
version: 0.0.10
repository: file://../../base/base-mq
23 changes: 23 additions & 0 deletions neon_diana_utils/helm_charts/mq/neon-llm-claude/.helmignore
Original file line number Diff line number Diff line change
@@ -0,0 +1,23 @@
# Patterns to ignore when building packages.
# This supports shell glob matching, relative path matching, and
# negation (prefixed with !). Only one pattern per line.
.DS_Store
# Common VCS dirs
.git/
.gitignore
.bzr/
.bzrignore
.hg/
.hgignore
.svn/
# Common backup files
*.swp
*.bak
*.tmp
*.orig
*~
# Various IDEs
.project
.idea/
*.tmproj
.vscode/
12 changes: 12 additions & 0 deletions neon_diana_utils/helm_charts/mq/neon-llm-claude/Chart.yaml
Original file line number Diff line number Diff line change
@@ -0,0 +1,12 @@
apiVersion: v2
name: neon-llm-claude
description: Deploy an LLM proxy for Claude by Anthropic

type: application
# Chart version; increment on any change to this chart or its templates.
version: 0.0.2
# Version of the deployed application (NOTE(review): other LLM charts in
# this commit pin appVersion "1.0.1a18" — confirm "1.0.1a16" is intentional).
appVersion: "1.0.1a16"

dependencies:
  # Shared library chart providing the common MQ-service deployment template.
  - name: base-mq
    version: 0.0.10
    repository: file://../../base/base-mq
Loading

0 comments on commit 62d3eee

Please sign in to comment.