diff --git a/CHANGELOG.md b/CHANGELOG.md
index 3b4c04c8e..813deb9bd 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -11,10 +11,10 @@ and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0
- Added the ability to switch from assertive (default) to polite aria modes, in the help menu (#309).
- Added OpenAI GPT4-vision query system. Hit ? from the main chart to toggle on. (#317))
+- Added a suggestions system so users can interact more easily by clicking suggested prompts
### Fixed
-
- LLM popup now only triggered by ?, not /
- LLM truncating responses, #322
@@ -27,7 +27,6 @@ and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0
- Added lineplot, stacked bar, dodged bar, and normalized dodge bar info to the README (#310).
- Added Code of Conduct file in the project.
-
## [1.0.4] - 2023-11-30
### Added
diff --git a/src/css/styles.css b/src/css/styles.css
index f21d9c1fc..7e1dab7cb 100644
--- a/src/css/styles.css
+++ b/src/css/styles.css
@@ -299,3 +299,9 @@ textarea {
.chatLLM_message_other {
align-self: flex-start;
}
+
+.LLM_suggestions > button {
+ font-weight: normal;
+ border-radius: 0.5rem;
+ border: none;
+}
diff --git a/src/js/constants.js b/src/js/constants.js
index a43d74a29..9a71020f5 100644
--- a/src/js/constants.js
+++ b/src/js/constants.js
@@ -672,6 +672,11 @@ class ChatLLM {
+
+
+
+
+
@@ -744,6 +749,22 @@ class ChatLLM {
}
},
]);
+
+ // ChatLLM suggestion events
+ let suggestions = document.querySelectorAll(
+ '#chatLLM .LLM_suggestions button'
+ );
+ for (let i = 0; i < suggestions.length; i++) {
+ constants.events.push([
+ suggestions[i],
+ 'click',
+ function (e) {
+ let text = e.target.innerHTML;
+ chatLLM.DisplayChatMessage('User', text);
+ chatLLM.Submit(text);
+ },
+ ]);
+ }
}
/**
@@ -765,8 +786,16 @@ class ChatLLM {
let xhr = new XMLHttpRequest();
+ // start waiting sound
+ if (constants.sonifMode != 'off') {
+ chatLLM.WaitingSound(true);
+ }
+
if (constants.LLMDebugMode == 1) {
- chatLLM.ProcessLLMResponse(this.fakeLLMResponseData());
+    // debug mode: simulate LLM latency by returning fake data after a 5 s delay
+ setTimeout(function () {
+ chatLLM.ProcessLLMResponse(chatLLM.fakeLLMResponseData());
+ }, 5000);
} else {
fetch(url, {
method: 'POST',
@@ -781,18 +810,58 @@ class ChatLLM {
chatLLM.ProcessLLMResponse(data);
})
.catch((error) => {
+ chatLLM.WaitingSound(false);
console.error('Error:', error);
// also todo: handle errors somehow
});
}
}
+  /**
+   * Plays a repeating waiting sound while waiting for the LLM to respond.
+   * @function
+   * @name WaitingSound
+   * @memberof ChatLLM
+   * @param {boolean} [onoff=true] - Whether to turn the waiting sound on or off.
+   * @returns {void}
+   */
+ WaitingSound(onoff = true) {
+ // clear old intervals and timeouts
+ if (constants.waitingInterval) {
+ // destroy old waiting sound
+ clearInterval(constants.waitingInterval);
+ constants.waitingSound = null;
+ }
+ if (constants.waitingSoundOverride) {
+ clearTimeout(constants.waitingSoundOverride);
+ constants.waitingSoundOverride = null;
+ }
+
+ // assuming we're turning it on, start playing a new waiting sound
+ if (onoff) {
+ // create new waiting sound
+ let delay = 1000;
+ let freq = 440; // a440 babee
+ constants.waitingInterval = setInterval(function () {
+ if (audio) {
+ audio.playOscillator(freq, 0.2, 0);
+ }
+ }, delay);
+
+ // clear automatically after 30 sec, assuming no response
+ constants.waitingSoundOverride = setTimeout(function () {
+ chatLLM.WaitingSound(false);
+ }, 30000);
+ }
+ }
+
/**
* Processes the response from the LLM and displays it to the user.
* @function
* @returns {void}
*/
ProcessLLMResponse(data) {
+ chatLLM.WaitingSound(false);
console.log('LLM response: ', data);
let text = data.choices[0].message.content;
chatLLM.DisplayChatMessage('LLM', text);
@@ -869,7 +938,7 @@ class ChatLLM {
this.requestJson = {};
this.requestJson.model = 'gpt-4-vision-preview';
this.requestJson.max_tokens = constants.LLMmaxResponseTokens; // note: if this is too short (tested with less than 200), the response gets cut off
- this.requestJson.detail = constants.LLMDetail;
+ //this.requestJson.detail = constants.LLMDetail;
this.requestJson.messages = [];
this.requestJson.messages[0] = {};
this.requestJson.messages[0].role = 'system';