diff --git a/README.md b/README.md
index 5d809cf..48c5df1 100644
--- a/README.md
+++ b/README.md
@@ -1,10 +1,11 @@
# tinyterraNodes
A selection of custom nodes for [ComfyUI](https://github.com/comfyanonymous/ComfyUI).
-
+
+
## Installation
-Navigate to the **_ComfyUI/custom_nodes_** directory, and run:
+Navigate to the **_ComfyUI/custom_nodes_** directory in a terminal (e.g. cmd), and run:
`git clone https://github.com/TinyTerra/ComfyUI_tinyterraNodes.git`
@@ -22,13 +23,31 @@ Navigate to the **_ComfyUI/custom_nodes_** directory, and run:
+ Automatically hides and shows widgets depending on their relevancy
+ Option to disable ([ttNodes] enable_dynamic_widgets = True | False)
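+
+For reference, these toggles live in a `config.ini` generated in the extension folder. A minimal sketch of the relevant section (option names and defaults taken from `__init__.py` in this repo):
+
+```ini
+[ttNodes]
+enable_interface = True
+enable_dynamic_widgets = True
+enable_embed_autocomplete = True
+```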
-**ttNstyles**
+**ttNinterface**
*Enabled by default*
-+ Sets the link color for PIPE_LINE and INT (customizable within config.ini [ttNstyles])
-+ Option to update default node background color automatically when added ([ttNstyles] node_bg_override = default | red | brown | green | blue | pale_blue | cyan | purple | yellow | black)
-+ Option to change the default link line type ([ttNstyles] link_type = spline | straight | direct)
++ Adds 'Node Dimensions (ttN)' to the node right-click context menu
Allows setting specific node Width and Height values as long as they are above the minimum size for the given node.
++ Adds support for 'ctrl + arrow key' Node movement
This aligns the node(s) to the configured ComfyUI grid spacing and moves them in the direction of the arrow key by the grid spacing value. Holding shift as well moves the node(s) by 10× the grid spacing.
++ Adds 'Reload Node (ttN)' to the node right-click context menu
Creates a new instance of the node with the same position, size, color and title (this will disconnect any IO wires). It attempts to retain the set widget values, which is useful for replacing nodes when a node/widget update occurs.
++ Adds 'Slot Type Color (ttN)' to the Link right-click context menu
Opens a color picker dialog to update the color of the selected link type.
++ Adds 'Link Style (ttN)' to the Link right-click context menu
Sets the default link line type.
+
+**Save image prefix parsing**
+
++ Add date/time info to filenames by using: `%date:yyyy-MM-dd-hh-mm-ss%`
++ Parse any upstream setting into filenames by using `%[widget_name]%` (for a widget on the current node)
+or `%[input_name]>[input_name]>[widget_name]%` (for widgets on nodes feeding into it)
+ Example:
+
+
+ 
+
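+For instance (illustrative only — widget names depend on your workflow), a save prefix of `%date:yyyy-MM-dd-hh-mm-ss%_%seed%` on a node that has a `seed` widget would produce filenames starting with something like `2023-07-01-12-30-45_123456789`.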
+
+**Node Versioning**
+
++ All tinyterraNodes now have a version property, so if a future widget change would break existing workflows, the affected nodes are highlighted on load (recolored red with a 'Node Version Mismatch' title suffix)
++ This only applies to workflows created/saved after the v1.0.0 release
**AutoUpdate**
@@ -110,13 +129,21 @@ Convert ttN pipe line to detailer pipe (to be compatible with [ImpactPack](https
+ _**Outputs -** detailer_pipe[model, vae, conditioning, conditioning, bbox_detector, sam_model_opt], pipe_
+
+ pipe > xyPlot
+
+Connects to the pipeKSampler's xyPlot input to generate XY plots using sampler and loader values. (Any values not set by xyPlot are taken from the corresponding pipeKSampler or pipeLoader.)
++ _**Inputs -** grid_spacing, latent_id, flip_xy, x_axis, x_values, y_axis, y_values_
++ _**Outputs -** xyPlot_
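++ Example (illustrative values; the axis names mirror the loader/sampler widget names): x_axis = `sampler: steps`, x_values = `20; 30; 40`, y_axis = `sampler: cfg`, y_values = `6.0; 7.0; 8.0`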
+
+
## ttN/image
imageOutput
Preview or Save an image with one node, with image throughput.
-+ _**Inputs -** image, image output[Preview, Save], save prefix_
++ _**Inputs -** image, image output[Hide, Preview, Save, Hide/Save], output path, save prefix, number padding[None, 2-9], overwrite existing[True, False], embed workflow[True, False]_
+ _**Outputs -** image_
diff --git a/__init__.py b/__init__.py
index 9815e80..efe96f5 100644
--- a/__init__.py
+++ b/__init__.py
@@ -1,37 +1,28 @@
-from pathlib import Path
+from .tinyterraNodes import TTN_VERSIONS
import configparser
import folder_paths
import subprocess
import shutil
-import json
import sys
import os
# ------- CONFIG -------- #
-cwd_path = Path(__file__).parent
-comfy_path = cwd_path.parent.parent
-sitepkg = comfy_path.parent / 'python_embeded' / 'Lib' / 'site-packages'
-script_path = comfy_path.parent / 'python_embeded' / 'Scripts'
+cwd_path = os.path.dirname(os.path.realpath(__file__))
+comfy_path = folder_paths.base_path
-sys.path.append(str(sitepkg))
-sys.path.append(str(script_path))
-
-config_path = cwd_path / "config.ini"
-
-default_pipe_color = "#7737AA"
-default_int_color = "#29699C"
+config_path = os.path.join(cwd_path, "config.ini")
optionValues = {
"auto_update": ('true', 'false'),
"install_rembg": ('true', 'false'),
"enable_embed_autocomplete": ('true', 'false'),
- "apply_custom_styles": ('true', 'false'),
+ "enable_interface": ('true', 'false'),
"enable_dynamic_widgets": ('true', 'false'),
"enable_dev_nodes": ('true', 'false'),
- "link_type": ('spline', 'straight', 'direct'),
"default_node_bg_color": ('default', 'red', 'brown', 'green', 'blue', 'pale_blue', 'cyan', 'purple', 'yellow', 'black'),
"pipe_line": "HEX Color Code (pipe_line link color)",
"int": "HEX Color Code (int link color)",
+ "xyplot": "HEX Color Code (xyplot link color)",
}
def get_config():
@@ -42,6 +33,9 @@ def get_config():
def update_config():
#section > option > value
+ for node, version in TTN_VERSIONS.items():
+ config_write("Versions", node, version)
+
for option, value in optionValues.items():
config_write("Option Values", option, value)
@@ -49,16 +43,10 @@ def update_config():
"ttNodes": {
"auto_update": False,
"install_rembg": True,
- "apply_custom_styles": True,
+ "enable_interface": True,
"enable_embed_autocomplete": True,
"enable_dynamic_widgets": True,
"enable_dev_nodes": False,
- },
- "ttNstyles": {
- "default_node_bg_color": 'default',
- "link_type": "spline",
- "pipe_line": default_pipe_color,
- "int": default_int_color,
}
}
@@ -72,6 +60,8 @@ def update_config():
# Iterate through the configuration data.
for section, options in config_data.items():
+ if section == "Versions":
+ continue
for option in options:
# If the option is not in `optionValues` or in `section_data`, remove it.
if (option not in optionValues and
@@ -110,17 +100,6 @@ def copy_to_web(file):
"""Copy a file to the web extension path."""
shutil.copy(file, web_extension_path)
-def config_hex_code_validator(section, option, default):
- hex_code = config_read(section, option)
- try:
- int(hex_code[1:], 16) # Convert hex code without the '#' symbol
- if len(hex_code) == 7 and hex_code.startswith('#'):
- return hex_code
- except ValueError:
- print(f'\033[92m[{section} Config]\033[91m {option} - \'{hex_code}\' is not a valid hex code, reverting to default.\033[0m')
- config_write(section, option, default)
- return default
-
def config_value_validator(section, option, default):
value = str(config_read(section, option)).lower()
if value not in optionValues[option]:
@@ -168,16 +147,15 @@ def config_value_validator(section, option, default):
# --------- WEB ---------- #
web_extension_path = os.path.join(comfy_path, "web", "extensions", "tinyterraNodes")
-embedLISTfile = os.path.join(web_extension_path, "embeddingsList.json")
ttNstyles_JS_file_web = os.path.join(web_extension_path, "ttNstyles.js")
ttN_JS_file = os.path.join(cwd_path, "js", "ttN.js")
-ttNstyles_JS_file = os.path.join(cwd_path, "js", "ttNstyles.js")
+ttNxyPlot_JS_file = os.path.join(cwd_path, "js", "ttNxyPlot.js")
ttNembedAC_JS_file = os.path.join(cwd_path, "js", "ttNembedAC.js")
ttNwidgets_JS_file = os.path.join(cwd_path, "js", "ttNwidgets.js")
+ttNinterface_JS_file = os.path.join(cwd_path, "js", "ttNinterface.js")
ttNdynamicWidgets_JS_file = os.path.join(cwd_path, "js", "ttNdynamicWidgets.js")
-
if not os.path.exists(web_extension_path):
os.makedirs(web_extension_path)
else:
@@ -186,44 +164,14 @@ def config_value_validator(section, option, default):
copy_to_web(ttN_JS_file)
copy_to_web(ttNwidgets_JS_file)
+copy_to_web(ttNxyPlot_JS_file)
# Enable Custom Styles if True
-if config_value_validator("ttNodes", "apply_custom_styles", 'true') == 'true':
- link_type = config_read("ttNstyles", "link_type")
- if link_type == "straight":
- link_type = 0
- elif link_type == "direct":
- link_type = 1
- else:
- link_type = 2
-
- pipe_line_color = config_hex_code_validator("ttNstyles", "pipe_line", default_pipe_color)
- int_color = config_hex_code_validator("ttNstyles", "int", default_int_color)
- bg_override_color = config_value_validator("ttNstyles", "default_node_bg_color", 'default')
-
- # Apply config values to styles JS
- with open(ttNstyles_JS_file, 'r') as file:
- stylesJSlines = file.readlines()
-
- for i in range(len(stylesJSlines)):
- if "const customPipeLineLink" in stylesJSlines[i]:
- stylesJSlines[i] = f'const customPipeLineLink = "{pipe_line_color}"\n'
- if "const customIntLink" in stylesJSlines[i]:
- stylesJSlines[i] = f'const customIntLink = "{int_color}"\n'
- if "const customLinkType" in stylesJSlines[i]:
- stylesJSlines[i] = f'const customLinkType = {link_type}\n'
- if "const overrideBGColor" in stylesJSlines[i]:
- stylesJSlines[i] = f'const overrideBGColor = "{bg_override_color}"\n'
-
- with open(ttNstyles_JS_file_web, 'w') as file:
- file.writelines(stylesJSlines)
+if config_value_validator("ttNodes", "enable_interface", 'true') == 'true':
+ copy_to_web(ttNinterface_JS_file)
# Enable Embed Autocomplete if True
if config_value_validator("ttNodes", "enable_embed_autocomplete", "true") == 'true':
- embeddings_list = folder_paths.get_filename_list("embeddings")
- with open(embedLISTfile, 'w') as file:
- json.dump(embeddings_list, file)
-
copy_to_web(ttNembedAC_JS_file)
# Enable Dynamic Widgets if True
diff --git a/adv_encode.py b/adv_encode.py
index 72267ae..3232944 100644
--- a/adv_encode.py
+++ b/adv_encode.py
@@ -185,7 +185,7 @@ def advanced_encode_from_tokens(clip, tokenized, token_normalization, weight_int
weights = scale_to_norm(weights, word_ids, w_max)
weighted_emb = down_weight(unweighted_tokens, weights, word_ids, base_emb, clip)
- return weighted_emb
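+ # Wrap the result in ComfyUI's CONDITIONING format: a list of [cond_tensor, options_dict] pairs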
+ return [[weighted_emb,{}]]
def advanced_encode(clip, text, token_normalization, weight_interpretation, w_max=1.0):
tokenized = clip.tokenize(text, return_word_ids=True)
diff --git a/js/ttN.js b/js/ttN.js
index ed4f709..825c185 100644
--- a/js/ttN.js
+++ b/js/ttN.js
@@ -3,8 +3,103 @@ import { ComfyWidgets } from "/scripts/widgets.js";
app.registerExtension({
name: "comfy.ttN",
- async beforeRegisterNodeDef(nodeType, nodeData, app) {
- if (nodeData.name === "ttN textDebug") {
+ init() {
+ const ttNreloadNode = function (node) {
+ const nodeType = node.constructor.type;
+ const nodeTitle = node.properties.origVals ? node.properties.origVals.title : node.title
+ const nodeColor = node.properties.origVals ? node.properties.origVals.color : node.color
+ const bgColor = node.properties.origVals ? node.properties.origVals.bgcolor : node.bgcolor
+ const oldNode = node
+ const options = {'size': [node.size[0], node.size[1]],
+ 'color': nodeColor,
+ 'bgcolor': bgColor,
+ 'pos': [node.pos[0], node.pos[1]]}
+
+ let prevValObj = { 'val': undefined };
+
+ app.graph.remove(node)
+ const newNode = app.graph.add(LiteGraph.createNode(nodeType, nodeTitle, options));
+
+ if (newNode?.constructor?.hasOwnProperty('ttNnodeVersion')) {
+ newNode.properties.ttNnodeVersion = newNode.constructor.ttNnodeVersion;
+ }
+
+
+ function evalWidgetValues(testValue, newWidg, prevValObj) {
+ let prevVal = prevValObj.val;
+ if (prevVal !== undefined && evalWidgetValues(prevVal, newWidg, {'val': undefined}) === prevVal) {
+ const newVal = prevValObj.val
+ prevValObj.val = testValue
+ return newVal
+ }
+ else if ((newWidg.options?.values && newWidg.options.values.includes(testValue)) ||
+ (newWidg.options?.min <= testValue && testValue <= newWidg.options.max) ||
+ (newWidg.inputEl)) {
+ return testValue
+ }
+ else {
+ prevValObj.val = testValue
+ return newWidg.value
+ }
+ }
+
+ for (const oldWidget of oldNode.widgets ? oldNode.widgets : []) {
+ for (const newWidget of newNode.widgets ? newNode.widgets : []) {
+ if (newWidget.name === oldWidget.name) {
+ newWidget.value = evalWidgetValues(oldWidget.value, newWidget, prevValObj);
+ }
+ }
+ }
+ };
+
+ const getNodeMenuOptions = LGraphCanvas.prototype.getNodeMenuOptions;
+ LGraphCanvas.prototype.getNodeMenuOptions = function (node) {
+ const options = getNodeMenuOptions.apply(this, arguments);
+ node.setDirtyCanvas(true, true);
+
+ options.splice(options.length - 1, 0,
+ {
+ content: "Reload Node (ttN)",
+ callback: () => {
+ var graphcanvas = LGraphCanvas.active_canvas;
+ if (!graphcanvas.selected_nodes || Object.keys(graphcanvas.selected_nodes).length <= 1){
+ ttNreloadNode(node);
+ }else{
+ for (var i in graphcanvas.selected_nodes) {
+ ttNreloadNode(graphcanvas.selected_nodes[i]);
+ }
+ }
+ }
+ },
+ );
+ return options;
+ };
+ },
+ beforeRegisterNodeDef(nodeType, nodeData, app) {
+ if (nodeData.name.startsWith("ttN")) {
+ const origOnConfigure = nodeType.prototype.onConfigure;
+ nodeType.prototype.onConfigure = function () {
+ const r = origOnConfigure ? origOnConfigure.apply(this, arguments) : undefined;
+ let nodeVersion = nodeData.input.hidden?.ttNnodeVersion ? nodeData.input.hidden.ttNnodeVersion : null;
+ nodeType.ttNnodeVersion = nodeVersion;
+ this.properties['ttNnodeVersion'] = this.properties['ttNnodeVersion']?this.properties['ttNnodeVersion']:nodeVersion;
+ if (this.properties['ttNnodeVersion'] !== nodeVersion) {
+ if (!this.properties['origVals']) {
+ this.properties['origVals'] = {bgcolor: this.bgcolor, color: this.color, title: this.title}
+ }
+ this.bgcolor = "#d82129";
+ this.color = "#bd000f";
+ this.title = this.title.includes("Node Version Mismatch") ? this.title : this.title + " - Node Version Mismatch"
+ } else if (this.properties['origVals']) {
+ this.bgcolor = this.properties.origVals.bgcolor;
+ this.color = this.properties.origVals.color;
+ this.title = this.properties.origVals.title;
+ delete this.properties['origVals']
+ }
+ return r;
+ };
+ }
+ if (nodeData.name === "ttN textDebug") {
const onNodeCreated = nodeType.prototype.onNodeCreated;
nodeType.prototype.onNodeCreated = function () {
const r = onNodeCreated?.apply(this, arguments);
@@ -23,17 +118,19 @@ app.registerExtension({
this.onResize?.(this.size);
};
}
- if (nodeData.name === "ttN pipeLoader") {
- const onNodeCreated = nodeType.prototype.onNodeCreated;
- nodeType.prototype.onNodeCreated = function () {
- const r = onNodeCreated?.apply(this, arguments);
- this.widgets[22].value = "fixed"
- return r;
- };
- }
},
+ nodeCreated(node) {
+ if (node.getTitle() === "pipeLoader") {
+ for (let widget of node.widgets) {
+ if (widget.name === "control_after_generate") {
+ widget.value = "fixed"
+ }
+ }
+ }
+ },
});
+
// ttN Dropdown
var styleElement = document.createElement("style");
const cssCode = `
@@ -69,100 +166,155 @@ document.head.appendChild(styleElement);
let activeDropdown = null;
-function createDropdown(inputEl, suggestions, onSelect) {
+export function ttN_RemoveDropdown() {
if (activeDropdown) {
- activeDropdown.remove();
+ activeDropdown.removeEventListeners();
+ activeDropdown.dropdown.remove();
activeDropdown = null;
}
+}
+
+class Dropdown {
+ constructor(inputEl, suggestions, onSelect) {
+ this.dropdown = document.createElement('ul');
+ this.dropdown.setAttribute('role', 'listbox');
+ this.dropdown.classList.add('ttN-dropdown');
+ this.selectedIndex = -1;
+ this.inputEl = inputEl;
+ this.suggestions = suggestions;
+ this.onSelect = onSelect;
- const dropdown = document.createElement('ul');
- dropdown.setAttribute('role', 'listbox');
- dropdown.classList.add('ttN-dropdown');
+ this.buildDropdown();
- let selectedIndex = -1;
+ this.onKeyDownBound = this.onKeyDown.bind(this);
+ this.onWheelBound = this.onWheel.bind(this);
+ this.onClickBound = this.onClick.bind(this);
- suggestions.forEach((suggestion, index) => {
+ this.addEventListeners();
+ }
+
+ buildDropdown() {
+ this.suggestions.forEach((suggestion, index) => {
const listItem = document.createElement('li');
listItem.setAttribute('role', 'option');
listItem.textContent = suggestion;
- listItem.addEventListener('mouseover', function () {
- selectedIndex = index;
- updateSelection();
- });
- listItem.addEventListener('mouseout', function () {
- selectedIndex = -1;
- updateSelection();
- });
- listItem.addEventListener('mousedown', function (event) {
- event.preventDefault();
- onSelect(suggestion);
- dropdown.remove();
- });
- dropdown.appendChild(listItem);
- });
-
- const inputRect = inputEl.getBoundingClientRect();
- dropdown.style.top = (inputRect.top + inputRect.height) + 'px';
- dropdown.style.left = inputRect.left + 'px';
- dropdown.style.width = inputRect.width + 'px';
-
- document.body.appendChild(dropdown);
- activeDropdown = dropdown;
-
- function updateSelection() {
- Array.from(dropdown.children).forEach((li, index) => {
- if (index === selectedIndex) {
- li.classList.add('selected');
- } else {
- li.classList.remove('selected');
- }
- });
+ listItem.addEventListener('mouseover', this.onMouseOver.bind(this, index));
+ listItem.addEventListener('mouseout', this.onMouseOut.bind(this));
+ listItem.addEventListener('mousedown', this.onMouseDown.bind(this, suggestion));
+ this.dropdown.appendChild(listItem);
+ });
+
+ const inputRect = this.inputEl.getBoundingClientRect();
+ this.dropdown.style.top = (inputRect.top + inputRect.height) + 'px';
+ this.dropdown.style.left = inputRect.left + 'px';
+ this.dropdown.style.width = inputRect.width + 'px';
+
+ document.body.appendChild(this.dropdown);
+ activeDropdown = this;
+ }
+
+ addEventListeners() {
+ document.addEventListener('keydown', this.onKeyDownBound);
+ this.dropdown.addEventListener('wheel', this.onWheelBound);
+ document.addEventListener('click', this.onClickBound);
+ }
+
+ removeEventListeners() {
+ document.removeEventListener('keydown', this.onKeyDownBound);
+ this.dropdown.removeEventListener('wheel', this.onWheelBound);
+ document.removeEventListener('click', this.onClickBound);
+ }
+
+ onMouseOver(index) {
+ this.selectedIndex = index;
+ this.updateSelection();
+ }
+
+ onMouseOut() {
+ this.selectedIndex = -1;
+ this.updateSelection();
+ }
+
+ onMouseDown(suggestion, event) {
+ event.preventDefault();
+ this.onSelect(suggestion);
+ this.dropdown.remove();
+ this.removeEventListeners();
}
- inputEl.addEventListener('keydown', function (event) {
+ onKeyDown(event) {
const enterKeyCode = 13;
const escKeyCode = 27;
const arrowUpKeyCode = 38;
const arrowDownKeyCode = 40;
- const arrowRightKeyCode = 39;
- const arrowLeftKeyCode = 37;
-
- if (event.keyCode === arrowUpKeyCode) {
- event.preventDefault();
- selectedIndex = Math.max(0, selectedIndex - 1);
- updateSelection();
- } else if (event.keyCode === arrowDownKeyCode) {
- event.preventDefault();
- selectedIndex = Math.min(suggestions.length - 1, selectedIndex + 1);
- updateSelection();
- } else if (event.keyCode === arrowLeftKeyCode) {
- event.preventDefault();
- selectedIndex = 0; // Go to the first item
- updateSelection();
- } else if (event.keyCode === arrowRightKeyCode) {
- event.preventDefault();
- selectedIndex = suggestions.length - 1; // Go to the last item
- updateSelection();
- } else if (event.keyCode === enterKeyCode && selectedIndex >= 0) {
- event.preventDefault();
- onSelect(suggestions[selectedIndex]);
- dropdown.remove();
- } else if (event.keyCode === escKeyCode) {
- dropdown.remove();
+ const tabKeyCode = 9;
+
+ if (activeDropdown) {
+ if (event.keyCode === arrowUpKeyCode) {
+ event.preventDefault();
+ this.selectedIndex = Math.max(0, this.selectedIndex - 1);
+ this.updateSelection();
+ } else if (event.keyCode === arrowDownKeyCode) {
+ event.preventDefault();
+ this.selectedIndex = Math.min(this.suggestions.length - 1, this.selectedIndex + 1);
+ this.updateSelection();
+ } else if (event.keyCode === enterKeyCode) {
+ if (this.selectedIndex >= 0) {
+ event.preventDefault();
+ this.onSelect(this.suggestions[this.selectedIndex]);
+ this.dropdown.remove();
+ this.removeEventListeners();
+ } else {
+ event.preventDefault();
+ }
+ } else if (event.keyCode === tabKeyCode) {
+ if (this.selectedIndex >= 0) {
+ event.preventDefault();
+ this.onSelect(this.suggestions[this.selectedIndex]);
+ this.dropdown.remove();
+ this.removeEventListeners();
+ } else {
+ event.preventDefault();
+ }
+ } else if (event.keyCode === escKeyCode) {
+ this.dropdown.remove();
+ this.removeEventListeners();
+ }
+ } else {
+ if (event.keyCode === enterKeyCode) {
+ event.preventDefault();
+ }
}
- });
+ }
- dropdown.addEventListener('wheel', function (event) {
- // Update dropdown.style.top by +/- 10px based on scroll direction
- const top = parseInt(dropdown.style.top);
- dropdown.style.top = (top + (event.deltaY < 0 ? -10 : 10)) + "px";
- });
+ onWheel(event) {
+ const top = parseInt(this.dropdown.style.top);
+ if (localStorage.getItem("Comfy.Settings.Comfy.InvertMenuScrolling")) {
+ this.dropdown.style.top = (top + (event.deltaY < 0 ? 10 : -10)) + "px";
+ } else {
+ this.dropdown.style.top = (top + (event.deltaY < 0 ? -10 : 10)) + "px";
+ }
+ }
- document.addEventListener('click', function (event) {
- if (!dropdown.contains(event.target)) {
- dropdown.remove();
+ onClick(event) {
+ if (!this.dropdown.contains(event.target) && event.target !== this.inputEl) {
+ this.dropdown.remove();
+ this.removeEventListeners();
}
- });
+ }
+
+ updateSelection() {
+ Array.from(this.dropdown.children).forEach((li, index) => {
+ if (index === this.selectedIndex) {
+ li.classList.add('selected');
+ } else {
+ li.classList.remove('selected');
+ }
+ });
+ }
}
-export {createDropdown};
\ No newline at end of file
+export function ttN_CreateDropdown(inputEl, suggestions, onSelect) {
+ ttN_RemoveDropdown();
+ new Dropdown(inputEl, suggestions, onSelect);
+}
\ No newline at end of file
diff --git a/js/ttNdynamicWidgets.js b/js/ttNdynamicWidgets.js
index bc29572..7a56a6e 100644
--- a/js/ttNdynamicWidgets.js
+++ b/js/ttNdynamicWidgets.js
@@ -67,28 +67,42 @@ function widgetLogic(node, widget) {
toggleWidget(node, findWidgetByName(node, 'percent'))
toggleWidget(node, findWidgetByName(node, 'width'))
toggleWidget(node, findWidgetByName(node, 'height'))
+ toggleWidget(node, findWidgetByName(node, 'longer_side'))
toggleWidget(node, findWidgetByName(node, 'crop'))
} else {
toggleWidget(node, findWidgetByName(node, 'rescale_method'), true)
toggleWidget(node, findWidgetByName(node, 'rescale'), true)
- if (findWidgetByName(node, 'rescale').value === 'by percentage') {
+
+ let rescale_value = findWidgetByName(node, 'rescale').value
+
+ if (rescale_value === 'by percentage') {
toggleWidget(node, findWidgetByName(node, 'percent'), true)
- } else {
+ } else if (rescale_value === 'to Width/Height') {
toggleWidget(node, findWidgetByName(node, 'width'), true)
toggleWidget(node, findWidgetByName(node, 'height'), true)
+ } else {
+ toggleWidget(node, findWidgetByName(node, 'longer_side'), true)
}
toggleWidget(node, findWidgetByName(node, 'crop'), true)
}
}
if (widget.name === 'rescale') {
- if (widget.value === 'by percentage' && findWidgetByName(node, 'rescale_after_model').value === true) {
+ let rescale_after_model = findWidgetByName(node, 'rescale_after_model').value
+ if (widget.value === 'by percentage' && rescale_after_model) {
toggleWidget(node, findWidgetByName(node, 'width'))
toggleWidget(node, findWidgetByName(node, 'height'))
+ toggleWidget(node, findWidgetByName(node, 'longer_side'))
toggleWidget(node, findWidgetByName(node, 'percent'), true)
- } else if (widget.value === 'to Width/Height' && findWidgetByName(node, 'rescale_after_model').value === true) {
+ } else if (widget.value === 'to Width/Height' && rescale_after_model) {
toggleWidget(node, findWidgetByName(node, 'width'), true)
toggleWidget(node, findWidgetByName(node, 'height'), true)
toggleWidget(node, findWidgetByName(node, 'percent'))
+ toggleWidget(node, findWidgetByName(node, 'longer_side'))
+ } else if (rescale_after_model) {
+ toggleWidget(node, findWidgetByName(node, 'longer_side'), true)
+ toggleWidget(node, findWidgetByName(node, 'width'))
+ toggleWidget(node, findWidgetByName(node, 'height'))
+ toggleWidget(node, findWidgetByName(node, 'percent'))
}
}
if (widget.name === 'upscale_method') {
@@ -105,10 +119,14 @@ function widgetLogic(node, widget) {
toggleWidget(node, findWidgetByName(node, 'save_prefix'))
toggleWidget(node, findWidgetByName(node, 'output_path'))
toggleWidget(node, findWidgetByName(node, 'embed_workflow'))
+ toggleWidget(node, findWidgetByName(node, 'number_padding'))
+ toggleWidget(node, findWidgetByName(node, 'overwrite_existing'))
} else if (widget.value === 'Save' || widget.value === 'Hide/Save') {
toggleWidget(node, findWidgetByName(node, 'save_prefix'), true)
toggleWidget(node, findWidgetByName(node, 'output_path'), true)
toggleWidget(node, findWidgetByName(node, 'embed_workflow'), true)
+ toggleWidget(node, findWidgetByName(node, 'number_padding'), true)
+ toggleWidget(node, findWidgetByName(node, 'overwrite_existing'), true)
}
}
}
@@ -128,8 +146,10 @@ function getSetters(node) {
return widgetValue;
},
set(newVal) {
- widgetValue = newVal;
- widgetLogic(node, w);
+ if (newVal !== widgetValue) {
+ widgetValue = newVal;
+ widgetLogic(node, w);
+ }
}
});
}
diff --git a/js/ttNembedAC.js b/js/ttNembedAC.js
index 203702d..b37690a 100644
--- a/js/ttNembedAC.js
+++ b/js/ttNembedAC.js
@@ -1,21 +1,18 @@
import { app } from "/scripts/app.js";
-import { createDropdown } from "./ttN.js";
-
-fetch('extensions/tinyterraNodes/embeddingsList.json')
- .then(response => response.json())
- .then(data => {
- embeddingsList = data.map(embedding => "embedding:" + embedding);
- })
- .catch(error => {
- console.error('Error:', error);
- });
+import { ttN_CreateDropdown, ttN_RemoveDropdown } from "./ttN.js";
let embeddingsList = [];
app.registerExtension({
name: "comfy.ttN.embeddingAC",
+ async beforeRegisterNodeDef(nodeType, nodeData, app) {
+ if (nodeData.name === "ttN pipeKSampler") {
+ embeddingsList = nodeData.input.hidden.embeddingsList[0];
+ embeddingsList = embeddingsList.map(embedding => "embedding:" + embedding);
+ }
+ },
nodeCreated(node) {
- if (node.widgets) {
+ if (node.widgets && node.getTitle() !== "xyPlot") {
const widgets = node.widgets.filter(
(n) => (n.type === "customtext" && n.dynamicPrompts !== false) || n.dynamicPrompts
);
@@ -35,16 +32,20 @@ app.registerExtension({
if (suggestionkey.startsWith(currentSegmentLower) && currentSegmentLower.length > 2 || currentSegmentLower.startsWith(suggestionkey)) {
const filteredEmbeddingsList = embeddingsList.filter(s => s.toLowerCase().includes(currentSegmentLower));
if (filteredEmbeddingsList.length > 0) {
- createDropdown(w.inputEl, filteredEmbeddingsList, (selectedSuggestion) => {
+ ttN_CreateDropdown(w.inputEl, filteredEmbeddingsList, (selectedSuggestion) => {
const newText = replaceLastEmbeddingSegment(w.inputEl.value, selectedSuggestion);
w.inputEl.value = newText;
});
- }
+ }
+ } else {
+ ttN_RemoveDropdown()
}
};
w.inputEl.removeEventListener('input', onInput);
w.inputEl.addEventListener('input', onInput);
+ w.inputEl.removeEventListener('mousedown', onInput);
+ w.inputEl.addEventListener('mousedown', onInput);
function replaceLastEmbeddingSegment(inputText, selectedSuggestion) {
const cursorPosition = w.inputEl.selectionStart;
diff --git a/js/ttNinterface.js b/js/ttNinterface.js
new file mode 100644
index 0000000..7090965
--- /dev/null
+++ b/js/ttNinterface.js
@@ -0,0 +1,493 @@
+import { app } from "/scripts/app.js";
+
+const customPipeLineLink = "#7737AA"
+const customIntLink = "#29699C"
+const customXYPlotLink = "#74DA5D"
+
+var customLinkColors = JSON.parse(localStorage.getItem('Comfy.Settings.ttN.customLinkColors')) || {};
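+// Fall back to the bundled default whenever a link type has no stored custom color, or ComfyUI has not registered a color for it yet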
+if (!customLinkColors["PIPE_LINE"] || !LGraphCanvas.link_type_colors["PIPE_LINE"]) {customLinkColors["PIPE_LINE"] = customPipeLineLink;}
+if (!customLinkColors["INT"] || !LGraphCanvas.link_type_colors["INT"]) {customLinkColors["INT"] = customIntLink;}
+if (!customLinkColors["XYPLOT"] || !LGraphCanvas.link_type_colors["XYPLOT"]) {customLinkColors["XYPLOT"] = customXYPlotLink;}
+
+localStorage.setItem('Comfy.Settings.ttN.customLinkColors', JSON.stringify(customLinkColors));
+
+
+app.registerExtension({
+ name: "comfy.ttN.interface",
+ init() {
+ function adjustToGrid(val, gridSize) {
+ return Math.round(val / gridSize) * gridSize;
+ }
+
+ function moveNodeBasedOnKey(e, node, gridSize, shiftMult) {
+ switch (e.code) {
+ case 'ArrowUp':
+ node.pos[1] -= gridSize * shiftMult;
+ break;
+ case 'ArrowDown':
+ node.pos[1] += gridSize * shiftMult;
+ break;
+ case 'ArrowLeft':
+ node.pos[0] -= gridSize * shiftMult;
+ break;
+ case 'ArrowRight':
+ node.pos[0] += gridSize * shiftMult;
+ break;
+ }
+ node.setDirtyCanvas(true, true);
+ }
+
+ function keyMoveNode(e, node) {
+ let gridSize = JSON.parse(localStorage.getItem('Comfy.Settings.Comfy.SnapToGrid.GridSize'));
+ gridSize = gridSize ? parseInt(gridSize) : 1;
+ let shiftMult = e.shiftKey ? 10 : 1;
+
+ node.pos[0] = adjustToGrid(node.pos[0], gridSize);
+ node.pos[1] = adjustToGrid(node.pos[1], gridSize);
+
+ moveNodeBasedOnKey(e, node, gridSize, shiftMult);
+ }
+
+ function getSelectedNodes(e) {
+ const inputField = e.composedPath()[0];
+ if (inputField.tagName === "TEXTAREA") return;
+ if (e.ctrlKey && ['ArrowUp', 'ArrowDown', 'ArrowLeft', 'ArrowRight'].includes(e.code)) {
+ e.stopPropagation();
+ let graphcanvas = LGraphCanvas.active_canvas;
+ for (let node in graphcanvas.selected_nodes) {
+ keyMoveNode(e, graphcanvas.selected_nodes[node]);
+ }
+ }
+ }
+
+ window.addEventListener("keydown", getSelectedNodes, true);
+
+ LGraphCanvas.prototype.ttNcreateDialog = function (htmlContent, onOK, onCancel) {
+ var dialog = document.createElement("div");
+ dialog.is_modified = false;
+ dialog.className = "ttN-dialog";
+ dialog.innerHTML = htmlContent + "";
+
+ dialog.close = function() {
+ if (dialog.parentNode) {
+ dialog.parentNode.removeChild(dialog);
+ }
+ };
+
+ var inputs = Array.from(dialog.querySelectorAll("input, select"));
+
+ inputs.forEach(input => {
+ input.addEventListener("keydown", function(e) {
+ dialog.is_modified = true;
+ if (e.keyCode == 27) { // ESC
+ onCancel && onCancel();
+ dialog.close();
+ } else if (e.keyCode == 13) { // Enter
+ onOK && onOK(dialog, inputs.map(input => input.value));
+ dialog.close();
+ } else if (e.keyCode != 13 && e.target.localName != "textarea") {
+ return;
+ }
+ e.preventDefault();
+ e.stopPropagation();
+ });
+ });
+
+ var graphcanvas = LGraphCanvas.active_canvas;
+ var canvas = graphcanvas.canvas;
+
+ var rect = canvas.getBoundingClientRect();
+ var offsetx = -20;
+ var offsety = -20;
+ if (rect) {
+ offsetx -= rect.left;
+ offsety -= rect.top;
+ }
+
+ if (event) {
+ dialog.style.left = event.clientX + offsetx + "px";
+ dialog.style.top = event.clientY + offsety + "px";
+ } else {
+ dialog.style.left = canvas.width * 0.5 + offsetx + "px";
+ dialog.style.top = canvas.height * 0.5 + offsety + "px";
+ }
+
+ var button = dialog.querySelector("#ok");
+ button.addEventListener("click", function() {
+ onOK && onOK(dialog, inputs.map(input => input.value));
+ dialog.close();
+ });
+
+ canvas.parentNode.appendChild(dialog);
+
+ if(inputs) inputs[0].focus();
+
+ var dialogCloseTimer = null;
+ dialog.addEventListener("mouseleave", function(e) {
+ if(LiteGraph.dialog_close_on_mouse_leave)
+ if (!dialog.is_modified && LiteGraph.dialog_close_on_mouse_leave)
+ dialogCloseTimer = setTimeout(dialog.close, LiteGraph.dialog_close_on_mouse_leave_delay); //dialog.close();
+ });
+ dialog.addEventListener("mouseenter", function(e) {
+ if(LiteGraph.dialog_close_on_mouse_leave)
+ if(dialogCloseTimer) clearTimeout(dialogCloseTimer);
+ });
+
+ return dialog;
+ };
+
+ LGraphCanvas.prototype.ttNsetNodeDimension = function (node) {
+ const nodeWidth = node.size[0];
+ const nodeHeight = node.size[1];
+
+ let input_html = "";
+ input_html += "";
+
+ LGraphCanvas.prototype.ttNcreateDialog("Width/Height" + input_html,
+ function(dialog, values) {
+ var widthValue = Number(values[0]) ? values[0] : nodeWidth;
+ var heightValue = Number(values[1]) ? values[1] : nodeHeight;
+ let sz = node.computeSize();
+ node.setSize([Math.max(sz[0], widthValue), Math.max(sz[1], heightValue)]);
+ if (dialog.parentNode) {
+ dialog.parentNode.removeChild(dialog);
+ }
+ node.setDirtyCanvas(true, true);
+ },
+ null
+ );
+ };
+
+ LGraphCanvas.prototype.ttNsetSlotTypeColor = function(slot){
+ var slotColor = LGraphCanvas.link_type_colors[slot.output.type].toUpperCase();
+ var slotType = slot.output.type;
+ // Check if the color is in the correct format
+ if (!/^#([0-9A-F]{3}){1,2}$/i.test(slotColor)) {
+ slotColor = "#FFFFFF";
+ }
+
+ // Check if browser supports color input type
+ var inputType = "color";
+ var inputID = " id='colorPicker'";
+ var inputElem = document.createElement("input");
+ inputElem.setAttribute("type", inputType);
+ if (inputElem.type !== "color") {
+ // If it doesn't, fall back to text input
+ inputType = "text";
+ inputID = " ";
+ }
+
+ let input_html = "";
+ input_html += ""; // Add a default button
+ input_html += ""; // Add a reset button
+
+ var dialog = LGraphCanvas.prototype.ttNcreateDialog("" + slotType + "" +
+ input_html,
+ function(dialog, values){
+ var hexColor = values[0].toUpperCase();
+
+ if (!/^#([0-9A-F]{3}){1,2}$/i.test(hexColor)) {
+ return
+ }
+
+ if (hexColor === slotColor) {
+ return
+ }
+
+ var customLinkColors = JSON.parse(localStorage.getItem('Comfy.Settings.ttN.customLinkColors')) || {};
+ if (!customLinkColors[slotType + "_ORIG"]) {customLinkColors[slotType + "_ORIG"] = slotColor};
+ customLinkColors[slotType] = hexColor;
+ localStorage.setItem('Comfy.Settings.ttN.customLinkColors', JSON.stringify(customLinkColors));
+
+ app.canvas.default_connection_color_byType[slotType] = hexColor;
+ LGraphCanvas.link_type_colors[slotType] = hexColor;
+ }
+ );
+
+ var resetButton = dialog.querySelector("#reset");
+ resetButton.addEventListener("click", function() {
+ var colorInput = dialog.querySelector("input[type='" + inputType + "']");
+ colorInput.value = slotColor;
+ });
+
+ var defaultButton = dialog.querySelector("#Default");
+ defaultButton.addEventListener("click", function() {
+ var customLinkColors = JSON.parse(localStorage.getItem('Comfy.Settings.ttN.customLinkColors')) || {};
+ if (customLinkColors[slotType+"_ORIG"]) {
+ app.canvas.default_connection_color_byType[slotType] = customLinkColors[slotType+"_ORIG"];
+ LGraphCanvas.link_type_colors[slotType] = customLinkColors[slotType+"_ORIG"];
+
+ delete customLinkColors[slotType+"_ORIG"];
+ delete customLinkColors[slotType];
+ }
+ localStorage.setItem('Comfy.Settings.ttN.customLinkColors', JSON.stringify(customLinkColors));
+ dialog.close()
+ })
+
+ var colorPicker = dialog.querySelector("input[type='" + inputType + "']");
+ colorPicker.addEventListener("focusout", function(e) {
+ this.focus();
+ });
+ };
+
+ LGraphCanvas.prototype.ttNdefaultBGcolor = function(node, defaultBGColor){
+ setTimeout(() => {
+ if (defaultBGColor !== 'default' && !node.color) {
+ node.addProperty('ttNbgOverride', defaultBGColor);
+ node.color=defaultBGColor.color;
+ node.bgcolor=defaultBGColor.bgcolor;
+ }
+
+ if (node.color && node.properties.ttNbgOverride) {
+ if (node.properties.ttNbgOverride !== defaultBGColor && node.color === node.properties.ttNbgOverride.color) {
+ if (defaultBGColor === 'default') {
+ delete node.properties.ttNbgOverride
+ delete node.color
+ delete node.bgcolor
+ } else {
+ node.properties.ttNbgOverride = defaultBGColor
+ node.color=defaultBGColor.color;
+ node.bgcolor=defaultBGColor.bgcolor;
+ }
+ }
+
+ if (node.properties.ttNbgOverride !== defaultBGColor && node.color !== node.properties.ttNbgOverride?.color) {
+ delete node.properties.ttNbgOverride
+ }
+ }
+ }, 0);
+ };
+
+ LGraphCanvas.ttNonShowLinkStyles = function(value, options, e, menu, node) {
+ new LiteGraph.ContextMenu(
+ LiteGraph.LINK_RENDER_MODES,
+ { event: e, callback: inner_clicked, parentMenu: menu, node: node }
+ );
+
+ function inner_clicked(v) {
+ if (!node) {
+ return;
+ }
+ var kV = Object.values(LiteGraph.LINK_RENDER_MODES).indexOf(v);
+
+ localStorage.setItem('Comfy.Settings.ttN.links_render_mode', JSON.stringify(kV));
+ if (localStorage.getItem('Comfy.Settings.pysssss.LinkRenderMode')) {
+ localStorage.setItem('Comfy.Settings.pysssss.LinkRenderMode', JSON.stringify(kV));
+ }
+
+ app.canvas.links_render_mode = kV;
+ }
+
+ return false;
+ };
+
+ LGraphCanvas.ttNsetDefaultBGColor = function(value, options, e, menu, node) {
+ if (!node) {
+ throw "no node for color";
+ }
+
+ var values = [];
+ values.push({
+ value: null,
+ content:
+ "No Color"
+ });
+
+ for (var i in LGraphCanvas.node_colors) {
+ var color = LGraphCanvas.node_colors[i];
+ var value = {
+ value: i,
+ content:
+ "" +
+ i +
+ ""
+ };
+ values.push(value);
+ }
+ new LiteGraph.ContextMenu(values, {
+ event: e,
+ callback: inner_clicked,
+ parentMenu: menu,
+ node: node
+ });
+
+ function inner_clicked(v) {
+ if (!node) {
+ return;
+ }
+
+ var defaultBGColor = v.value ? LGraphCanvas.node_colors[v.value] : 'default';
+
+ localStorage.setItem('Comfy.Settings.ttN.defaultBGColor', JSON.stringify(defaultBGColor));
+
+ for (var i in app.graph._nodes) {
+ LGraphCanvas.prototype.ttNdefaultBGcolor(app.graph._nodes[i], defaultBGColor);
+ }
+
+ node.setDirtyCanvas(true, true);
+ }
+
+ return false;
+ };
+
+ const getNodeMenuOptions = LGraphCanvas.prototype.getNodeMenuOptions;
+ LGraphCanvas.prototype.getNodeMenuOptions = function (node) {
+ const options = getNodeMenuOptions.apply(this, arguments);
+ node.setDirtyCanvas(true, true);
+
+ options.splice(options.length - 1, 0,
+ {
+ content: "Node Dimensions (ttN)",
+ callback: () => { LGraphCanvas.prototype.ttNsetNodeDimension(node); }
+ },
+ {
+ content: "Default BG Color (ttN)",
+ has_submenu: true,
+ callback: LGraphCanvas.ttNsetDefaultBGColor
+ },
+ null
+ );
+ return options;
+ };
+ },
+
+ beforeRegisterNodeDef(nodeType, nodeData, app) {
+ nodeType.prototype.getSlotMenuOptions = (slot) => {
+ let menu_info = [];
+ if (
+ slot &&
+ slot.output &&
+ slot.output.links &&
+ slot.output.links.length
+ ) {
+ menu_info.push({ content: "Disconnect Links", slot: slot });
+ }
+ var _slot = slot.input || slot.output;
+ if (_slot.removable){
+ menu_info.push(
+ _slot.locked
+ ? "Cannot remove"
+ : { content: "Remove Slot", slot: slot }
+ );
+ }
+ if (!_slot.nameLocked){
+ menu_info.push({ content: "Rename Slot", slot: slot });
+ }
+
+ menu_info.push({ content: "Slot Type Color (ttN)", slot: slot, callback: () => { LGraphCanvas.prototype.ttNsetSlotTypeColor(slot) } });
+ menu_info.push({ content: "Link Style (ttN)", has_submenu: true, slot: slot, callback: LGraphCanvas.ttNonShowLinkStyles });
+
+ return menu_info;
+ }
+ },
+
+ setup() {
+ let customLinkType = Number(localStorage.getItem('Comfy.Settings.ttN.links_render_mode'));
+ if (customLinkType) {app.canvas.links_render_mode = customLinkType}
+
+ var customLinkColors = JSON.parse(localStorage.getItem('Comfy.Settings.ttN.customLinkColors')) || {};
+ Object.assign(app.canvas.default_connection_color_byType, customLinkColors);
+ Object.assign(LGraphCanvas.link_type_colors, customLinkColors);
+
+ },
+ nodeCreated(node) {
+ let defaultBGColor = JSON.parse(localStorage.getItem('Comfy.Settings.ttN.defaultBGColor'));
+ if (defaultBGColor) {LGraphCanvas.prototype.ttNdefaultBGcolor(node, defaultBGColor)};
+ },
+ loadedGraphNode(node, app) {
+ let defaultBGColor = JSON.parse(localStorage.getItem('Comfy.Settings.ttN.defaultBGColor'));
+ if (defaultBGColor) {LGraphCanvas.prototype.ttNdefaultBGcolor(node, defaultBGColor)};
+ },
+});
+
+var styleElement = document.createElement("style");
+const cssCode = `
+.ttN-dialog {
+ top: 10px;
+ left: 10px;
+ min-height: 1em;
+ background-color: var(--comfy-menu-bg);
+ font-size: 1.2em;
+ box-shadow: 0 0 7px black !important;
+ z-index: 10;
+ display: grid;
+ border-radius: 7px;
+ padding: 7px 7px;
+ position: fixed;
+}
+.ttN-dialog .name {
+ display: inline-block;
+ min-height: 1.5em;
+ font-size: 14px;
+ font-family: sans-serif;
+ color: var(--descrip-text);
+ padding: 0;
+ vertical-align: middle;
+ justify-self: center;
+}
+.ttN-dialog input,
+.ttN-dialog textarea,
+.ttN-dialog select {
+ margin: 3px;
+ min-width: 60px;
+ min-height: 1.5em;
+ background-color: var(--comfy-input-bg);
+ border: 2px solid;
+ border-color: var(--border-color);
+ color: var(--input-text);
+ border-radius: 14px;
+ padding-left: 10px;
+ outline: none;
+}
+
+.ttN-dialog #colorPicker {
+ margin: 0px;
+ min-width: 100%;
+ min-height: 2.5em;
+ border-radius: 0px;
+ padding: 0px 2px 0px 2px;
+ border: unset;
+}
+
+.ttN-dialog textarea {
+ min-height: 150px;
+}
+
+.ttN-dialog button {
+ margin-top: 3px;
+ vertical-align: top;
+ background-color: #999;
+ border: 0;
+ padding: 4px 18px;
+ border-radius: 20px;
+ cursor: pointer;
+}
+
+.ttN-dialog button.rounded,
+.ttN-dialog input.rounded {
+ border-radius: 0 12px 12px 0;
+}
+
+.ttN-dialog .helper {
+ overflow: auto;
+ max-height: 200px;
+}
+
+.ttN-dialog .help-item {
+ padding-left: 10px;
+}
+
+.ttN-dialog .help-item:hover,
+.ttN-dialog .help-item.selected {
+ cursor: pointer;
+ background-color: white;
+ color: black;
+}
+
+`
+styleElement.innerHTML = cssCode
+document.head.appendChild(styleElement);
\ No newline at end of file
diff --git a/js/ttNstyles.js b/js/ttNstyles.js
deleted file mode 100644
index 2efdcfb..0000000
--- a/js/ttNstyles.js
+++ /dev/null
@@ -1,62 +0,0 @@
-import { app } from "/scripts/app.js";
-
-const customPipeLineLink = "#7737AA"
-const customIntLink = "#29699C"
-const overrideBGColor = 'default'
-const customLinkType = 2
-
-let ttNbgOverride = 'default'
-
-const customLinkColors = {
- "PIPE_LINE": customPipeLineLink, "INT": customIntLink,
-}
-
-if (overrideBGColor !== 'default') {
- ttNbgOverride = {
- color: LGraphCanvas.node_colors[overrideBGColor].color,
- bgcolor: LGraphCanvas.node_colors[overrideBGColor].bgcolor,
- groupcolor: LGraphCanvas.node_colors[overrideBGColor].groupcolor
- }
-}
-
-app.registerExtension({
- name: "comfy.ttN.styles",
- beforeRegisterNodeDef(nodeType, nodeData, app) {
- nodeType.prototype.onNodeCreated = function () {
- if (overrideBGColor !== 'default' && !this.color) {
- this.addProperty('ttNbgOverride', overrideBGColor);
- this.color=LGraphCanvas.node_colors[overrideBGColor].color;
- this.bgcolor=LGraphCanvas.node_colors[overrideBGColor].bgcolor;
- }
- }
- },
- setup() {
- app.canvas.links_render_mode = customLinkType
- Object.assign(app.canvas.default_connection_color_byType, customLinkColors);
- Object.assign(LGraphCanvas.link_type_colors, customLinkColors);
- },
- loadedGraphNode(node, app) {
- const NP_ttNbgOverride = node.properties.ttNbgOverride
- if (overrideBGColor !== 'default' && !node.color) {
- node.addProperty('ttNbgOverride', overrideBGColor);
- node.color=LGraphCanvas.node_colors[overrideBGColor].color;
- node.bgcolor=LGraphCanvas.node_colors[overrideBGColor].bgcolor;
- }
-
- if (node.color && node.properties.ttNbgOverride) {
- if (node.properties.ttNbgOverride !== overrideBGColor && node.color === LGraphCanvas.node_colors[NP_ttNbgOverride].color) {
- if (overrideBGColor === 'default') {
- delete node.properties.ttNbgOverride
- delete node.color
- delete node.bgcolor
- } else {
- node.properties.ttNbgOverride = overrideBGColor
- node.color=LGraphCanvas.node_colors[overrideBGColor].color;
- node.bgcolor=LGraphCanvas.node_colors[overrideBGColor].bgcolor;
- }
- } else if (node.properties.ttNbgOverride !== overrideBGColor && node.color !== LGraphCanvas.node_colors[NP_ttNbgOverride].color) {
- delete node.properties.ttNbgOverride
- }
- }
- },
-});
\ No newline at end of file
diff --git a/js/ttNxyPlot.js b/js/ttNxyPlot.js
new file mode 100644
index 0000000..e53b84e
--- /dev/null
+++ b/js/ttNxyPlot.js
@@ -0,0 +1,212 @@
+import { app } from "/scripts/app.js";
+import { ttN_CreateDropdown, ttN_RemoveDropdown } from "./ttN.js";
+
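+// Expand a numeric widget's {min, max, step} spec into the list of selectable values, each formatted with a trailing '; ' separator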
+function generateNumList(dictionary) {
+ const minimum = dictionary["min"] || 0;
+ const maximum = dictionary["max"] || 0;
+ const step = dictionary["step"] || 1;
+
+ if (step === 0) {
+ return [];
+ }
+
+ const result = [];
+ let currentValue = minimum;
+
+ while (currentValue <= maximum) {
+ if (Number.isInteger(step)) {
+ result.push(Math.round(currentValue) + '; ');
+ } else {
+ let formattedValue = currentValue.toFixed(3);
+ if(formattedValue == -0.000){
+ formattedValue = '0.000';
+ }
+ if (!/\.\d{3}$/.test(formattedValue)) {
+ formattedValue += "0";
+ }
+ result.push(formattedValue + "; ");
+ }
+ currentValue += step;
+ }
+
+ if (maximum >= 0 && minimum >= 0) {
+ //low to high
+ return result;
+ }
+ else {
+ //high to low
+ return result.reverse();
+ }
+}
+
+let plotDict = {};
+let currentOptionsDict = {};
+
+function getCurrentOptionLists(node, widget) {
+ const nodeId = String(node.id);
+ const widgetName = widget.name;
+ const widgetValue = widget.value.replace(/^(loader|sampler):\s/, '');
+
+ if (!currentOptionsDict[nodeId] || !currentOptionsDict[nodeId][widgetName]) {
+ currentOptionsDict[nodeId] = {...currentOptionsDict[nodeId], [widgetName]: plotDict[widgetValue]};
+ } else if (currentOptionsDict[nodeId][widgetName] != plotDict[widgetValue]) {
+ currentOptionsDict[nodeId][widgetName] = plotDict[widgetValue];
+ }
+}
+
+function addGetSetters(node) {
+ if (node.widgets)
+ for (const w of node.widgets) {
+ if (w.name === "x_axis" ||
+ w.name === "y_axis") {
+ let widgetValue = w.value;
+
+ // Define getters and setters for widget values
+ Object.defineProperty(w, 'value', {
+
+ get() {
+ return widgetValue;
+ },
+ set(newVal) {
+ if (newVal !== widgetValue) {
+ widgetValue = newVal;
+ getCurrentOptionLists(node, w);
+ }
+ }
+ });
+ }
+ }
+}
+
+function dropdownCreator(node) {
+ if (node.widgets) {
+ const widgets = node.widgets.filter(
+ (n) => (n.type === "customtext" && n.dynamicPrompts !== false) || n.dynamicPrompts
+ );
+
+ for (const w of widgets) {
+ function replaceOptionSegments(selectedOption, inputSegments, cursorSegmentIndex, optionsList) {
+ if (selectedOption) {
+ inputSegments[cursorSegmentIndex] = selectedOption;
+ }
+
+ return inputSegments.map(segment => verifySegment(segment, optionsList))
+ .filter(item => item !== '')
+ .join('');
+ }
+
+ function verifySegment(segment, optionsList) {
+ segment = cleanSegment(segment);
+
+ if (isInOptionsList(segment, optionsList)) {
+ return segment + '; ';
+ }
+
+ let matchedOptions = findMatchedOptions(segment, optionsList);
+
+ if (matchedOptions.length === 1 || matchedOptions.length === 2) {
+ return matchedOptions[0];
+ }
+
+ if (isInOptionsList(formatNumberSegment(segment), optionsList)) {
+ return formatNumberSegment(segment) + '; ';
+ }
+
+ return '';
+ }
+
+ function cleanSegment(segment) {
+ return segment.replace(/(\n|;| )/g, '');
+ }
+
+ function isInOptionsList(segment, optionsList) {
+ return optionsList.includes(segment + '; ');
+ }
+
+ function findMatchedOptions(segment, optionsList) {
+ return optionsList.filter(option => option.toLowerCase().includes(segment.toLowerCase()));
+ }
+
+ function formatNumberSegment(segment) {
+ if (Number(segment)) {
+ return Number(segment).toFixed(3);
+ }
+
+ if (['0', '0.', '0.0', '0.00', '00'].includes(segment)) {
+ return '0.000';
+ }
+ return segment;
+ }
+
+
+ const onInput = function () {
+ const nodeId = String(w.parent.id);
+ const axisWidgetName = w.name[0] + '_axis';
+
+ let optionsList = currentOptionsDict[nodeId]?.[axisWidgetName] || [];
+ if (optionsList.length === 0) {return}
+
+ const inputText = w.inputEl.value;
+ const cursorPosition = w.inputEl.selectionStart;
+
+ let inputSegments = inputText.split('; ');
+
+ const cursorSegmentIndex = inputText.substring(0, cursorPosition).split('; ').length - 1;
+ const currentSegment = inputSegments[cursorSegmentIndex];
+ const currentSegmentLower = currentSegment.replace(/\n/g, '').toLowerCase();
+
+ const filteredOptionsList = optionsList.filter(option => option.toLowerCase().includes(currentSegmentLower)).map(option => option.replace(/; /g, ''));
+
+ if (filteredOptionsList.length > 0) {
+ ttN_CreateDropdown(w.inputEl, filteredOptionsList, (selectedOption) => {
+ const verifiedText = replaceOptionSegments(selectedOption, inputSegments, cursorSegmentIndex, optionsList);
+ w.inputEl.value = verifiedText;
+ });
+ }
+ else {
+ ttN_RemoveDropdown();
+ const verifiedText = replaceOptionSegments(null, inputSegments, cursorSegmentIndex, optionsList);
+ w.inputEl.value = verifiedText;
+ }
+ };
+
+ w.inputEl.removeEventListener('input', onInput);
+ w.inputEl.addEventListener('input', onInput);
+ w.inputEl.removeEventListener('mouseup', onInput);
+ w.inputEl.addEventListener('mouseup', onInput);
+ }
+ }
+}
+
+app.registerExtension({
+ name: "comfy.ttN.xyPlot",
+ async beforeRegisterNodeDef(nodeType, nodeData, app) {
+ if (nodeData.name === "ttN xyPlot") {
+ plotDict = nodeData.input.hidden.plot_dict[0];
+
+ for (const key in plotDict) {
+ const value = plotDict[key];
+ if (Array.isArray(value)) {
+ let updatedValues = [];
+ for (const v of value) {
+ updatedValues.push(v + '; ');
+ }
+ plotDict[key] = updatedValues;
+ } else if (typeof(value) === 'object') {
+ plotDict[key] = generateNumList(value);
+ } else {
+ plotDict[key] = value + '; ';
+ }
+ }
+ plotDict["None"] = [];
+ plotDict["---------------------"] = [];
+ }
+ },
+ nodeCreated(node) {
+ if (node.getTitle() === "xyPlot") {
+ addGetSetters(node);
+ dropdownCreator(node);
+
+ }
+ }
+});
\ No newline at end of file
diff --git a/tinyterraNodes.py b/tinyterraNodes.py
index b682687..60cb5a0 100644
--- a/tinyterraNodes.py
+++ b/tinyterraNodes.py
@@ -5,70 +5,181 @@
import os
import re
-import sys
import json
import torch
+import random
import datetime
import comfy.sd
import comfy.utils
import numpy as np
import folder_paths
import comfy.samplers
+import latent_preview
from torch import Tensor
from pathlib import Path
import comfy.model_management
-from nodes import common_ksampler
from PIL.PngImagePlugin import PngInfo
+from .adv_encode import advanced_encode
from PIL import Image, ImageDraw, ImageFont
from comfy.sd import ModelPatcher, CLIP, VAE
+from typing import Dict, List, Optional, Tuple, Union
from comfy_extras.chainner_models import model_loading
-# Get absolute path's of the current parent directory, of the ComfyUI directory and add to sys.path list
-my_dir = Path(__file__).parent
-comfy_dir = Path(my_dir).parent.parent
-font_path = os.path.join(my_dir, 'arial.ttf')
-sitepkg = comfy_dir.parent / 'python_embeded' / 'Lib' / 'site-packages'
-script_path = comfy_dir.parent / 'python_embeded' / 'Scripts'
+class CC:
+ CLEAN = '\33[0m'
+ BOLD = '\33[1m'
+ ITALIC = '\33[3m'
+ UNDERLINE = '\33[4m'
+ BLINK = '\33[5m'
+ BLINK2 = '\33[6m'
+ SELECTED = '\33[7m'
+
+ BLACK = '\33[30m'
+ RED = '\33[31m'
+ GREEN = '\33[32m'
+ YELLOW = '\33[33m'
+ BLUE = '\33[34m'
+ VIOLET = '\33[35m'
+ BEIGE = '\33[36m'
+ WHITE = '\33[37m'
+
+ GREY = '\33[90m'
+ LIGHTRED = '\33[91m'
+ LIGHTGREEN = '\33[92m'
+ LIGHTYELLOW = '\33[93m'
+ LIGHTBLUE = '\33[94m'
+ LIGHTVIOLET = '\33[95m'
+ LIGHTBEIGE = '\33[96m'
+ LIGHTWHITE = '\33[97m'
+
+class ttNl:
+ def __init__(self, input_string):
+ self.header_value = f'{CC.LIGHTGREEN}[ttN] {CC.GREEN}'
+ self.label_value = ''
+ self.title_value = ''
+ self.input_string = f'{input_string}{CC.CLEAN}'
+
+ def h(self, header_value):
+ self.header_value = f'{CC.LIGHTGREEN}[{header_value}] {CC.GREEN}'
+ return self
+
+ def full(self):
+ self.h('tinyterraNodes')
+ return self
+
+ def success(self):
+ self.label_value = f'Success: '
+ return self
-sys.path.append(str(comfy_dir))
-sys.path.append(str(sitepkg))
-sys.path.append(str(script_path))
+ def warn(self):
+ self.label_value = f'{CC.RED}Warning:{CC.LIGHTRED} '
+ return self
-MAX_RESOLUTION=8192
+ def error(self):
+ self.label_value = f'{CC.LIGHTRED}ERROR:{CC.RED} '
+ return self
-# Tensor to PIL & PIL to Tensor (from WAS Suite)
-def tensor2pil(image: torch.Tensor) -> Image.Image:
- return Image.fromarray(np.clip(255. * image.cpu().numpy().squeeze(), 0, 255).astype(np.uint8))
+ def t(self, title_value):
+ self.title_value = f'{title_value}:{CC.CLEAN} '
+ return self
+
+ def p(self):
+ print(self.header_value + self.label_value + self.title_value + self.input_string)
-def pil2tensor(image: Image.Image) -> torch.Tensor:
- return torch.from_numpy(np.array(image).astype(np.float32) / 255.0).unsqueeze(0)
+class ttNpaths:
+ ComfyUI = folder_paths.base_path
+ tinyterraNodes = Path(__file__).parent
+ font_path = os.path.join(tinyterraNodes, 'arial.ttf')
+
+# Globals
+ttN_version = '1.0.0'
+
+MAX_RESOLUTION=8192
-# Cache models in RAM
loaded_objects = {
"ckpt": [], # (ckpt_name, model)
"clip": [], # (ckpt_name, clip)
"bvae": [], # (ckpt_name, vae)
"vae": [], # (vae_name, vae)
- "lora": [], # (lora_name, model_name, model_lora, clip_lora, strength_model, strength_clip)
+ "lora": {}, # {lora_name: {uid: (model_lora, clip_lora)}}
}
+last_helds: dict[str, list] = {
+ "results": [],
+ "samples": [],
+ "images": [],
+ "vae_decode": []
+}
+
+def clean_values(values):
+ original_values = values.split("; ")
+ cleaned_values = []
+
+ for value in original_values:
+ # Strip the semi-colon
+ cleaned_value = value.strip(';').strip()
+
+ if cleaned_value == "":
+ continue
+
+ # Try to convert the cleaned_value back to int or float if possible
+ try:
+ cleaned_value = int(cleaned_value)
+ except ValueError:
+ try:
+ cleaned_value = float(cleaned_value)
+ except ValueError:
+ pass
+
+ # Append the cleaned_value to the list
+ cleaned_values.append(cleaned_value)
+
+ return cleaned_values
+
+# Loader Functions
def update_loaded_objects(prompt):
global loaded_objects
- # Extract all Efficient Loader class type entries
+ # Extract all Loader class type entries
ttN_pipeLoader_entries = [entry for entry in prompt.values() if entry["class_type"] == "ttN pipeLoader"]
-
+ ttN_pipeKSampler_entries = [entry for entry in prompt.values() if entry["class_type"] == "ttN pipeKSampler"]
+ ttN_XYPlot_entries = [entry for entry in prompt.values() if entry["class_type"] == "ttN xyPlot"]
# Collect all desired model, vae, and lora names
desired_ckpt_names = set()
desired_vae_names = set()
desired_lora_names = set()
+ desired_lora_settings = set()
for entry in ttN_pipeLoader_entries:
desired_ckpt_names.add(entry["inputs"]["ckpt_name"])
desired_vae_names.add(entry["inputs"]["vae_name"])
desired_lora_names.add(entry["inputs"]["lora1_name"])
+ desired_lora_settings.add(f'{entry["inputs"]["lora1_name"]};{entry["inputs"]["lora1_model_strength"]};{entry["inputs"]["lora1_clip_strength"]}')
desired_lora_names.add(entry["inputs"]["lora2_name"])
+ desired_lora_settings.add(f'{entry["inputs"]["lora2_name"]};{entry["inputs"]["lora2_model_strength"]};{entry["inputs"]["lora2_clip_strength"]}')
desired_lora_names.add(entry["inputs"]["lora3_name"])
+ desired_lora_settings.add(f'{entry["inputs"]["lora3_name"]};{entry["inputs"]["lora3_model_strength"]};{entry["inputs"]["lora3_clip_strength"]}')
+ for entry in ttN_pipeKSampler_entries:
+ desired_lora_names.add(entry["inputs"]["lora_name"])
+ desired_lora_settings.add(f'{entry["inputs"]["lora_name"]};{entry["inputs"]["lora_model_strength"]};{entry["inputs"]["lora_clip_strength"]}')
+ for entry in ttN_XYPlot_entries:
+ x_entry = entry["inputs"]["x_axis"].split(": ")[1] if entry["inputs"]["x_axis"] not in ttN_XYPlot.rejected else "None"
+ y_entry = entry["inputs"]["y_axis"].split(": ")[1] if entry["inputs"]["y_axis"] not in ttN_XYPlot.rejected else "None"
+
+ def add_desired_plot_values(axis_entry, axis_values):
+ vals = clean_values(entry["inputs"][axis_values])
+ if axis_entry == "vae_name":
+ for v in vals:
+ desired_vae_names.add(v)
+ elif axis_entry == "ckpt_name":
+ for v in vals:
+ desired_ckpt_names.add(v)
+ elif axis_entry in ["lora1_name", "lora2_name", "lora3_name"]:
+ for v in vals:
+ desired_lora_names.add(v)
+
+ add_desired_plot_values(x_entry, "x_values")
+ add_desired_plot_values(y_entry, "y_values")
# Check and clear unused ckpt, clip, and bvae entries
for list_key in ["ckpt", "clip", "bvae"]:
@@ -81,12 +192,21 @@ def update_loaded_objects(prompt):
for index in sorted(unused_vae_indices, reverse=True):
loaded_objects["vae"].pop(index)
+ loaded_ckpt_hashes = set()
+ for ckpt in loaded_objects["ckpt"]:
+ loaded_ckpt_hashes.add(str(ckpt[1])[33:-1])
+
# Check and clear unused lora entries
- unused_lora_indices = [i for i, entry in enumerate(loaded_objects["lora"]) if entry[0] not in desired_lora_names]
- for index in sorted(unused_lora_indices, reverse=True):
- loaded_objects["lora"].pop(index)
+ for lora_name, lora_models in dict(loaded_objects["lora"]).items():
+ if lora_name not in desired_lora_names:
+ loaded_objects["lora"].pop(lora_name)
+ else:
+ for UID in list(lora_models.keys()):
+ used_model_hash, lora_settings= UID.split(";", 1)
+ if used_model_hash not in loaded_ckpt_hashes or lora_settings not in desired_lora_settings:
+ loaded_objects["lora"][lora_name].pop(UID)
-def load_checkpoint(ckpt_name,output_vae=True, output_clip=True):
+def load_checkpoint(ckpt_name, output_vae=True, output_clip=True):
"""
Searches for tuple index that contains ckpt_name in "ckpt" array of loaded_objects.
If found, extracts the model, clip, and vae from the loaded_objects.
@@ -148,41 +268,235 @@ def load_lora(lora_name, model, clip, strength_model, strength_clip):
"""
global loaded_objects
- # Get the model_name (ckpt_name) from the first entry in loaded_objects
- model_name = loaded_objects["ckpt"][0][0] if loaded_objects["ckpt"] else None
+ # Get the model_hash as string
+ input_model_hash = str(model)[33:-1]
- # Check if lora_name exists in "lora" array
- existing_lora = [entry for entry in loaded_objects["lora"] if entry[0] == lora_name]
-
- if existing_lora:
- lora_name, stored_model_name, model_lora, clip_lora, stored_strength_model, stored_strength_clip = existing_lora[0]
-
- # Check if the model_name, strength_model, and strength_clip are the same
- if model_name == stored_model_name and strength_model == stored_strength_model and strength_clip == stored_strength_clip:
- # Check if the model has not changed in the loaded_objects
- existing_model = [entry for entry in loaded_objects["ckpt"] if entry[0] == model_name]
- if existing_model and existing_model[0][1] == model:
- return model_lora, clip_lora
+ # Assign UID to model/lora/strengths combo
+ unique_id = f'{input_model_hash};{lora_name};{strength_model};{strength_clip}'
+ # Check if Lora model already exists
+ existing_lora_models = loaded_objects.get("lora", {}).get(lora_name, None)
+ if existing_lora_models and unique_id in existing_lora_models:
+ model_lora, clip_lora = existing_lora_models[unique_id]
+ return model_lora, clip_lora
# If Lora model not found or strength values changed or model changed, generate new Lora models
lora_path = folder_paths.get_full_path("loras", lora_name)
- model_lora, clip_lora = comfy.sd.load_lora_for_models(model, clip, lora_path, strength_model, strength_clip)
-
- # Remove existing Lora model if it exists
- if existing_lora:
- loaded_objects["lora"].remove(existing_lora[0])
+ lora = comfy.utils.load_torch_file(lora_path, safe_load=True)
+ model_lora, clip_lora = comfy.sd.load_lora_for_models(model, clip, lora, strength_model, strength_clip)
- # Update loaded_objects[] array
- loaded_objects["lora"].append((lora_name, model_name, model_lora, clip_lora, strength_model, strength_clip))
+ if lora_name not in loaded_objects["lora"]:
+ loaded_objects["lora"][lora_name] = {}
+ loaded_objects["lora"][lora_name][unique_id] = (model_lora, clip_lora)
return model_lora, clip_lora
+# Sampler Functions
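+# Thin wrapper around comfy.sample.sample: prepares noise and noise_mask and streams latent previews to the progress bar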
+def common_ksampler(model, seed, steps, cfg, sampler_name, scheduler, positive, negative, latent, denoise=1.0, disable_noise=False, start_step=None, last_step=None, force_full_denoise=False, preview_latent=True, disable_pbar=False):
+ device = comfy.model_management.get_torch_device()
+ latent_image = latent["samples"]
-#---------------------------------------------------------------ttN Pipe Loader START---------------------------------------------------------------#
+ if disable_noise:
+ noise = torch.zeros(latent_image.size(), dtype=latent_image.dtype, layout=latent_image.layout, device="cpu")
+ else:
+ batch_inds = latent["batch_index"] if "batch_index" in latent else None
+ noise = comfy.sample.prepare_noise(latent_image, seed, batch_inds)
-# ttN Pipe Loader (Modifed from TSC Efficient Loader and Advanced clip text encode)
-from .adv_encode import advanced_encode
+ noise_mask = None
+ if "noise_mask" in latent:
+ noise_mask = latent["noise_mask"]
+
+ preview_format = "JPEG"
+ if preview_format not in ["JPEG", "PNG"]:
+ preview_format = "JPEG"
+
+ previewer = False
+
+ if preview_latent:
+ previewer = latent_preview.get_previewer(device, model.model.latent_format)
+
+ pbar = comfy.utils.ProgressBar(steps)
+ def callback(step, x0, x, total_steps):
+ preview_bytes = None
+ if previewer:
+ preview_bytes = previewer.decode_latent_to_preview_image(preview_format, x0)
+ pbar.update_absolute(step + 1, total_steps, preview_bytes)
+
+ samples = comfy.sample.sample(model, noise, steps, cfg, sampler_name, scheduler, positive, negative, latent_image,
+ denoise=denoise, disable_noise=disable_noise, start_step=start_step, last_step=last_step,
+ force_full_denoise=force_full_denoise, noise_mask=noise_mask, callback=callback, disable_pbar=disable_pbar, seed=seed)
+
+ out = latent.copy()
+ out["samples"] = samples
+ return out
+
+def enforce_mul_of_64(d):
+    d = int(d)
+    if d <= 7:
+        d = 8
+    leftover = d % 8 # latent dimensions must be a multiple of 8 (64 image pixels)
+    if leftover != 0: # if the dimension is not a multiple of 8
+        if (leftover < 4): # if closer to the lower multiple of 8
+            d -= leftover # round down
+        else: # if closer to the upper multiple of 8
+            d += 8 - leftover # round up
+
+    return int(d)
+
+def upscale(samples, upscale_method, factor, crop):
+ samples = samples[0]
+ s = samples.copy()
+ x = samples["samples"].shape[3]
+ y = samples["samples"].shape[2]
+
+ new_x = int(x * factor)
+ new_y = int(y * factor)
+
+ if (new_x > MAX_RESOLUTION):
+ new_x = MAX_RESOLUTION
+ if (new_y > MAX_RESOLUTION):
+ new_y = MAX_RESOLUTION
+
+ s["samples"] = comfy.utils.common_upscale(
+ samples["samples"], enforce_mul_of_64(
+ new_x), enforce_mul_of_64(new_y), upscale_method, crop
+ )
+ return (s,)
+
+def tensor2pil(image: torch.Tensor) -> Image.Image:
+ return Image.fromarray(np.clip(255. * image.cpu().numpy().squeeze(), 0, 255).astype(np.uint8))
+
+def pil2tensor(image: Image.Image) -> torch.Tensor:
+ return torch.from_numpy(np.array(image).astype(np.float32) / 255.0).unsqueeze(0)
+
+# Functions for saving
+def get_save_image_path(filename_prefix: str, output_dir: str, image_width: int = 0, image_height: int = 0, output_folder: str = "Default") -> Tuple[str, str, int, str, str]:
+ def map_filename(filename: str) -> Tuple[int, str]:
+ prefix_len = len(os.path.basename(filename_prefix))
+ prefix = filename[:prefix_len]
+        digits = re.search(r'\d+', filename[prefix_len:])
+ return (int(digits.group()) if digits else 0, prefix)
+
+ filename_prefix = filename_prefix.replace("%width%", str(image_width)).replace("%height%", str(image_height))
+
+ subfolder = os.path.dirname(os.path.normpath(filename_prefix))
+ filename = os.path.basename(os.path.normpath(filename_prefix))
+
+ full_output_folder = output_folder if os.path.isdir(output_folder) else os.path.join(output_dir, subfolder)
+
+ try:
+ counter = max(filter(lambda a: a[1] == filename, map(map_filename, os.listdir(full_output_folder))))[0] + 1
+ except (ValueError, FileNotFoundError):
+ os.makedirs(full_output_folder, exist_ok=True)
+ counter = 1
+
+ return full_output_folder, filename, counter, subfolder, filename_prefix
+
+def format_date(text: str, date: datetime.datetime) -> str:
+    date_formats = {
+        'yyyy': lambda d: str(d.year),
+        'yyy': lambda d: str(d.year)[1:],
+        'yy': lambda d: str(d.year)[2:],
+        'dd': lambda d: '{:02d}'.format(d.day),
+        'MM': lambda d: '{:02d}'.format(d.month),
+        'hh': lambda d: '{:02d}'.format(d.hour),
+        'mm': lambda d: '{:02d}'.format(d.minute),
+        'ss': lambda d: '{:02d}'.format(d.second),
+        'd': lambda d: str(d.day),
+        'M': lambda d: str(d.month),
+        'h': lambda d: str(d.hour),
+        'm': lambda d: str(d.minute),
+        's': lambda d: str(d.second)
+    }
+    # Replace longer tokens first so 'dd'/'hh'/'mm'/'ss'/'yyyy' are not consumed by their single-letter forms
+    for format_str, format_func in date_formats.items():
+        if format_str in text:
+            text = text.replace(format_str, format_func(date))
+
+    return text
+
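+# Recursively walk a node and its upstream inputs, collecting widget values keyed as 'input>...>widget'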
+def gather_all_inputs(prompt: Dict[str, dict], unique_id: str, linkInput: str = '', collected_inputs: Optional[Dict[str, Union[str, List[str]]]] = None) -> Dict[str, Union[str, List[str]]]:
+ collected_inputs = collected_inputs or {}
+ prompt_inputs = prompt[str(unique_id)]["inputs"]
+
+ for pInput, pInputValue in prompt_inputs.items():
+ aInput = f"{linkInput}>{pInput}" if linkInput else pInput
+
+ if isinstance(pInputValue, list):
+ gather_all_inputs(prompt, pInputValue[0], aInput, collected_inputs)
+ else:
+ existing_value = collected_inputs.get(aInput)
+ if existing_value is None:
+ collected_inputs[aInput] = pInputValue
+ elif pInputValue not in existing_value:
+ collected_inputs[aInput] = existing_value + "; " + pInputValue
+
+ return collected_inputs
+
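+# Resolve %date:...% and %widget% / %input>widget% placeholders in a save prefix, then strip path separators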
+def filename_parser(filename_prefix: str, prompt: Dict[str, dict], my_unique_id: str) -> str:
+ filename_prefix = re.sub(r'%date:(.*?)%', lambda m: format_date(m.group(1), datetime.datetime.now()), filename_prefix)
+ all_inputs = gather_all_inputs(prompt, my_unique_id)
+
+ filename_prefix = re.sub(r'%(.*?)%', lambda m: str(all_inputs[m.group(1)]), filename_prefix)
+ filename_prefix = re.sub(r'[/\\]+', '-', filename_prefix)
+
+ return filename_prefix
+
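+# Route images to the output or temp directory based on image_output, embedding workflow metadata when requested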
+def save_images(self, images, preview_prefix, save_prefix, image_output, prompt=None, extra_pnginfo=None, my_unique_id=None, embed_workflow=True, output_folder="Default", number_padding=5, overwrite_existing="False"):
+ if output_folder != "Default" and not os.path.exists(output_folder):
+ ttNl(f"Folder {output_folder} does not exist. Attempting to create...").warn().p()
+ try:
+ os.makedirs(output_folder)
+ ttNl(f"{output_folder} Created Successfully").success().p()
+        except Exception:
+ ttNl(f"Failed to create folder {output_folder}").error().p()
+
+    if image_output == "Hide":
+ return []
+ elif image_output in ("Save", "Hide/Save"):
+ output_dir = output_folder if os.path.exists(output_folder) else folder_paths.get_output_directory()
+ filename_prefix = save_prefix
+ type = "output"
+    elif image_output == "Preview":
+ output_dir = folder_paths.get_temp_directory()
+ filename_prefix = preview_prefix
+ type = "temp"
+
+ filename_prefix = filename_parser(filename_prefix, prompt, my_unique_id)
+ full_output_folder, filename, counter, subfolder, filename_prefix = get_save_image_path(filename_prefix, output_dir, images[0].shape[1], images[0].shape[0], output_dir)
+
+ results = []
+ for image in images:
+ img = Image.fromarray(np.clip(255. * image.cpu().numpy(), 0, 255).astype(np.uint8))
+ metadata = PngInfo()
+
+ if embed_workflow in (True, "True"):
+ if prompt is not None:
+ metadata.add_text("prompt", json.dumps(prompt))
+ if extra_pnginfo is not None:
+ for key, value in extra_pnginfo.items():
+ metadata.add_text(key, json.dumps(value))
+
+ def filename_padding(number_padding, filename, counter):
+ return f"{filename}.png" if number_padding is None else f"{filename}_{counter:0{number_padding}}.png"
+
+ number_padding = None if number_padding == "None" else int(number_padding)
+ overwrite_existing = True if overwrite_existing == "True" else False
+
+ file = os.path.join(full_output_folder, filename_padding(number_padding, filename, counter))
+
+ if overwrite_existing or not os.path.isfile(file):
+ img.save(file, pnginfo=metadata, compress_level=4)
+ else:
+ if number_padding is None:
+ number_padding = 1
+ while os.path.isfile(file):
+ number_padding += 1
+ file = os.path.join(full_output_folder, filename_padding(number_padding, filename, counter))
+ img.save(file, pnginfo=metadata, compress_level=4)
+
+ results.append({"filename": file, "subfolder": subfolder, "type": type})
+ counter += 1
+
+ return results
+
+#---------------------------------------------------------------ttN/pipe START----------------------------------------------------------------------#
class ttN_TSC_pipeLoader:
+ version = '1.0.0'
@classmethod
def INPUT_TYPES(cls):
return {"required": {
@@ -210,12 +524,12 @@ def INPUT_TYPES(cls):
"negative_token_normalization": (["none", "mean", "length", "length+mean"],),
"negative_weight_interpretation": (["comfy", "A1111", "compel", "comfy++"],),
- "empty_latent_width": ("INT", {"default": 512, "min": 64, "max": MAX_RESOLUTION, "step": 64}),
- "empty_latent_height": ("INT", {"default": 512, "min": 64, "max": MAX_RESOLUTION, "step": 64}),
+ "empty_latent_width": ("INT", {"default": 512, "min": 64, "max": MAX_RESOLUTION, "step": 8}),
+ "empty_latent_height": ("INT", {"default": 512, "min": 64, "max": MAX_RESOLUTION, "step": 8}),
"batch_size": ("INT", {"default": 1, "min": 1, "max": 64}),
"seed": ("INT", {"default": 0, "min": 0, "max": 0xffffffffffffffff}),
},
- "hidden": {"prompt": "PROMPT"}}
+ "hidden": {"prompt": "PROMPT", "ttNnodeVersion": ttN_TSC_pipeLoader.version}}
RETURN_TYPES = ("PIPE_LINE" ,"MODEL", "CONDITIONING", "CONDITIONING", "LATENT", "VAE", "CLIP", "INT",)
RETURN_NAMES = ("pipe","model", "positive", "negative", "latent", "vae", "clip", "seed",)
@@ -245,8 +559,6 @@ def adv_pipeloader(self, ckpt_name, vae_name, clip_skip,
# Load models
model, clip, vae = load_checkpoint(ckpt_name)
-
- # note: load_lora only works properly (as of now) when ckpt dictionary is only 1 entry long!
if lora1_name != "None":
model, clip = load_lora(lora1_name, model, clip, lora1_model_strength, lora1_clip_strength)
@@ -270,165 +582,53 @@ def adv_pipeloader(self, ckpt_name, vae_name, clip_skip,
negative_embeddings_final = advanced_encode(clip, negative, negative_token_normalization, negative_weight_interpretation, w_max=1.0)
image = pil2tensor(Image.new('RGB', (1, 1), (0, 0, 0)))
- pipe = (model, [[positive_embeddings_final, {}]], [[negative_embeddings_final, {}]], samples, vae, clip, image, seed)
-
- return (pipe, model, [[positive_embeddings_final, {}]], [[negative_embeddings_final, {}]], samples, vae, clip, seed)
-#---------------------------------------------------------------ttN Pipe Loader END-----------------------------------------------------------------#
-
-#Functions for upscaling
-def enforce_mul_of_64(d):
- d = int(d)
- if d<=7:
- d = 8
- leftover = d % 8 # 8 is the number of pixels per byte
- if leftover != 0: # if the number of pixels is not a multiple of 8
- if (leftover < 4): # if the number of pixels is less than 4
- d -= leftover # remove the leftover pixels
- else: # if the number of pixels is more than 4
- d += 8 - leftover # add the leftover pixels
-
- return int(d)
-
-def upscale(samples, upscale_method, factor, crop):
-
- samples = samples[0]
- s = samples.copy()
- x = samples["samples"].shape[3]
- y = samples["samples"].shape[2]
-
- new_x = int(x * factor)
- new_y = int(y * factor)
-
- if (new_x > MAX_RESOLUTION):
- new_x = MAX_RESOLUTION
- if (new_y > MAX_RESOLUTION):
- new_y = MAX_RESOLUTION
-
- #print(f'{PACKAGE_NAME}:upscale from ({x*8},{y*8}) to ({new_x*8},{new_y*8})')
-
- s["samples"] = comfy.utils.common_upscale(
- samples["samples"], enforce_mul_of_64(
- new_x), enforce_mul_of_64(new_y), upscale_method, crop
- )
- return (s,)
-
-def get_save_image_path(filename_prefix, output_dir, image_width=0, image_height=0, output_folder="Default"):
- def map_filename(filename):
- prefix_len = len(os.path.basename(filename_prefix))
- prefix = filename[:prefix_len + 1]
- try:
- digits = int(filename[prefix_len + 1:].split('_')[0])
- except:
- digits = 0
- return (digits, prefix)
-
- def compute_vars(input, image_width, image_height):
- input = input.replace("%width%", str(image_width))
- input = input.replace("%height%", str(image_height))
- return input
-
- filename_prefix = compute_vars(filename_prefix, image_width, image_height)
-
- subfolder = os.path.dirname(os.path.normpath(filename_prefix))
- filename = os.path.basename(os.path.normpath(filename_prefix))
-
- if os.path.isdir(output_folder):
- full_output_folder = output_folder
- else:
- full_output_folder = os.path.join(output_dir, subfolder)
-
- try:
- counter = max(filter(lambda a: a[1][:-1] == filename and a[1][-1] == "_", map(map_filename, os.listdir(full_output_folder))))[0] + 1
- except ValueError:
- counter = 1
- except FileNotFoundError:
- os.makedirs(full_output_folder, exist_ok=True)
- counter = 1
- return full_output_folder, filename, counter, subfolder, filename_prefix
-
-def format_date(text, date):
- parts = {
- 'd': lambda d: d.day,
- 'M': lambda d: d.month,
- 'h': lambda d: d.hour,
- 'm': lambda d: d.minute,
- 's': lambda d: d.second,
- }
-
- for key in parts.keys():
- if key + key in text:
- text = text.replace(key + key, '{:02d}'.format(parts[key](date)))
- elif key in text:
- text = text.replace(key, '{}'.format(parts[key](date)))
-
- if 'yyyy' in text:
- text = text.replace('yyyy', str(date.year))
-
- if 'yyy' in text:
- text = text.replace('yyy', str(date.year)[1:])
-
- if 'yy' in text:
- text = text.replace('yy', str(date.year)[2:])
-
- return text
-
-def replace_date_placeholder(input_string):
- date_pattern = re.compile(r'%date:(.*?)%')
- matches = date_pattern.findall(input_string)
-
- for match in matches:
- current_date_str = format_date(match, datetime.datetime.now())
- input_string = input_string.replace(f'%date:{match}%', current_date_str)
-
- return input_string
-
-
-def save_images(self, images, preview_prefix, save_prefix, image_output, prompt=None, extra_pnginfo=None, output_folder="Default"):
- if image_output in ("Save", "Hide/Save"):
- output_dir = folder_paths.get_output_directory()
- filename_prefix = save_prefix
- type = "output"
- elif image_output in ("Preview", "Hide"):
- output_dir = folder_paths.get_temp_directory()
- filename_prefix = preview_prefix
- type = "temp"
-
- filename_prefix = replace_date_placeholder(filename_prefix)
-
- full_output_folder, filename, counter, subfolder, filename_prefix = get_save_image_path(filename_prefix, output_dir, images[0].shape[1], images[0].shape[0], output_folder)
- results = list()
- for image in images:
- i = 255. * image.cpu().numpy()
- img = Image.fromarray(np.clip(i, 0, 255).astype(np.uint8))
- metadata = PngInfo()
- if prompt is not None:
- metadata.add_text("prompt", json.dumps(prompt))
- if extra_pnginfo is not None:
- for x in extra_pnginfo:
- metadata.add_text(x, json.dumps(extra_pnginfo[x]))
-
- file = f"{filename}_{counter:05}_.png"
- img.save(os.path.join(full_output_folder, file), pnginfo=metadata, compress_level=4)
- results.append({
- "filename": file,
- "subfolder": subfolder,
- "type": type
- })
- counter += 1
-
- return results
+ pipe = {"vars": {"model": model,
+ "positive": positive_embeddings_final,
+ "negative": negative_embeddings_final,
+ "samples": samples,
+ "vae": vae,
+ "clip": clip,
+ "images": image,
+ "seed": seed},
+ "orig": {"model": model,
+ "positive": positive_embeddings_final,
+ "negative": negative_embeddings_final,
+ "samples": samples,
+ "vae": vae,
+ "clip": clip,
+ "images": image,
+ "seed": seed},
+
+ "loader_settings": {"ckpt_name": ckpt_name,
+ "vae_name": vae_name,
+ "clip_skip": clip_skip,
+ "lora1_name": lora1_name,
+ "lora1_model_strength": lora1_model_strength,
+ "lora1_clip_strength": lora1_clip_strength,
+ "lora2_name": lora2_name,
+ "lora2_model_strength": lora2_model_strength,
+ "lora2_clip_strength": lora2_clip_strength,
+ "lora3_name": lora3_name,
+ "lora3_model_strength": lora3_model_strength,
+ "lora3_clip_strength": lora3_clip_strength,
+ "positive": positive,
+ "positive_token_normalization": positive_token_normalization,
+ "positive_weight_interpretation": positive_weight_interpretation,
+ "negative": negative,
+ "negative_token_normalization": negative_token_normalization,
+ "negative_weight_interpretation": negative_weight_interpretation,
+ "empty_latent_width": empty_latent_width,
+ "empty_latent_height": empty_latent_height,
+ "batch_size": batch_size,
+ "seed": seed,
+ "empty_samples": samples,
+ "empty_image": image,}
+ }
+ return (pipe, model, positive_embeddings_final, negative_embeddings_final, samples, vae, clip, seed)
-#---------------------------------------------------------------ttN Pipe KSampler START-------------------------------------------------------------#
-last_helds: dict[str, list] = {
- "results": [],
- "samples": [],
- "images": [],
- "vae_decode": []
-}
-
-# ttN pipeKSampler (Modified from TSC KSampler (Advanced), Upscale from QualityOfLifeSuite_Omar92)
class ttN_TSC_pipeKSampler:
+ version = '1.0.0'
empty_image = pil2tensor(Image.new('RGBA', (1, 1), (0, 0, 0, 0)))
upscale_methods = ["None", "nearest-exact", "bilinear", "area"]
crop_methods = ["disabled", "center"]
@@ -436,7 +636,6 @@ class ttN_TSC_pipeKSampler:
def __init__(self):
pass
-
@classmethod
def INPUT_TYPES(cls):
return {"required":
@@ -449,13 +648,13 @@ def INPUT_TYPES(cls):
"upscale_method": (cls.upscale_methods,),
"factor": ("FLOAT", {"default": 2, "min": 0.0, "max": 10.0, "step": 0.25}),
"crop": (cls.crop_methods,),
- "sampler_state": (["Sample", "Hold", "Script"], ),
+ "sampler_state": (["Sample", "Hold"], ),
"steps": ("INT", {"default": 20, "min": 1, "max": 10000}),
"cfg": ("FLOAT", {"default": 8.0, "min": 0.0, "max": 100.0}),
"sampler_name": (comfy.samplers.KSampler.SAMPLERS,),
"scheduler": (comfy.samplers.KSampler.SCHEDULERS,),
"denoise": ("FLOAT", {"default": 1.0, "min": 0.0, "max": 1.0, "step": 0.01}),
- "image_output": (["Disabled", "Hide", "Preview", "Save", "Hide/Save"],),
+ "image_output": (["Hide", "Preview", "Save", "Hide/Save"],),
"save_prefix": ("STRING", {"default": "ComfyUI"})
},
"optional":
@@ -466,11 +665,13 @@ def INPUT_TYPES(cls):
"optional_latent": ("LATENT",),
"optional_vae": ("VAE",),
"optional_clip": ("CLIP",),
- "script": ("SCRIPT",),
+ "xyPlot": ("XYPLOT",),
},
"hidden":
- {"prompt": "PROMPT", "extra_pnginfo": "EXTRA_PNGINFO", "my_unique_id": "UNIQUE_ID",},
- }
+ {"prompt": "PROMPT", "extra_pnginfo": "EXTRA_PNGINFO", "my_unique_id": "UNIQUE_ID",
+ "embeddingsList": (folder_paths.get_filename_list("embeddings"),),
+ "ttNnodeVersion": ttN_TSC_pipeKSampler.version},
+ }
RETURN_TYPES = ("PIPE_LINE", "MODEL", "CONDITIONING", "CONDITIONING", "LATENT", "VAE", "CLIP", "IMAGE", "INT",)
RETURN_NAMES = ("pipe", "model", "positive", "negative", "latent","vae", "clip", "image", "seed", )
@@ -479,26 +680,29 @@ def INPUT_TYPES(cls):
CATEGORY = "ttN/pipe"
def sample(self, pipe, lora_name, lora_model_strength, lora_clip_strength, sampler_state, steps, cfg, sampler_name, scheduler, image_output, save_prefix, denoise=1.0,
- optional_model=None, optional_positive=None, optional_negative=None, optional_latent=None, optional_vae=None, optional_clip=None, seed=None, script=None, upscale_method=None, factor=None, crop=None, prompt=None, extra_pnginfo=None, my_unique_id=None,):
+ optional_model=None, optional_positive=None, optional_negative=None, optional_latent=None, optional_vae=None, optional_clip=None, seed=None, xyPlot=None, upscale_method=None, factor=None, crop=None, prompt=None, extra_pnginfo=None, my_unique_id=None,):
global last_helds
- model, positive, negative, samples, vae, clip, images, pipe_seed = pipe
+ # Clean Loader Models from Global
+ update_loaded_objects(prompt)
- #Optional overrides
- model = optional_model if optional_model is not None else model
- positive = optional_positive if optional_positive is not None else positive
- negative = optional_negative if optional_negative is not None else negative
- samples = optional_latent if optional_latent is not None else samples
- vae = optional_vae if optional_vae is not None else vae
- clip = optional_clip if optional_clip is not None else clip
+ my_unique_id = int(my_unique_id)
+ preview_prefix = f"KSpipe_{my_unique_id:02d}"
- seed = pipe_seed if seed in (None, 'undefined') else seed
+ pipe["vars"]["model"] = optional_model if optional_model is not None else pipe["orig"]["model"]
+ pipe["vars"]["positive"] = optional_positive if optional_positive is not None else pipe["orig"]["positive"]
+ pipe["vars"]["negative"] = optional_negative if optional_negative is not None else pipe["orig"]["negative"]
+ pipe["vars"]["samples"] = optional_latent if optional_latent is not None else pipe["orig"]["samples"]
+ pipe["vars"]["vae"] = optional_vae if optional_vae is not None else pipe["orig"]["vae"]
+ pipe["vars"]["clip"] = optional_clip if optional_clip is not None else pipe["orig"]["clip"]
- #load Lora
- if lora_name not in (None, "None"):
- model, clip = load_lora(lora_name, model, clip, lora_model_strength, lora_clip_strength)
-
+
+ if seed in (None, 'undefined'):
+ seed = pipe["vars"]["seed"]
+ else:
+ pipe["vars"]["seed"] = seed
+
def get_value_by_id(key: str, my_unique_id):
for value, id_ in last_helds[key]:
if id_ == my_unique_id:
@@ -525,93 +729,122 @@ def init_state(my_unique_id, key, default):
return value
return default
+        def safe_split(s, delimiter):
+            # Split and drop empty/whitespace-only parts (removing from a list while iterating over it skips elements)
+            parts = [part for part in s.split(delimiter) if part.strip() != '']
- def process_sample_state(self, model, seed, steps, cfg, sampler_name, scheduler, positive, negative, samples, denoise, vae, clip, images, image_output, preview_prefix, save_prefix, prompt, extra_pnginfo, my_unique_id):
+ while len(parts) < 2:
+ parts.append('None')
+ return parts
+
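+        # Unpack the pipe dict into the node's output tuple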
+ def get_output(pipe):
+ return (pipe,
+ pipe["vars"].get("model"),
+ pipe["vars"].get("positive"),
+ pipe["vars"].get("negative"),
+ pipe["vars"].get("samples"),
+ pipe["vars"].get("vae"),
+ pipe["vars"].get("clip"),
+ pipe["vars"].get("images"),
+ pipe["vars"].get("seed"))
- samples = common_ksampler(model, seed, steps, cfg, sampler_name, scheduler, positive, negative, samples, denoise=denoise)
- update_value_by_id("samples", my_unique_id, samples)
- new_pipe = (model, positive, negative, samples, vae, clip, images, seed)
+ def process_sample_state(self, pipe, lora_name, lora_model_strength, lora_clip_strength,
+ steps, cfg, sampler_name, scheduler, denoise,
+ image_output, preview_prefix, save_prefix, prompt, extra_pnginfo, my_unique_id, preview_latent):
+ # Load Lora
+ if lora_name not in (None, "None"):
+ pipe["vars"]["model"], pipe["vars"]["clip"] = load_lora(lora_name, pipe["vars"]["model"], pipe["vars"]["clip"], lora_model_strength, lora_clip_strength)
- if image_output == "Disabled":
- update_value_by_id("vae_decode", my_unique_id, True)
- return (new_pipe, model, positive, negative, samples, vae, clip, images, seed,)
+ # Upscale samples if enabled
+ pipe["vars"]["samples"] = handle_upscale(pipe["vars"]["samples"], upscale_method, factor, crop)
- latent = samples[0]["samples"]
- images = vae.decode(latent).cpu()
- update_value_by_id("images", my_unique_id, images)
+ pipe["vars"]["samples"] = common_ksampler(pipe["vars"]["model"], pipe["vars"]["seed"], steps, cfg, sampler_name, scheduler, pipe["vars"]["positive"], pipe["vars"]["negative"], pipe["vars"]["samples"], denoise=denoise, preview_latent=preview_latent)
+
+ update_value_by_id("samples", my_unique_id, pipe["vars"]["samples"])
+
+ latent = pipe["vars"]["samples"]["samples"]
+ pipe["vars"]["images"] = pipe["vars"]["vae"].decode(latent).cpu()
+ update_value_by_id("images", my_unique_id, pipe["vars"]["images"])
update_value_by_id("vae_decode", my_unique_id, False)
- results = save_images(self, images, preview_prefix, save_prefix, image_output, prompt, extra_pnginfo)
+ results = save_images(self, pipe["vars"]["images"], preview_prefix, save_prefix, image_output, prompt, extra_pnginfo, my_unique_id)
+
update_value_by_id("results", my_unique_id, results)
- list(new_pipe)[6] = images
- tuple(new_pipe)
+ # Clean loaded_objects
+ update_loaded_objects(prompt)
+
+ new_pipe = {**pipe, 'orig': pipe['vars']}
if image_output in ("Hide", "Hide/Save"):
- return (new_pipe, model, positive, negative, samples, vae, clip, images, seed,)
+ return get_output(new_pipe)
+
+ return {"ui": {"images": results},
+ "result": get_output(new_pipe)}
- return {"ui": {"images": results}, "result": (new_pipe, model, positive, negative, samples, vae, clip, images, seed,)}
+ def process_hold_state(self, pipe, image_output, preview_prefix, save_prefix, prompt, extra_pnginfo, my_unique_id):
+ ttNl('Held').t(f'pipeKSampler[{my_unique_id}]').p()
- def process_hold_state(self, model, positive, negative, vae, clip, seed, image_output, preview_prefix, save_prefix, prompt, extra_pnginfo, my_unique_id, samples, images):
- print(f'\033[32mpipeKSampler[{my_unique_id}]:\033[0mHeld')
+ # Load Lora
+ if lora_name not in (None, "None"):
+ pipe["vars"]["model"], pipe["vars"]["clip"] = load_lora(lora_name, pipe["vars"]["model"], pipe["vars"]["clip"], lora_model_strength, lora_clip_strength)
- last_samples = init_state(my_unique_id, "samples", (samples,))
+ # Upscale samples if enabled
+ pipe["vars"]["samples"] = handle_upscale(pipe["vars"]["samples"], upscale_method, factor, crop)
- last_images = init_state(my_unique_id, "images", images)
+ last_samples = init_state(my_unique_id, "samples", pipe["vars"]["samples"])
- last_results = init_state(my_unique_id, "results", list())
+ last_images = init_state(my_unique_id, "images", pipe["vars"]["images"])
- new_pipe = (model, positive, negative, last_samples, vae, clip, last_images, seed,)
- if image_output == "Disabled":
- return (new_pipe, model, positive, negative, last_samples, vae, clip, last_images, seed,)
+ last_results = init_state(my_unique_id, "results", list())
- latent = last_samples[0]["samples"]
+ latent = last_samples["samples"]
if get_value_by_id("vae_decode", my_unique_id) == True:
- images = vae.decode(latent).cpu()
- list(new_pipe)[6] = images
- tuple(new_pipe)
+ pipe["vars"]["images"] = pipe["vars"]["vae"].decode(latent).cpu()
- update_value_by_id("images", my_unique_id, images)
+ update_value_by_id("images", my_unique_id, pipe["vars"]["images"])
update_value_by_id("vae_decode", my_unique_id, False)
- results = save_images(self, images, preview_prefix, save_prefix, image_output, prompt, extra_pnginfo)
+ results = save_images(self, pipe["vars"]["images"], preview_prefix, save_prefix, image_output, prompt, extra_pnginfo, my_unique_id)
update_value_by_id("results", my_unique_id, results)
else:
- images = last_images
+ pipe["vars"]["images"] = last_images
results = last_results
- if image_output in ("Hide", "Hide/Save"):
- return (new_pipe, model, positive, negative, last_samples, vae, clip, images, seed,)
+ new_pipe = {**pipe, 'orig': pipe['vars']}
- return {"ui": {"images": results}, "result": (new_pipe, model, positive, negative, last_samples, vae, clip, images, seed,)}
+ if image_output in ("Hide", "Hide/Save"):
+ return get_output(new_pipe)
- def process_script_state(self, script, samples, images, vae, my_unique_id, seed, model, positive, negative, clip, preview_prefix, save_prefix, image_output, prompt, extra_pnginfo, scheduler, steps, cfg, sampler_name, denoise):
+ return {"ui": {"images": results}, "result": get_output(new_pipe)}
- last_samples = init_state(my_unique_id, "samples", (samples,))
- last_images = init_state(my_unique_id, "images", images)
+ def process_xyPlot(self, pipe, lora_name, lora_model_strength, lora_clip_strength,
+ steps, cfg, sampler_name, scheduler, denoise,
+ image_output, preview_prefix, save_prefix, prompt, extra_pnginfo, my_unique_id, preview_latent, xyPlot):
+
+ x_node_type, x_type = safe_split(xyPlot[0], ': ')
+ x_values = xyPlot[1]
+ if x_type == 'None':
+ x_values = []
- new_pipe = (model, positive, negative, samples, vae, clip, last_images, seed,)
- # If no script input connected, set X_type and Y_type to "Nothing"
- if script is None:
- X_type = "Nothing"
- Y_type = "Nothing"
- else:
- # Unpack script Tuple (X_type, X_value, Y_type, Y_value, grid_spacing, latent_id)
- X_type, X_value, Y_type, Y_value, grid_spacing, latent_id = script
+ y_node_type, y_type = safe_split(xyPlot[2], ': ')
+ y_values = xyPlot[3]
+ if y_type == 'None':
+ y_values = []
- if (X_type == "Nothing" and Y_type == "Nothing"):
- print('\033[31mpipeKSampler[{}] Error:\033[0m No valid script entry detected'.format(my_unique_id))
- return {"ui": {"images": list()},
- "result": (new_pipe, model, positive, negative, last_samples, vae, clip, last_images, seed)}
+ grid_spacing = xyPlot[4]
+ latent_id = xyPlot[5]
- if vae == (None,):
- print('\033[31mpipeKSampler[{}] Error:\033[0m VAE must be connected to use Script mode.'.format(my_unique_id))
- return {"ui": {"images": list()},
- "result": (new_pipe, model, positive, negative, last_samples, vae, clip, last_images, seed)}
+ if x_type == 'None' and y_type == 'None':
+ ttNl('No Valid Plot Types - Reverting to default sampling...').t(f'pipeKSampler[{my_unique_id}]').warn().p()
+ return process_sample_state(self, pipe, lora_name, lora_model_strength, lora_clip_strength, steps, cfg, sampler_name, scheduler, denoise, image_output, preview_prefix, save_prefix, prompt, extra_pnginfo, my_unique_id, preview_latent)
+
# Extract the 'samples' tensor from the dictionary
- latent_image_tensor = samples['samples']
+ latent_image_tensor = pipe['orig']['samples']['samples']
# Split the tensor into individual image tensors
image_tensors = torch.split(latent_image_tensor, 1, dim=0)
@@ -621,220 +854,155 @@ def process_script_state(self, script, samples, images, vae, my_unique_id, seed,
# Set latent only to the first latent of batch
if latent_id >= len(latent_list):
- print(
- f'\033[31mpipeKSampler[{my_unique_id}] Warning:\033[0m '
- f'The selected latent_id ({latent_id}) is out of range.\n'
- f'Automatically setting the latent_id to the last image in the list (index: {len(latent_list) - 1}).')
+ ttNl(f'The selected latent_id ({latent_id}) is out of range.').t(f'pipeKSampler[{my_unique_id}]').warn().p()
+ ttNl(f'Automatically setting the latent_id to the last image in the list (index: {len(latent_list) - 1}).').t(f'pipeKSampler[{my_unique_id}]').warn().p()
+
latent_id = len(latent_list) - 1
latent_image = latent_list[latent_id]
- # Define X/Y_values for "Seeds++ Batch"
- if X_type == "Seeds++ Batch":
- X_value = [latent_image for _ in range(X_value[0])]
- if Y_type == "Seeds++ Batch":
- Y_value = [latent_image for _ in range(Y_value[0])]
-
- # Define X/Y_values for "Latent Batch"
- if X_type == "Latent Batch":
- X_value = latent_list
- if Y_type == "Latent Batch":
- Y_value = latent_list
-
- # Embedd information into "Scheduler" X/Y_values for text label
- if X_type == "Scheduler" and Y_type != "Sampler":
- # X_value second list value of each array entry = None
- for i in range(len(X_value)):
- if len(X_value[i]) == 2:
- X_value[i][1] = None
- else:
- X_value[i] = [X_value[i], None]
- if Y_type == "Scheduler" and X_type != "Sampler":
- # Y_value second list value of each array entry = None
- for i in range(len(Y_value)):
- if len(Y_value[i]) == 2:
- Y_value[i][1] = None
- else:
- Y_value[i] = [Y_value[i], None]
-
- def define_variable(var_type, var, seed, steps, cfg,sampler_name, scheduler, latent_image, denoise,
- vae_name, var_label, num_label):
-
- # If var_type is "Seeds++ Batch", update var and seed, and generate labels
- if var_type == "Latent Batch":
- latent_image = var
- text = f"{len(var_label)}"
- # If var_type is "Seeds++ Batch", update var and seed, and generate labels
- elif var_type == "Seeds++ Batch":
- text = f"seed: {seed}"
- # If var_type is "Steps", update steps and generate labels
- elif var_type == "Steps":
- steps = var
- text = f"Steps: {steps}"
- # If var_type is "CFG Scale", update cfg and generate labels
- elif var_type == "CFG Scale":
- cfg = var
- text = f"CFG Scale: {cfg}"
- # If var_type is "Sampler", update sampler_name, scheduler, and generate labels
- elif var_type == "Sampler":
- sampler_name = var[0]
- if var[1] == "":
- text = f"{sampler_name}"
- else:
- if var[1] != None:
- scheduler[0] = var[1]
- else:
- scheduler[0] = scheduler[1]
- text = f"{sampler_name} ({scheduler[0]})"
- text = text.replace("ancestral", "a").replace("uniform", "u")
- # If var_type is "Scheduler", update scheduler and generate labels
- elif var_type == "Scheduler":
- scheduler[0] = var[0]
- if len(var) == 2:
- text = f"{sampler_name} ({var[0]})"
- else:
- text = f"{var}"
- text = text.replace("ancestral", "a").replace("uniform", "u")
- # If var_type is "Denoise", update denoise and generate labels
- elif var_type == "Denoise":
- denoise = var
- text = f"Denoise: {denoise}"
- # For any other var_type, set text to "?"
- elif var_type == "VAE":
- vae_name = var
- text = f"VAE: {vae_name}"
- # For any other var_type, set text to ""
- else:
- text = ""
-
- def truncate_texts(texts, num_label):
- min_length = min([len(text) for text in texts])
- truncate_length = min(min_length, 24)
+ random.seed(seed)
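+            # Seed the python RNG so 'randomize' seed values in the plot are reproducible for a given sampler seed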
- if truncate_length < 16:
- truncate_length = 16
+ plot_image_vars = {
+ "x_node_type": x_node_type, "y_node_type": y_node_type,
+ "lora_name": lora_name, "lora_model_strength": lora_model_strength, "lora_clip_strength": lora_clip_strength,
+ "steps": steps, "cfg": cfg, "sampler_name": sampler_name, "scheduler": scheduler, "denoise": denoise, "seed": pipe["vars"]["seed"],
- truncated_texts = []
- for text in texts:
- if len(text) > truncate_length:
- text = text[:truncate_length] + "..."
- truncated_texts.append(text)
-
- return truncated_texts
-
- # Add the generated text to var_label if it's not full
- if len(var_label) < num_label:
- var_label.append(text)
-
- # If var_type VAE , truncate entries in the var_label list when it's full
- if len(var_label) == num_label and var_type == "VAE":
- var_label = truncate_texts(var_label, num_label)
+ "model": pipe["vars"]["model"], "vae": pipe["vars"]["vae"], "clip": pipe["vars"]["clip"], "positive_cond": pipe["vars"]["positive"], "negative_cond": pipe["vars"]["negative"],
+
+ "ckpt_name": pipe['loader_settings']['ckpt_name'],
+ "vae_name": pipe['loader_settings']['vae_name'],
+ "clip_skip": pipe['loader_settings']['clip_skip'],
+ "lora1_name": pipe['loader_settings']['lora1_name'],
+ "lora1_model_strength": pipe['loader_settings']['lora1_model_strength'],
+ "lora1_clip_strength": pipe['loader_settings']['lora1_clip_strength'],
+ "lora2_name": pipe['loader_settings']['lora2_name'],
+ "lora2_model_strength": pipe['loader_settings']['lora2_model_strength'],
+ "lora2_clip_strength": pipe['loader_settings']['lora2_clip_strength'],
+ "lora3_name": pipe['loader_settings']['lora3_name'],
+ "lora3_model_strength": pipe['loader_settings']['lora3_model_strength'],
+ "lora3_clip_strength": pipe['loader_settings']['lora3_clip_strength'],
+ "positive": pipe['loader_settings']['positive'],
+ "positive_token_normalization": pipe['loader_settings']['positive_token_normalization'],
+ "positive_weight_interpretation": pipe['loader_settings']['positive_weight_interpretation'],
+ "negative": pipe['loader_settings']['negative'],
+ "negative_token_normalization": pipe['loader_settings']['negative_token_normalization'],
+ "negative_weight_interpretation": pipe['loader_settings']['negative_weight_interpretation'],
+ }
- # Return the modified variables
- return steps, cfg,sampler_name, scheduler, latent_image, denoise, vae_name, var_label
+ def define_variable(plot_image_vars, value_type, value, index):
+ value_label = f"{value}"
+ if value_type == "seed":
+ seed = int(plot_image_vars["seed"])
+ if index != 0:
+ index = 1
+ if value == 'increment':
+ plot_image_vars["seed"] = seed + index
+ value_label = f"{plot_image_vars['seed']}"
+
+ elif value == 'decrement':
+ plot_image_vars["seed"] = seed - index
+ value_label = f"{plot_image_vars['seed']}"
+
+ elif value == 'randomize':
+ plot_image_vars["seed"] = random.randint(0, 0xffffffffffffffff)
+ value_label = f"{plot_image_vars['seed']}"
+ else:
+ plot_image_vars[value_type] = value
- # Define a helper function to help process X and Y values
- def process_values(model, seed, steps, cfg, sampler_name, scheduler, positive, negative, latent_image, denoise,
- vae,vae_name, latent_new=[], max_width=0, max_height=0, image_list=[], size_list=[]):
+ if value_type in ["steps", "cfg", "denoise", "clip_skip",
+ "lora1_model_strength", "lora1_clip_strength",
+ "lora2_model_strength", "lora2_clip_strength",
+ "lora3_model_strength", "lora3_clip_strength"]:
+ value_label = f"{value_type}: {value}"
+
+ elif value_type == "positive_token_normalization":
+ value_label = f'(+) token norm.: {value}'
+ elif value_type == "positive_weight_interpretation":
+ value_label = f'(+) weight interp.: {value}'
+ elif value_type == "negative_token_normalization":
+ value_label = f'(-) token norm.: {value}'
+ elif value_type == "negative_weight_interpretation":
+ value_label = f'(-) weight interp.: {value}'
+
+ elif value_type == "positive":
+ value_label = f"pos prompt {index + 1}"
+ elif value_type == "negative":
+ value_label = f"neg prompt {index + 1}"
+
+ return plot_image_vars, value_label
+
+ def update_label(label, value, num_items):
+ if len(label) < num_items:
+ return [*label, value]
+ return label
+
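+            # Rebuild loader objects when a plotted value touches the loader, then sample and decode one grid cell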
+ def sample_plot_image(plot_image_vars, samples, preview_latent, max_width, max_height, latent_new, image_list):
+ model, clip, vae, positive, negative = None, None, None, None, None
+
+ if plot_image_vars["x_node_type"] == "loader" or plot_image_vars["y_node_type"] == "loader":
+ model, clip, vae = load_checkpoint(plot_image_vars['ckpt_name'])
+
+ if plot_image_vars['lora1_name'] != "None":
+ model, clip = load_lora(plot_image_vars['lora1_name'], model, clip, plot_image_vars['lora1_model_strength'], plot_image_vars['lora1_clip_strength'])
+
+ if plot_image_vars['lora2_name'] != "None":
+ model, clip = load_lora(plot_image_vars['lora2_name'], model, clip, plot_image_vars['lora2_model_strength'], plot_image_vars['lora2_clip_strength'])
+
+ if plot_image_vars['lora3_name'] != "None":
+ model, clip = load_lora(plot_image_vars['lora3_name'], model, clip, plot_image_vars['lora3_model_strength'], plot_image_vars['lora3_clip_strength'])
+
+ # Check for custom VAE
+ if plot_image_vars['vae_name'] != "Baked VAE":
+ plot_image_vars['vae'] = load_vae(plot_image_vars['vae_name'])
+
+ # CLIP skip
+ if not clip:
+ raise Exception("No CLIP found")
+ clip = clip.clone()
+ clip.clip_layer(plot_image_vars['clip_skip'])
+
+ positive = advanced_encode(clip, plot_image_vars['positive'], plot_image_vars['positive_token_normalization'], plot_image_vars['positive_weight_interpretation'], w_max=1.0)
+ negative = advanced_encode(clip, plot_image_vars['negative'], plot_image_vars['negative_token_normalization'], plot_image_vars['negative_weight_interpretation'], w_max=1.0)
+
+ model = model if model is not None else plot_image_vars["model"]
+ clip = clip if clip is not None else plot_image_vars["clip"]
+ vae = vae if vae is not None else plot_image_vars["vae"]
+ positive = positive if positive is not None else plot_image_vars["positive_cond"]
+ negative = negative if negative is not None else plot_image_vars["negative_cond"]
+
+ seed = plot_image_vars["seed"]
+ steps = plot_image_vars["steps"]
+ cfg = plot_image_vars["cfg"]
+ sampler_name = plot_image_vars["sampler_name"]
+ scheduler = plot_image_vars["scheduler"]
+ denoise = plot_image_vars["denoise"]
+
+ if plot_image_vars["lora_name"] not in ('None', None):
+ model, clip = load_lora(plot_image_vars["lora_name"], model, clip, plot_image_vars["lora_model_strength"], plot_image_vars["lora_clip_strength"])
# Sample
- samples = common_ksampler(model, seed, steps, cfg, sampler_name, scheduler, positive, negative,
- latent_image, denoise=denoise)
+ samples = common_ksampler(model, seed, steps, cfg, sampler_name, scheduler, positive, negative, samples, denoise=denoise, preview_latent=preview_latent)
# Decode images and store
- latent = samples[0]["samples"]
+ latent = samples["samples"]
# Add the latent tensor to the tensors list
latent_new.append(latent)
- # Load custom vae if available
- if vae_name is not None:
- vae = load_vae(vae_name)
-
# Decode the image
image = vae.decode(latent).cpu()
# Convert the image from tensor to PIL Image and add it to the list
pil_image = tensor2pil(image)
image_list.append(pil_image)
- size_list.append(pil_image.size)
-
- # Save the original image
- save_images(self, image, preview_prefix, save_prefix, image_output, prompt, extra_pnginfo)
# Update max dimensions
max_width = max(max_width, pil_image.width)
max_height = max(max_height, pil_image.height)
# Return the touched variables
- return image_list, size_list, max_width, max_height, latent_new
-
- # Initiate Plot label text variables X/Y_label
- X_label = []
- Y_label = []
-
- # Seed_updated for "Seeds++ Batch" incremental seeds
- seed_updated = seed
-
- # Store the KSamplers original scheduler inside the same scheduler variable
- scheduler = [scheduler, scheduler]
-
- # By default set vae_name to None
- vae_name = None
-
- # Fill Plot Rows (X)
- for X_index, X in enumerate(X_value):
- # Seed control based on loop index during Batch
- if X_type == "Seeds++ Batch":
- # Update seed based on the inner loop index
- seed_updated = seed + X_index
-
- # Define X parameters and generate labels
- steps, cfg, sampler_name, scheduler, latent_image, denoise, vae_name, X_label = \
- define_variable(X_type, X, seed_updated, steps, cfg, sampler_name, scheduler, latent_image,
- denoise, vae_name, X_label, len(X_value))
-
- if Y_type != "Nothing":
- # Seed control based on loop index during Batch
- for Y_index, Y in enumerate(Y_value):
- if Y_type == "Seeds++ Batch":
- # Update seed based on the inner loop index
- seed_updated = seed + Y_index
-
- # Define Y parameters and generate labels
- steps, cfg, sampler_name, scheduler, latent_image, denoise, vae_name, Y_label = \
- define_variable(Y_type, Y, seed_updated, steps, cfg, sampler_name, scheduler, latent_image,
- denoise, vae_name, Y_label, len(Y_value))
-
- # Generate images
- image_list, size_list, max_width, max_height, latent_new = \
- process_values(model, seed_updated, steps, cfg, sampler_name, scheduler[0],
- positive, negative, latent_image, denoise, vae, vae_name)
- else:
- # Generate images
- image_list, size_list, max_width, max_height, latent_new = \
- process_values(model, seed_updated, steps, cfg, sampler_name, scheduler[0],
- positive, negative, latent_image, denoise, vae, vae_name)
-
-
- def adjusted_font_size(text, initial_font_size, max_width):
- font = ImageFont.truetype(str(Path(font_path)), initial_font_size)
- text_width, _ = font.getsize(text)
-
- if text_width > (max_width * 0.9):
- scaling_factor = 0.9 # A value less than 1 to shrink the font size more aggressively
- new_font_size = int(initial_font_size * (max_width / text_width) * scaling_factor)
- else:
- new_font_size = initial_font_size
-
- return new_font_size
-
- # Disable vae decode on next Hold
- update_value_by_id("vae_decode", my_unique_id, False)
-
- # Extract plot dimensions
- num_rows = max(len(Y_value) if Y_value is not None else 0, 1)
- num_cols = max(len(X_value) if X_value is not None else 0, 1)
+ return image_list, max_width, max_height, latent_new
def rearrange_tensors(latent, num_cols, num_rows):
new_latent = []
@@ -844,172 +1012,191 @@ def rearrange_tensors(latent, num_cols, num_rows):
new_latent.append(latent[index])
return new_latent
- # Rearrange latent array to match preview image grid
- latent_new = rearrange_tensors(latent_new, num_cols, num_rows)
+ def calculate_background_dimensions(x_type, y_type, num_rows, num_cols, max_height, max_width, grid_spacing):
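+                # Reserve a border strip for axis labels on active axes and size the grid canvas accordingly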
+ border_size = int((max_width//8)*1.5) if y_type != "None" or x_type != "None" else 0
+ bg_width = num_cols * (max_width + grid_spacing) - grid_spacing + border_size * (y_type != "None")
+ bg_height = num_rows * (max_height + grid_spacing) - grid_spacing + border_size * (x_type != "None")
- # Concatenate the tensors along the first dimension (dim=0)
- latent_new = torch.cat(latent_new, dim=0)
- samples_new = {"samples": latent_new}
+ x_offset_initial = border_size if y_type != "None" else 0
+ y_offset = border_size if x_type != "None" else 0
- # Store latent_new as last latent
- update_value_by_id("samples", my_unique_id, samples_new)
+ return bg_width, bg_height, x_offset_initial, y_offset
- # Calculate the dimensions of the white background image
- border_size = max_width // 15
+ def get_font(font_size):
+ return ImageFont.truetype(str(Path(ttNpaths.font_path)), font_size)
- # Modify the background width and x_offset initialization based on Y_type
- if Y_type == "Nothing":
- bg_width = num_cols * max_width + (num_cols - 1) * grid_spacing
- x_offset_initial = 0
- else:
- bg_width = num_cols * max_width + (num_cols - 1) * grid_spacing + 3 * border_size
- x_offset_initial = border_size * 3
+ def adjusted_font_size(text, initial_font_size, max_width):
+ font = get_font(initial_font_size)
+ text_width, _ = font.getsize(text)
- # Modify the background height based on X_type
- if X_type == "Nothing":
- bg_height = num_rows * max_height + (num_rows - 1) * grid_spacing
- y_offset = 0
- else:
- bg_height = num_rows * max_height + (num_rows - 1) * grid_spacing + 3 * border_size
- y_offset = border_size * 3
+ scaling_factor = 0.9
+ if text_width > (max_width * scaling_factor):
+ return int(initial_font_size * (max_width / text_width) * scaling_factor)
+ else:
+ return initial_font_size
- # Create the white background image
- background = Image.new('RGBA', (int(bg_width), int(bg_height)), color=(255, 255, 255, 255))
-        for row in range(num_rows):
-            # Initialize the X_offset
-            x_offset = x_offset_initial
-            for col in range(num_cols):
-                # Calculate the index for image_list
-                index = col * num_rows + row
-                img = image_list[index]
-                # Paste the image
-                background.paste(img, (x_offset, y_offset))
-                if row == 0 and X_type != "Nothing":
-                    # Assign text
-                    text = X_label[col]
+
+ def create_label(img, text, initial_font_size, is_x_label=True, max_font_size=70, min_font_size=10):
+ label_width = img.width if is_x_label else img.height
+
+ # Adjust font size
+ font_size = adjusted_font_size(text, initial_font_size, label_width)
+ font_size = min(max_font_size, font_size) # Ensure font isn't too large
+ font_size = max(min_font_size, font_size) # Ensure font isn't too small
+
+ label_height = int(font_size * 1.5) if is_x_label else font_size
+
+ label_bg = Image.new('RGBA', (label_width, label_height), color=(255, 255, 255, 0))
+ d = ImageDraw.Draw(label_bg)
+
+ font = get_font(font_size)
+
+ # Check if text will fit, if not insert ellipsis and reduce text
+ if d.textsize(text, font=font)[0] > label_width:
+ while d.textsize(text+'...', font=font)[0] > label_width and len(text) > 0:
+ text = text[:-1]
+ text = text + '...'
+
+ # Compute text width and height for multi-line text
+ text_lines = text.split('\n')
+ text_widths, text_heights = zip(*[d.textsize(line, font=font) for line in text_lines])
+ max_text_width = max(text_widths)
+ total_text_height = sum(text_heights)
+
+ # Compute position for each line of text
+ lines_positions = []
+ current_y = 0
+ for line, line_width, line_height in zip(text_lines, text_widths, text_heights):
+ text_x = (label_width - line_width) // 2
+ text_y = current_y + (label_height - total_text_height) // 2
+ current_y += line_height
+ lines_positions.append((line, (text_x, text_y)))
+
+ # Draw each line of text
+ for line, (text_x, text_y) in lines_positions:
+ d.text((text_x, text_y), line, fill='black', font=font)
+
+ return label_bg
+
+ # Define vars, get label and sample images
+ x_label, y_label = [], []
+ max_width, max_height = 0, 0
+ latent_new = []
+ image_list = []
+
+ for x_index, x_value in enumerate(x_values):
+ plot_image_vars, x_value_label = define_variable(plot_image_vars, x_type, x_value, x_index)
+ x_label = update_label(x_label, x_value_label, len(x_values))
+ if y_type != 'None':
+ for y_index, y_value in enumerate(y_values):
+ plot_image_vars, y_value_label = define_variable(plot_image_vars, y_type, y_value, y_index)
+ y_label = update_label(y_label, y_value_label, len(y_values))
+
+ ttNl(f'{CC.GREY}X: {x_value_label}, Y: {y_value_label}').t('Plot Values ->').p()
+ image_list, max_width, max_height, latent_new = sample_plot_image(plot_image_vars, latent_image, preview_latent, max_width, max_height, latent_new, image_list)
+ else:
+ ttNl(f'{CC.GREY}X: {x_value_label}').t('Plot Values ->').p()
+ image_list, max_width, max_height, latent_new = sample_plot_image(plot_image_vars, latent_image, preview_latent, max_width, max_height, latent_new, image_list)
- # Add the corresponding X_value as a label above the image
- initial_font_size = int(48 * img.width / 512)
- font_size = adjusted_font_size(text, initial_font_size, img.width)
- label_height = int(font_size*1.5)
+ # Extract plot dimensions
+ num_rows = len(y_values) if len(y_values) > 0 else 1
+ num_cols = len(x_values) if len(x_values) > 0 else 1
- # Create a white background label image
- label_bg = Image.new('RGBA', (img.width, label_height), color=(255, 255, 255, 0))
- d = ImageDraw.Draw(label_bg)
+ # Rearrange latent array to match preview image grid
+ latent_new = rearrange_tensors(latent_new, num_cols, num_rows)
- # Create the font object
- font = ImageFont.truetype(str(Path(font_path)), font_size)
+ # Concatenate the tensors along the first dimension (dim=0)
+ latent_new = torch.cat(latent_new, dim=0)
+
+ # Update pipe, Store latent_new as last latent, Disable vae decode on next Hold
+ pipe['vars']['samples'] = {"samples": latent_new}
+ update_value_by_id("samples", my_unique_id, pipe['vars']['samples'])
+ update_value_by_id("vae_decode", my_unique_id, False)
- # Calculate the text size and the starting position
- text_width, text_height = d.textsize(text, font=font)
- text_x = (img.width - text_width) // 2
- text_y = (label_height - text_height) // 2
+ # Calculate the background dimensions
+ bg_width, bg_height, x_offset_initial, y_offset = calculate_background_dimensions(x_type, y_type, num_rows, num_cols, max_height, max_width, grid_spacing)
- # Add the text to the label image
- d.text((text_x, text_y), text, fill='black', font=font)
+ # Create the white background image
+ background = Image.new('RGBA', (int(bg_width), int(bg_height)), color=(255, 255, 255, 255))
- # Calculate the available space between the top of the background and the top of the image
- available_space = y_offset - label_height
+ for row_index in range(num_rows):
+ x_offset = x_offset_initial
- # Calculate the new Y position for the label image
- label_y = available_space // 2
+ for col_index in range(num_cols):
+ index = col_index * num_rows + row_index
+ img = image_list[index]
+ background.paste(img, (x_offset, y_offset))
- # Paste the label image above the image on the background using alpha_composite()
+ # Handle X label
+ if row_index == 0 and x_type != "None":
+ label_bg = create_label(img, x_label[col_index], int(48 * img.width / 512))
+ label_y = (y_offset - label_bg.height) // 2
background.alpha_composite(label_bg, (x_offset, label_y))
- if col == 0 and Y_type != "Nothing":
- # Assign text
- text = Y_label[row]
-
- # Add the corresponding Y_value as a label to the left of the image
- initial_font_size = int(48 * img.height / 512)
- font_size = adjusted_font_size(text, initial_font_size, img.height)
+ # Handle Y label
+ if col_index == 0 and y_type != "None":
+ label_bg = create_label(img, y_label[row_index], int(48 * img.height / 512), False)
+ label_bg = label_bg.rotate(90, expand=True)
- # Create a white background label image
- label_bg = Image.new('RGBA', (img.height, font_size), color=(255, 255, 255, 0))
- d = ImageDraw.Draw(label_bg)
-
- # Create the font object
- font = ImageFont.truetype(str(Path(font_path)), font_size)
-
- # Calculate the text size and the starting position
- text_width, text_height = d.textsize(text, font=font)
- text_x = (img.height - text_width) // 2
- text_y = (font_size - text_height) // 2
-
- # Add the text to the label image
- d.text((text_x, text_y), text, fill='black', font=font)
-
- # Rotate the label_bg 90 degrees counter-clockwise
- if Y_type != "Latent Batch":
- label_bg = label_bg.rotate(90, expand=True)
-
- # Calculate the available space between the left of the background and the left of the image
- available_space = x_offset - label_bg.width
-
- # Calculate the new X position for the label image
- label_x = available_space // 2
-
- # Calculate the Y position for the label image
+ label_x = (x_offset - label_bg.width) // 2
label_y = y_offset + (img.height - label_bg.height) // 2
-
- # Paste the label image to the left of the image on the background using alpha_composite()
background.alpha_composite(label_bg, (label_x, label_y))
- # Update the x_offset
x_offset += img.width + grid_spacing
- # Update the y_offset
y_offset += img.height + grid_spacing
images = pil2tensor(background)
update_value_by_id("images", my_unique_id, images)
+ pipe["vars"]["images"] = images
+
+ results = save_images(self, images, preview_prefix, save_prefix, image_output, prompt, extra_pnginfo, my_unique_id)
- # Generate image results and store
- results = save_images(self, images, preview_prefix, save_prefix, image_output, prompt, extra_pnginfo)
update_value_by_id("results", my_unique_id, results)
# Clean loaded_objects
update_loaded_objects(prompt)
- new_pipe = (model, positive, negative, latent, vae, clip, images, seed,)
+ new_pipe = {**pipe, 'orig': pipe['vars']}
if image_output in ("Hide", "Hide/Save"):
- return (new_pipe, model, positive, negative, {"samples": latent_new}, vae, clip, images, seed)
+ return get_output(new_pipe)
- # Output image results to ui and node outputs
- return {"ui": {"images": results}, "result": (new_pipe, model, positive, negative, {"samples": latent_new}, vae, clip, images, seed)}
-
-
- samples = handle_upscale(samples, upscale_method, factor, crop)
- update_loaded_objects(prompt)
- my_unique_id = int(my_unique_id)
+ return {"ui": {"images": results}, "result": get_output(new_pipe)}
- if vae == (None,):
- print(f'\033[32mpipeKSampler[{my_unique_id}] Warning:\033[0m No vae input detected, preview and output image disabled.\n')
- image_output = "Disabled"
- latent: Tensor | None = None
- preview_prefix = f"KSpipe_{my_unique_id:02d}"
+ preview_latent = True
+ if image_output in ("Hide", "Hide/Save"):
+ preview_latent = False
- if sampler_state == "Sample":
- return process_sample_state(self, model, seed, steps, cfg, sampler_name, scheduler, positive, negative, samples, denoise, vae, clip, images, image_output, preview_prefix, save_prefix, prompt, extra_pnginfo, my_unique_id)
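+        # xyPlot input is optional: when it is not connected the normal single-sample path
+        # runs, otherwise the xy-plot grid path is taken (assumption based on this diff).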
+ if sampler_state == "Sample" and xyPlot is None:
+ return process_sample_state(self, pipe, lora_name, lora_model_strength, lora_clip_strength, steps, cfg, sampler_name, scheduler, denoise, image_output, preview_prefix, save_prefix, prompt, extra_pnginfo, my_unique_id, preview_latent)
+ elif sampler_state == "Sample" and xyPlot is not None:
+ return process_xyPlot(self, pipe, lora_name, lora_model_strength, lora_clip_strength, steps, cfg, sampler_name, scheduler, denoise, image_output, preview_prefix, save_prefix, prompt, extra_pnginfo, my_unique_id, preview_latent, xyPlot)
+
elif sampler_state == "Hold":
- return process_hold_state(self, model, positive, negative, vae, clip, seed, image_output, preview_prefix, save_prefix, prompt, extra_pnginfo, my_unique_id, samples, images)
-
- elif sampler_state == "Script":
- return process_script_state(self, script, samples, images, vae, my_unique_id, seed, model, positive, negative, clip, preview_prefix, save_prefix, image_output, prompt, extra_pnginfo, scheduler, steps, cfg, sampler_name, denoise)
+ return process_hold_state(self, pipe, image_output, preview_prefix, save_prefix, prompt, extra_pnginfo, my_unique_id)
-
-#---------------------------------------------------------------ttN Pipe KSampler END---------------------------------------------------------------#
-#---------------------------------------------------------------ttN/pipe START----------------------------------------------------------------------#
class ttN_pipe_IN:
+ version = '1.0.0'
def __init__(self):
pass
@@ -1018,8 +1205,6 @@ def INPUT_TYPES(s):
return {
"required": {
"model": ("MODEL",),
- },
- "optional": {
"pos": ("CONDITIONING",),
"neg": ("CONDITIONING",),
"latent": ("LATENT",),
@@ -1028,6 +1213,7 @@ def INPUT_TYPES(s):
"image": ("IMAGE",),
"seed": ("INT", {"default": 0, "min": 0, "max": 0xffffffffffffffff}),
},
+ "hidden": {"ttNnodeVersion": ttN_pipe_IN.version},
}
RETURN_TYPES = ("PIPE_LINE", )
@@ -1037,10 +1223,29 @@ def INPUT_TYPES(s):
CATEGORY = "ttN/pipe"
def flush(self, model, pos=0, neg=0, latent=0, vae=0, clip=0, image=0, seed=0):
- pipe_line = (model, pos, neg, latent, vae, clip, image, seed, )
- return (pipe_line, )
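+        # The pipe is now a dict rather than a tuple: 'vars' holds the current working values,
+        # 'orig' keeps a snapshot of the values as first set, and 'loader_settings' is left
+        # empty here (reserved for loader widget settings when the pipe comes from pipeLoader).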
+ pipe = {"vars": {"model": model,
+ "positive": pos,
+ "negative": neg,
+ "samples": latent,
+ "vae": vae,
+ "clip": clip,
+ "images": image,
+ "seed": seed},
+ "orig": {"model": model,
+ "positive": pos,
+ "negative": neg,
+ "samples": latent,
+ "vae": vae,
+ "clip": clip,
+ "images": image,
+ "seed": seed},
+
+ "loader_settings": {}
+ }
+ return (pipe, )
class ttN_pipe_OUT:
+ version = '1.0.0'
def __init__(self):
pass
@@ -1050,6 +1255,7 @@ def INPUT_TYPES(s):
"required": {
"pipe": ("PIPE_LINE",),
},
+ "hidden": {"ttNnodeVersion": ttN_pipe_OUT.version},
}
RETURN_TYPES = ("MODEL", "CONDITIONING", "CONDITIONING", "LATENT", "VAE", "CLIP", "IMAGE", "INT", "PIPE_LINE",)
@@ -1059,10 +1265,11 @@ def INPUT_TYPES(s):
CATEGORY = "ttN/pipe"
def flush(self, pipe):
- model, pos, neg, latent, vae, clip, image, seed = pipe
+ model, pos, neg, latent, vae, clip, image, seed = pipe['vars'].values()
return model, pos, neg, latent, vae, clip, image, seed, pipe
class ttN_pipe_EDIT:
+ version = '1.0.0'
def __init__(self):
pass
@@ -1079,6 +1286,7 @@ def INPUT_TYPES(s):
"image": ("IMAGE",),
"seed": ("INT", {"default": 0, "min": 0, "max": 0xffffffffffffffff, "forceInput": True}),
},
+ "hidden": {"ttNnodeVersion": ttN_pipe_EDIT.version},
}
RETURN_TYPES = ("PIPE_LINE", )
@@ -1088,37 +1296,38 @@ def INPUT_TYPES(s):
CATEGORY = "ttN/pipe"
def flush(self, pipe, model=None, pos=None, neg=None, latent=None, vae=None, clip=None, image=None, seed=None):
- new_model, new_pos, new_neg, new_latent, new_vae, new_clip, new_image, new_seed = pipe
+ new_model, new_pos, new_neg, new_latent, new_vae, new_clip, new_image, new_seed = pipe['orig'].values()
if model is not None:
- new_model = model
+ pipe['vars']['model'] = model
if pos is not None:
- new_pos = pos
+ pipe['vars']['positive'] = pos
if neg is not None:
- new_neg = neg
+ pipe['vars']['negative'] = neg
if latent is not None:
- new_latent = latent
+ pipe['vars']['samples'] = latent
if vae is not None:
- new_vae = vae
+ pipe['vars']['vae'] = vae
if clip is not None:
- new_clip = clip
+ pipe['vars']['clip'] = clip
if image is not None:
- new_image = image
+ pipe['vars']['images'] = image
if seed is not None:
- new_seed = seed
+ pipe['vars']['seed'] = seed
- pipe = new_model, new_pos, new_neg, new_latent, new_vae, new_clip, new_image, new_seed
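+ # Copy the pipe and set 'orig' to the updated 'vars' so downstream nodes treat the edited values as the new baseline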
+ new_pipe = {**pipe, 'orig': pipe['vars']}
- return (pipe, )
+ return (new_pipe, )
class ttN_pipe_2BASIC:
+ version = '1.0.0'
def __init__(self):
pass
@@ -1128,6 +1337,7 @@ def INPUT_TYPES(s):
"required": {
"pipe": ("PIPE_LINE",),
},
+ "hidden": {"ttNnodeVersion": ttN_pipe_2BASIC.version},
}
RETURN_TYPES = ("BASIC_PIPE", "PIPE_LINE",)
@@ -1137,16 +1347,17 @@ def INPUT_TYPES(s):
CATEGORY = "ttN/pipe"
def flush(self, pipe):
- model, pos, neg, _, vae, clip, _, _ = pipe
- basic_pipe = (model, clip, vae, pos, neg)
+ basic_pipe = (pipe['vars'].get('model'), pipe['vars'].get('clip'), pipe['vars'].get('vae'), pipe['vars'].get('positive'), pipe['vars'].get('negative'))
return (basic_pipe, pipe, )
class ttN_pipe_2DETAILER:
+ version = '1.0.0'
@classmethod
def INPUT_TYPES(s):
return {"required": {"pipe": ("PIPE_LINE",),
"bbox_detector": ("BBOX_DETECTOR", ), },
"optional": {"sam_model_opt": ("SAM_MODEL", ), },
+ "hidden": {"ttNnodeVersion": ttN_pipe_2DETAILER.version},
}
RETURN_TYPES = ("DETAILER_PIPE", "PIPE_LINE" )
@@ -1156,15 +1367,114 @@ def INPUT_TYPES(s):
CATEGORY = "ttN/pipe"
def flush(self, pipe, bbox_detector, sam_model_opt=None):
- model, positive, negative, _, vae, _, _, _ = pipe
- detailer_pipe = model, vae, positive, negative, bbox_detector, sam_model_opt
+ detailer_pipe = pipe['vars'].get('model'), pipe['vars'].get('vae'), pipe['vars'].get('positive'), pipe['vars'].get('negative'), bbox_detector, sam_model_opt
return (detailer_pipe, pipe, )
-#---------------------------------------------------------------ttN/pipe END------------------------------------------------------------------------#
+class ttN_XYPlot:
+ version = '1.0.0'
+ lora_list = ["None"] + folder_paths.get_filename_list("loras")
+ lora_strengths = {"min": -4.0, "max": 4.0, "step": 0.01}
+ token_normalization = ["none", "mean", "length", "length+mean"]
+ weight_interpretation = ["comfy", "A1111", "compel", "comfy++"]
+
+ loader_dict = {
+ "ckpt_name": folder_paths.get_filename_list("checkpoints"),
+ "vae_name": ["Baked-VAE"] + folder_paths.get_filename_list("vae"),
+ "clip_skip": {"min": -24, "max": -1, "step": 1},
+ "lora1_name": lora_list,
+ "lora1_model_strength": lora_strengths,
+ "lora1_clip_strength": lora_strengths,
+ "lora2_name": lora_list,
+ "lora2_model_strength": lora_strengths,
+ "lora2_clip_strength": lora_strengths,
+ "lora3_name": lora_list,
+ "lora3_model_strength": lora_strengths,
+ "lora3_clip_strength": lora_strengths,
+ "positive": [],
+ "positive_token_normalization": token_normalization,
+ "positive_weight_interpretation": weight_interpretation,
+ "negative": [],
+ "negative_token_normalization": token_normalization,
+ "negative_weight_interpretation": weight_interpretation,
+ }
+
+ sampler_dict = {
+ "lora_name": lora_list,
+ "lora_model_strength": lora_strengths,
+ "lora_clip_strength": lora_strengths,
+ "steps": {"min": 1, "max": 100, "step": 1},
+ "cfg": {"min": 0.0, "max": 100.0, "step": 1.0},
+ "sampler_name": comfy.samplers.KSampler.SAMPLERS,
+ "scheduler": comfy.samplers.KSampler.SCHEDULERS,
+ "denoise": {"min": 0.0, "max": 1.0, "step": 0.01},
+ "seed": ['increment', 'decrement', 'randomize'],
+ }
+
+ plot_dict = {**sampler_dict, **loader_dict}
+
+ plot_values = ["None",]
+ plot_values.append("---------------------")
+ for k in sampler_dict:
+ plot_values.append(f'sampler: {k}')
+ plot_values.append("---------------------")
+ for k in loader_dict:
+ plot_values.append(f'loader: {k}')
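+    # plot_values populates the x_axis / y_axis dropdowns below; the "None" and dashed
+    # entries are treated as no selection (see `rejected`).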
+
+ def __init__(self):
+ pass
+
+ rejected = ["None", "---------------------"]
+
+ @classmethod
+ def INPUT_TYPES(s):
+ return {
+ "required": {
+ #"info": ("INFO", {"default": "Any values not set by xyplot will be taken from the KSampler or connected pipeLoader", "multiline": True}),
+ "grid_spacing": ("INT",{"min": 0, "max": 500, "step": 5, "default": 0,}),
+ "latent_id": ("INT",{"min": 0, "max": 100, "step": 1, "default": 0, }),
+ "flip_xy": (["False", "True"],{"default": "False"}),
+ "x_axis": (ttN_XYPlot.plot_values, {"default": 'None'}),
+ "x_values": ("STRING",{"default": '', "multiline": True, "placeholder": 'insert values seperated by "; "'}),
+ "y_axis": (ttN_XYPlot.plot_values, {"default": 'None'}),
+ "y_values": ("STRING",{"default": '', "multiline": True, "placeholder": 'insert values seperated by "; "'}),
+ },
+ "hidden": {
+ "plot_dict": (ttN_XYPlot.plot_dict,),
+ "ttNnodeVersion": ttN_XYPlot.version,
+ },
+ }
+
+ RETURN_TYPES = ("XYPLOT", )
+ RETURN_NAMES = ("xyPlot", )
+ FUNCTION = "plot"
+
+ CATEGORY = "ttN/pipe"
+
+ def plot(self, grid_spacing, latent_id, flip_xy, x_axis, x_values, y_axis, y_values):
+ if x_axis in self.rejected:
+ x_axis = "None"
+ x_values = []
+ else:
+ x_values = clean_values(x_values)
+
+ if y_axis in self.rejected:
+ y_axis = "None"
+ y_values = []
+ else:
+ y_values = clean_values(y_values)
+
+ if flip_xy == "True":
+ x_axis, y_axis = y_axis, x_axis
+ x_values, y_values = y_values, x_values
+
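+        # Pack the selections into the list passed to pipeKSampler's xyPlot input
+        # (ordering assumed to match the sampler-side xy-plot handler: axes/values pairs, then grid_spacing and latent_id)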
+ xy_plot = [x_axis, x_values, y_axis, y_values, grid_spacing, latent_id]
+ return (xy_plot, )
+#---------------------------------------------------------------ttN/pipe END------------------------------------------------------------------------#
#---------------------------------------------------------------ttN/text START----------------------------------------------------------------------#
class ttN_text:
+ version = '1.0.0'
def __init__(self):
pass
@@ -1172,7 +1482,9 @@ def __init__(self):
def INPUT_TYPES(s):
return {"required": {
"text": ("STRING", {"default": "", "multiline": True}),
- }}
+ },
+ "hidden": {"ttNnodeVersion": ttN_text.version},
+ }
RETURN_TYPES = ("STRING",)
RETURN_NAMES = ("text",)
@@ -1185,6 +1497,7 @@ def conmeow(text):
return text,
class ttN_textDebug:
+ version = '1.0.0'
def __init__(self):
pass
@@ -1194,7 +1507,8 @@ def INPUT_TYPES(s):
"print_to_console": ([False, True],),
"text": ("STRING", {"default": '', "multiline": True, "forceInput": True}),
},
- "hidden": {"prompt": "PROMPT", "extra_pnginfo": "EXTRA_PNGINFO", "my_unique_id": "UNIQUE_ID",},
+ "hidden": {"prompt": "PROMPT", "extra_pnginfo": "EXTRA_PNGINFO", "my_unique_id": "UNIQUE_ID",
+ "ttNnodeVersion": ttN_textDebug.version},
}
RETURN_TYPES = ("STRING",)
@@ -1214,12 +1528,12 @@ def write(print_to_console, text, prompt, extra_pnginfo, my_unique_id):
for node in extra_pnginfo["workflow"]["nodes"]:
if node['id'] == int(input_node[0]):
input_from = node['outputs'][input_node[1]]['name']
-
- print(f'\033[92m[ttN textDebug_{my_unique_id}] - \033[0;31m\'{input_from}\':\033[0m{text}')
+ ttNl(text).t(f'textDebug[{my_unique_id}] - {CC.VIOLET}{input_from}').p()
return {"ui": {"text": text},
"result": (text,)}
class ttN_concat:
+ version = '1.0.0'
def __init__(self):
pass
"""
@@ -1232,7 +1546,8 @@ def INPUT_TYPES(s):
"text2": ("STRING", {"multiline": True, "default": ''}),
"text3": ("STRING", {"multiline": True, "default": ''}),
"delimiter": ("STRING", {"default":",","multiline": False}),
- }
+ },
+ "hidden": {"ttNnodeVersion": ttN_concat.version},
}
RETURN_TYPES = ("STRING",)
@@ -1251,6 +1566,7 @@ def conmeow(self, text1='', text2='', text3='', delimiter=''):
return concat
class ttN_text3BOX_3WAYconcat:
+ version = '1.0.0'
def __init__(self):
pass
"""
@@ -1263,7 +1579,8 @@ def INPUT_TYPES(s):
"text2": ("STRING", {"multiline": True, "default": ''}),
"text3": ("STRING", {"multiline": True, "default": ''}),
"delimiter": ("STRING", {"default":",","multiline": False}),
- }
+ },
+ "hidden": {"ttNnodeVersion": ttN_text3BOX_3WAYconcat.version},
}
RETURN_TYPES = ("STRING", "STRING", "STRING", "STRING", "STRING", "STRING", "STRING",)
@@ -1285,6 +1602,7 @@ def conmeow(self, text1='', text2='', text3='', delimiter=''):
return text1, text2, text3, t_1n2, t_1n3, t_2n3, concat
class ttN_text7BOX_concat:
+ version = '1.0.0'
def __init__(self):
pass
"""
@@ -1301,7 +1619,8 @@ def INPUT_TYPES(s):
"text6": ("STRING", {"multiline": True, "default": ''}),
"text7": ("STRING", {"multiline": True, "default": ''}),
"delimiter": ("STRING", {"default":",","multiline": False}),
- }
+ },
+ "hidden": {"ttNnodeVersion": ttN_text7BOX_concat.version},
}
RETURN_TYPES = ("STRING", "STRING", "STRING", "STRING", "STRING", "STRING", "STRING", "STRING",)
@@ -1324,8 +1643,10 @@ def conmeow(self, text1, text2, text3, text4, text5, text6, text7, delimiter):
return text1, text2, text3, text4, text5, text6, text7, concat
#---------------------------------------------------------------ttN/text END------------------------------------------------------------------------#
+
#---------------------------------------------------------------ttN/util START----------------------------------------------------------------------#
class ttN_INT:
+ version = '1.0.0'
def __init__(self):
pass
@@ -1333,7 +1654,9 @@ def __init__(self):
def INPUT_TYPES(s):
return {"required": {
"int": ("INT", {"default": 0, "min": 0, "max": 0xffffffffffffffff}),
- }}
+ },
+ "hidden": {"ttNnodeVersion": ttN_INT.version},
+ }
RETURN_TYPES = ("INT", "FLOAT", "STRING",)
RETURN_NAMES = ("int", "float", "text",)
@@ -1346,6 +1669,7 @@ def convert(int):
return int, float(int), str(int)
class ttN_FLOAT:
+ version = '1.0.0'
def __init__(self):
pass
@@ -1353,7 +1677,9 @@ def __init__(self):
def INPUT_TYPES(s):
return {"required": {
"float": ("FLOAT", {"default": 0, "min": 0, "max": 0xffffffffffffffff}),
- }}
+ },
+ "hidden": {"ttNnodeVersion": ttN_FLOAT.version},
+ }
RETURN_TYPES = ("FLOAT", "INT", "STRING",)
RETURN_NAMES = ("float", "int", "text",)
@@ -1366,6 +1692,7 @@ def convert(float):
return float, int(float), str(float)
class ttN_SEED:
+ version = '1.0.0'
def __init__(self):
pass
@@ -1373,7 +1700,9 @@ def __init__(self):
def INPUT_TYPES(s):
return {"required": {
"seed": ("INT", {"default": 0, "min": 0, "max": 0xffffffffffffffff}),
- }}
+ },
+ "hidden": {"ttNnodeVersion": ttN_SEED.version},
+ }
RETURN_TYPES = ("INT",)
RETURN_NAMES = ("seed",)
@@ -1389,10 +1718,11 @@ def plant(seed):
#---------------------------------------------------------------ttN/image START---------------------------------------------------------------------#
-# ttN RemBG
+# ttN imageREMBG (requires rembg)
try:
from rembg import remove
class ttN_imageREMBG:
+ version = '1.0.0'
def __init__(self):
pass
@@ -1403,7 +1733,8 @@ def INPUT_TYPES(s):
"image_output": (["Hide", "Preview", "Save", "Hide/Save"],{"default": "Preview"}),
"save_prefix": ("STRING", {"default": "ComfyUI"}),
},
- "hidden": {"prompt": "PROMPT", "extra_pnginfo": "EXTRA_PNGINFO", "my_unique_id": "UNIQUE_ID",},
+ "hidden": {"prompt": "PROMPT", "extra_pnginfo": "EXTRA_PNGINFO", "my_unique_id": "UNIQUE_ID",
+ "ttNnodeVersion": ttN_imageREMBG.version},
}
@@ -1433,7 +1764,7 @@ def remove_background(self, image, image_output, save_prefix, prompt, extra_pngi
else:
# Define preview_prefix
preview_prefix = "ttNrembg_{:02d}".format(int(my_unique_id))
- results = save_images(self, tensor, preview_prefix, save_prefix, image_output, prompt, extra_pnginfo)
+ results = save_images(self, tensor, preview_prefix, save_prefix, image_output, prompt, extra_pnginfo, my_unique_id)
if image_output in ("Hide", "Hide/Save"):
return (tensor, mask)
@@ -1443,6 +1774,7 @@ def remove_background(self, image, image_output, save_prefix, prompt, extra_pngi
"result": (tensor, mask)}
except:
class ttN_imageREMBG:
+ version = '0.0.0'
def __init__(self):
pass
@@ -1452,6 +1784,7 @@ def INPUT_TYPES(s):
"error": ("STRING",{"default": "RemBG is not installed", "multiline": False, 'readonly': True}),
"link": ("STRING",{"default": "https://github.com/danielgatis/rembg", "multiline": False}),
},
+ "hidden": {"ttNnodeVersion": ttN_imageREMBG.version},
}
@@ -1463,6 +1796,7 @@ def remove_background(error):
return None
class ttN_imageOUPUT:
+ version = '1.0.0'
def __init__(self):
pass
@@ -1471,9 +1805,15 @@ def INPUT_TYPES(s):
return {"required": {
"image": ("IMAGE",),
"image_output": (["Hide", "Preview", "Save", "Hide/Save"],{"default": "Preview"}),
+ "output_path": ("STRING", {"default": folder_paths.get_output_directory(), "multiline": False}),
"save_prefix": ("STRING", {"default": "ComfyUI"}),
+ "number_padding": (["None", 2, 3, 4, 5, 6, 7, 8, 9],{"default": 5}),
+ "overwrite_existing": (["True", "False"],{"default": "False"}),
+ "embed_workflow": (["True", "False"],),
+
},
- "hidden": {"prompt": "PROMPT", "extra_pnginfo": "EXTRA_PNGINFO", "my_unique_id": "UNIQUE_ID",},
+ "hidden": {"prompt": "PROMPT", "extra_pnginfo": "EXTRA_PNGINFO", "my_unique_id": "UNIQUE_ID",
+ "ttNnodeVersion": ttN_imageOUPUT.version},
}
RETURN_TYPES = ("IMAGE",)
@@ -1482,11 +1822,11 @@ def INPUT_TYPES(s):
CATEGORY = "ttN/image"
OUTPUT_NODE = True
- def output(self, image, image_output, save_prefix, prompt, extra_pnginfo, my_unique_id):
+ def output(self, image, image_output, output_path, save_prefix, number_padding, overwrite_existing, embed_workflow, prompt, extra_pnginfo, my_unique_id):
# Define preview_prefix
preview_prefix = "ttNimgOUT_{:02d}".format(int(my_unique_id))
- results = save_images(self, image, preview_prefix, save_prefix, image_output, prompt, extra_pnginfo)
+ results = save_images(self, image, preview_prefix, save_prefix, image_output, prompt, extra_pnginfo, my_unique_id, embed_workflow, output_path, number_padding=number_padding, overwrite_existing=overwrite_existing)
if image_output in ("Hide", "Hide/Save"):
return (image,)
@@ -1496,6 +1836,7 @@ def output(self, image, image_output, save_prefix, prompt, extra_pnginfo, my_uni
"result": (image,)}
class ttN_modelScale:
+ version = '1.0.0'
upscale_methods = ["nearest-exact", "bilinear", "area"]
crop_methods = ["disabled", "center"]
@@ -1508,15 +1849,16 @@ def INPUT_TYPES(s):
"rescale_method": (s.upscale_methods,),
"rescale": (["by percentage", "to Width/Height", 'to longer side - maintain aspect'],),
"percent": ("INT", {"default": 50, "min": 0, "max": 1000, "step": 1}),
- "width": ("INT", {"default": 1024, "min": 1, "max": MAX_RESOLUTION, "step": 1}),
- "height": ("INT", {"default": 1024, "min": 1, "max": MAX_RESOLUTION, "step": 1}),
- "longer_side": ("INT", {"default": 1024, "min": 1, "max": MAX_RESOLUTION, "step": 1}),
+ "width": ("INT", {"default": 1024, "min": 64, "max": MAX_RESOLUTION, "step": 8}),
+ "height": ("INT", {"default": 1024, "min": 64, "max": MAX_RESOLUTION, "step": 8}),
+ "longer_side": ("INT", {"default": 1024, "min": 64, "max": MAX_RESOLUTION, "step": 8}),
"crop": (s.crop_methods,),
"image_output": (["Hide", "Preview", "Save", "Hide/Save"],),
"save_prefix": ("STRING", {"default": "ComfyUI"}),
"output_latent": ([False, True],{"default": True}),
"vae": ("VAE",),},
- "hidden": { "prompt": "PROMPT", "extra_pnginfo": "EXTRA_PNGINFO", "my_unique_id": "UNIQUE_ID",},
+ "hidden": { "prompt": "PROMPT", "extra_pnginfo": "EXTRA_PNGINFO", "my_unique_id": "UNIQUE_ID",
+ "ttNnodeVersion": ttN_modelScale.version},
}
RETURN_TYPES = ("LATENT", "IMAGE",)
@@ -1588,20 +1930,42 @@ def upscale(self, model_name, image, info, rescale_after_model, rescale_method,
t = None
preview_prefix = "ttNhiresfix_{:02d}".format(int(my_unique_id))
- results = save_images(self, s, preview_prefix, save_prefix, image_output, prompt, extra_pnginfo)
+ results = save_images(self, s, preview_prefix, save_prefix, image_output, prompt, extra_pnginfo, my_unique_id)
if image_output in ("Hide", "Hide/Save"):
return ({"samples":t}, s,)
return {"ui": {"images": results},
"result": ({"samples":t}, s,)}
-
#---------------------------------------------------------------ttN/image END-----------------------------------------------------------------------#
+TTN_VERSIONS = {
+ "tinyterraNodes": ttN_version,
+ "pipeLoader": ttN_TSC_pipeLoader.version,
+ "pipeKSampler": ttN_TSC_pipeKSampler.version,
+ "pipeIN": ttN_pipe_IN.version,
+ "pipeOUT": ttN_pipe_OUT.version,
+ "pipeEDIT": ttN_pipe_EDIT.version,
+ "pipe2BASIC": ttN_pipe_2BASIC.version,
+ "pipe2DETAILER": ttN_pipe_2DETAILER.version,
+ "xyPlot": ttN_XYPlot.version,
+ "text": ttN_text.version,
+ "textDebug": ttN_textDebug.version,
+ "concat": ttN_concat.version,
+ "text3BOX_3WAYconcat": ttN_text3BOX_3WAYconcat.version,
+ "text7BOX_concat": ttN_text7BOX_concat.version,
+ "imageOutput": ttN_imageOUPUT.version,
+ "imageREMBG": ttN_imageREMBG.version,
+ "hiresfixScale": ttN_modelScale.version,
+ "int": ttN_INT.version,
+ "float": ttN_FLOAT.version,
+ "seed": ttN_SEED.version
+}
NODE_CLASS_MAPPINGS = {
#ttN/pipe
"ttN pipeLoader": ttN_TSC_pipeLoader,
"ttN pipeKSampler": ttN_TSC_pipeKSampler,
+ "ttN xyPlot": ttN_XYPlot,
"ttN pipeIN": ttN_pipe_IN,
"ttN pipeOUT": ttN_pipe_OUT,
"ttN pipeEDIT": ttN_pipe_EDIT,
@@ -1629,12 +1993,13 @@ def upscale(self, model_name, image, info, rescale_after_model, rescale_method,
#ttN/pipe
"ttN pipeLoader": "pipeLoader",
"ttN pipeKSampler": "pipeKSampler",
+ "ttN xyPlot": "xyPlot",
"ttN pipeIN": "pipeIN",
"ttN pipeOUT": "pipeOUT",
"ttN pipeEDIT": "pipeEDIT",
"ttN pipe2BASIC": "pipe > basic_pipe",
"ttN pipe2DETAILER": "pipe > detailer_pipe",
-
+
#ttN/text
"ttN text": "text",
"ttN textDebug": "textDebug",
@@ -1653,11 +2018,11 @@ def upscale(self, model_name, image, info, rescale_after_model, rescale_method,
"ttN seed": "seed"
}
-print("\033[92m[t ttNodes Loaded t]\033[0m")
+ttNl('Loaded').full().p()
#---------------------------------------------------------------------------------------------------------------------------------------------------#
# (KSampler Modified from TSC Efficiency Nodes) - https://github.com/LucianoCirino/efficiency-nodes-comfyui #
# (upscale from QualityOfLifeSuite_Omar92) - https://github.com/omar92/ComfyUI-QualityOfLifeSuit_Omar92 #
# (Node weights from BlenderNeko/ComfyUI_ADV_CLIP_emb) - https://github.com/BlenderNeko/ComfyUI_ADV_CLIP_emb #
# (misc. from WAS node Suite) - https://github.com/WASasquatch/was-node-suite-comfyui #
-#---------------------------------------------------------------------------------------------------------------------------------------------------#
+#---------------------------------------------------------------------------------------------------------------------------------------------------#
\ No newline at end of file
diff --git a/tinyterra_xyPlot.png b/tinyterra_xyPlot.png
new file mode 100644
index 0000000..53d3655
Binary files /dev/null and b/tinyterra_xyPlot.png differ
diff --git a/ttNdev.py b/ttNdev.py
index 5550ee1..42640cd 100644
--- a/ttNdev.py
+++ b/ttNdev.py
@@ -1,3 +1,8 @@
+import folder_paths
+import comfy.samplers
+
+MAX_RESOLUTION=8192
+
# in_dev - likely broken
class ttN_debugInput:
@classmethod
@@ -59,41 +64,37 @@ def roundnround(bus_line):
print("busOUT:--",bus_line)
return (bus_line,)
-class ttN_XY_Plot:
- def __init__(self):
- pass
-
+class ttN_seedDebug:
@classmethod
def INPUT_TYPES(s):
- return {
- "required": {
- "x_axis": (['None',], {"default": 'None'}),
- "x_values": ("STRING",{"default": '', "multiline": True}),
- "y_axis": (['None',], {"default": 'None'}),
- "y_values": ("STRING",{"default": '', "multiline": True}),
- },
- }
+ return {"required": {
+ "ttNseed": ("INT", {"default": 0, "min": 0, "max": 0xffffffffffffffff}),
+ }}
- RETURN_TYPES = ("XY_PLOT", )
- RETURN_NAMES = ("xy_plot", )
- FUNCTION = "plot"
+ RETURN_TYPES = ("INT",)
+ RETURN_NAMES = ("seed",)
+ FUNCTION = "plant"
- CATEGORY = "ttN/pipe"
+ CATEGORY = "ttN/dev"
- def plot(self, x_axis, x_values, y_axis, y_values):
- xy_plot = [x_axis, x_values, y_axis, y_values]
- return (xy_plot, )
+ @staticmethod
+ def plant(ttNseed, *args, **kwargs):
+ print('Seed:', ttNseed)
+ print('args:', args)
+ print('kwargs:',kwargs)
+ return (ttNseed,)
NODE_CLASS_MAPPINGS = {
"ttN debugInput": ttN_debugInput,
"ttN busIN": ttN_busIN,
"ttN busOUT": ttN_busOUT,
- "ttN xyPlot": ttN_XY_Plot
+ "ttN seedDebug": ttN_seedDebug,
+
}
NODE_DISPLAY_NAME_MAPPINGS = {
"ttN debugInput": "debugInput",
"ttN busIN": "busIN",
"ttN busOUT": "busOUT",
- "ttN xyPlot": "xyPlot",
+ "ttN seedDebug": "seedDebug",
}
\ No newline at end of file
diff --git a/workflows/tinyterra_imagebash.json b/workflows/tinyterra_imagebash.json
index 9c6e6b6..40d8595 100644
--- a/workflows/tinyterra_imagebash.json
+++ b/workflows/tinyterra_imagebash.json
@@ -1,13 +1,13 @@
{
- "last_node_id": 18,
- "last_link_id": 32,
+ "last_node_id": 42,
+ "last_link_id": 60,
"nodes": [
{
- "id": 3,
+ "id": 22,
"type": "ttN pipeOUT",
"pos": [
- 618,
- 14
+ 1180,
+ 30
],
"size": {
"0": 210,
@@ -16,13 +16,13 @@
"flags": {
"collapsed": true
},
- "order": 5,
+ "order": 7,
"mode": 0,
"inputs": [
{
"name": "pipe",
"type": "PIPE_LINE",
- "link": 2
+ "link": 38
}
],
"outputs": [
@@ -59,8 +59,11 @@
{
"name": "clip",
"type": "CLIP",
- "links": null,
- "shape": 3
+ "links": [
+ 43
+ ],
+ "shape": 3,
+ "slot_index": 5
},
{
"name": "image",
@@ -78,40 +81,41 @@
"name": "pipe",
"type": "PIPE_LINE",
"links": [
- 3,
- 6
+ 44
],
"shape": 3,
"slot_index": 8
}
],
"properties": {
- "ttNbgOverride": "black"
+ "Node name for S&R": "ttN pipeOUT",
+ "ttNbgOverride": "",
+ "ttNnodeVersion": "1.0.0"
},
"color": "#222",
"bgcolor": "#000"
},
{
- "id": 2,
+ "id": 32,
"type": "ttN pipeOUT",
"pos": [
- 442,
- 15
+ 1650,
+ 30
],
"size": {
- "0": 210,
+ "0": 140,
"1": 186
},
"flags": {
"collapsed": true
},
- "order": 2,
+ "order": 11,
"mode": 0,
"inputs": [
{
"name": "pipe",
"type": "PIPE_LINE",
- "link": 1
+ "link": 44
}
],
"outputs": [
@@ -142,8 +146,11 @@
{
"name": "vae",
"type": "VAE",
- "links": null,
- "shape": 3
+ "links": [
+ 52
+ ],
+ "shape": 3,
+ "slot_index": 4
},
{
"name": "clip",
@@ -167,24 +174,26 @@
"name": "pipe",
"type": "PIPE_LINE",
"links": [
- 2
+ 49
],
"shape": 3,
"slot_index": 8
}
],
"properties": {
- "ttNbgOverride": "black"
+ "Node name for S&R": "ttN pipeOUT",
+ "ttNbgOverride": "",
+ "ttNnodeVersion": "1.0.0"
},
"color": "#222",
"bgcolor": "#000"
},
{
- "id": 4,
+ "id": 20,
"type": "ttN pipeOUT",
"pos": [
- 1182,
- 13
+ 440,
+ 30
],
"size": {
"0": 210,
@@ -193,13 +202,13 @@
"flags": {
"collapsed": true
},
- "order": 7,
+ "order": 2,
"mode": 0,
"inputs": [
{
"name": "pipe",
"type": "PIPE_LINE",
- "link": 3
+ "link": 33
}
],
"outputs": [
@@ -236,11 +245,8 @@
{
"name": "clip",
"type": "CLIP",
- "links": [
- 13
- ],
- "shape": 3,
- "slot_index": 5
+ "links": null,
+ "shape": 3
},
{
"name": "image",
@@ -258,73 +264,130 @@
"name": "pipe",
"type": "PIPE_LINE",
"links": [
- 21
+ 40
],
- "shape": 3,
- "slot_index": 8
+ "shape": 3
}
],
"properties": {
- "ttNbgOverride": "black"
+ "Node name for S&R": "ttN pipeOUT",
+ "ttNbgOverride": "",
+ "ttNnodeVersion": "1.0.0"
},
"color": "#222",
"bgcolor": "#000"
},
{
- "id": 17,
- "type": "EmptyLatentImage",
+ "id": 21,
+ "type": "ttN pipeOUT",
"pos": [
- 790,
- 122
- ],
- "size": [
- 210,
- 106
+ 610,
+ 30
],
- "flags": {},
- "order": 0,
+ "size": {
+ "0": 210,
+ "1": 186
+ },
+ "flags": {
+ "collapsed": true
+ },
+ "order": 5,
"mode": 0,
+ "inputs": [
+ {
+ "name": "pipe",
+ "type": "PIPE_LINE",
+ "link": 40,
+ "slot_index": 0
+ }
+ ],
"outputs": [
{
- "name": "LATENT",
+ "name": "model",
+ "type": "MODEL",
+ "links": null,
+ "shape": 3
+ },
+ {
+ "name": "pos",
+ "type": "CONDITIONING",
+ "links": null,
+ "shape": 3
+ },
+ {
+ "name": "neg",
+ "type": "CONDITIONING",
+ "links": null,
+ "shape": 3
+ },
+ {
+ "name": "latent",
"type": "LATENT",
+ "links": null,
+ "shape": 3
+ },
+ {
+ "name": "vae",
+ "type": "VAE",
+ "links": null,
+ "shape": 3
+ },
+ {
+ "name": "clip",
+ "type": "CLIP",
+ "links": null,
+ "shape": 3
+ },
+ {
+ "name": "image",
+ "type": "IMAGE",
+ "links": null,
+ "shape": 3
+ },
+ {
+ "name": "seed",
+ "type": "INT",
+ "links": null,
+ "shape": 3
+ },
+ {
+ "name": "pipe",
+ "type": "PIPE_LINE",
"links": [
- 27
+ 38,
+ 42
],
"shape": 3,
- "slot_index": 0
+ "slot_index": 8
}
],
"properties": {
- "ttNbgOverride": "black"
+ "Node name for S&R": "ttN pipeOUT",
+ "ttNbgOverride": "",
+ "ttNnodeVersion": "1.0.0"
},
- "widgets_values": [
- 512,
- 512,
- 1
- ],
"color": "#222",
"bgcolor": "#000"
},
{
- "id": 7,
+ "id": 24,
"type": "ttN pipeKSampler",
"pos": [
- 787,
- 266
+ 410,
+ 270
],
"size": [
- 234.49201354980482,
- 662.7804020996095
+ 340,
+ 650
],
"flags": {},
- "order": 8,
+ "order": 3,
"mode": 0,
"inputs": [
{
"name": "pipe",
"type": "PIPE_LINE",
- "link": 6
+ "link": 35
},
{
"name": "optional_model",
@@ -334,7 +397,7 @@
{
"name": "optional_positive",
"type": "CONDITIONING",
- "link": 26
+ "link": null
},
{
"name": "optional_negative",
@@ -344,7 +407,7 @@
{
"name": "optional_latent",
"type": "LATENT",
- "link": 27
+ "link": null
},
{
"name": "optional_vae",
@@ -357,8 +420,8 @@
"link": null
},
{
- "name": "script",
- "type": "SCRIPT",
+ "name": "xyPlot",
+ "type": "XYPLOT",
"link": null
},
{
@@ -425,7 +488,7 @@
"name": "image",
"type": "IMAGE",
"links": [
- 9
+ 36
],
"shape": 3,
"slot_index": 7
@@ -438,7 +501,8 @@
}
],
"properties": {
- "ttNbgOverride": "black"
+ "Node name for S&R": "ttN pipeKSampler",
+ "ttNnodeVersion": "1.0.0"
},
"widgets_values": [
"None",
@@ -450,135 +514,133 @@
"Sample",
"24",
8,
- "euler_ancestral",
+ "ddim",
"karras",
1,
"Save",
"Comfy",
- 212663220667889,
+ 940490427037325,
"randomize"
],
"color": "#323",
"bgcolor": "#535"
},
{
- "id": 10,
- "type": "ttN imageREMBG",
+ "id": 26,
+ "type": "Reroute",
"pos": [
- 1039,
- 406
+ 760,
+ 40
],
"size": [
- 210,
- 302.24616699218745
+ 82,
+ 26
],
"flags": {},
- "order": 12,
+ "order": 6,
"mode": 0,
"inputs": [
{
- "name": "image",
- "type": "IMAGE",
- "link": 9
+ "name": "",
+ "type": "*",
+ "link": 36
}
],
"outputs": [
{
- "name": "image",
+ "name": "IMAGE",
"type": "IMAGE",
"links": [
- 29
+ 39
],
- "shape": 3,
"slot_index": 0
- },
- {
- "name": "mask",
- "type": "MASK",
- "links": [
- 11
- ],
- "shape": 3,
- "slot_index": 1
}
],
"properties": {
- "ttNbgOverride": "black"
+ "showOutputText": true,
+ "horizontal": false
},
- "widgets_values": [
- "Preview",
- "ComfyUI"
- ],
- "color": "#232",
- "bgcolor": "#353"
+ "color": "#2a363b",
+ "bgcolor": "#3f5159"
},
{
- "id": 13,
- "type": "BNK_CLIPTextEncodeAdvanced",
+ "id": 25,
+ "type": "ttN pipeKSampler",
"pos": [
- 1327,
- 75
+ 780,
+ 270
+ ],
+ "size": [
+ 234,
+ 660
],
- "size": {
- "0": 278.2790222167969,
- "1": 150.91458129882812
- },
"flags": {},
- "order": 10,
+ "order": 8,
"mode": 0,
"inputs": [
{
- "name": "clip",
- "type": "CLIP",
- "link": 13
- }
- ],
- "outputs": [
+ "name": "pipe",
+ "type": "PIPE_LINE",
+ "link": 42
+ },
{
- "name": "CONDITIONING",
+ "name": "optional_model",
+ "type": "MODEL",
+ "link": null
+ },
+ {
+ "name": "optional_positive",
"type": "CONDITIONING",
- "links": [
- 25
- ],
- "shape": 3,
- "slot_index": 0
+ "link": 41
+ },
+ {
+ "name": "optional_negative",
+ "type": "CONDITIONING",
+ "link": null
+ },
+ {
+ "name": "optional_latent",
+ "type": "LATENT",
+ "link": 37
+ },
+ {
+ "name": "optional_vae",
+ "type": "VAE",
+ "link": null
+ },
+ {
+ "name": "optional_clip",
+ "type": "CLIP",
+ "link": null
+ },
+ {
+ "name": "xyPlot",
+ "type": "XYPLOT",
+ "link": null
+ },
+ {
+ "name": "seed",
+ "type": "INT",
+ "link": null,
+ "widget": {
+ "name": "seed",
+ "config": [
+ "INT",
+ {
+ "default": 0,
+ "min": 0,
+ "max": 18446744073709552000
+ }
+ ]
+ }
}
],
- "properties": {
- "ttNbgOverride": "black"
- },
- "widgets_values": [
- "(RAW photo:1.2), perfect composition, (masterpiece, 8k, absurdres, best quality, intricate), realistic, raytracing, dark forest, mystical, pathway, horse, wide shot, side on, full body",
- "none",
- "comfy++"
- ],
- "color": "#222",
- "bgcolor": "#000"
- },
- {
- "id": 1,
- "type": "ttN pipeLoader",
- "pos": [
- 8,
- 38
- ],
- "size": [
- 362.7988407910158,
- 814.6765350244143
- ],
- "flags": {},
- "order": 1,
- "mode": 0,
"outputs": [
{
"name": "pipe",
"type": "PIPE_LINE",
- "links": [
- 1,
- 5
- ],
- "shape": 3,
- "slot_index": 0
+ "links": null,
+ "shape": 3
},
{
"name": "model",
@@ -613,11 +675,17 @@
{
"name": "clip",
"type": "CLIP",
+ "links": null,
+ "shape": 3
+ },
+ {
+ "name": "image",
+ "type": "IMAGE",
"links": [
- 4
+ 48
],
"shape": 3,
- "slot_index": 6
+ "slot_index": 7
},
{
"name": "seed",
@@ -627,210 +695,153 @@
}
],
"properties": {
- "ttNbgOverride": ""
+ "Node name for S&R": "ttN pipeKSampler",
+ "ttNnodeVersion": "1.0.0"
},
"widgets_values": [
- "Good\\deliberate_v2.safetensors",
- "vae-ft-mse-840000-ema-pruned.safetensors",
- -1,
"None",
1,
1,
"None",
+ 2,
+ "disabled",
+ "Sample",
+ "24",
+ 8,
+ "euler_ancestral",
+ "karras",
1,
- 1,
- "None",
- 1,
- 1,
- "(RAW photo:1.2), perfect composition, (masterpiece, 8k, absurdres, best quality, intricate), realistic, raytracing, dark forest, mystical, pathway",
- "none",
- "comfy++",
- "(semi-realistic, cgi, 3d, render, sketch, cartoon, drawing, digital art, anime, manga:1.3), out of frame, cropped, cut off, poorly made, username, signature, watermark, unattractive, blurry, boring, sketch, lacklustre, repetitive, worst quality, low quality, jpeg artefacts, poorly lit, overexposed, underexposed, glitch, error, out of focus, amateur, (poorly drawn hands, poorly drawn face:1.2), deformed iris, deformed pupils, morbid, duplicate, mutilated, extra fingers, mutated hands, poorly drawn eyes, mutation, deformed, dehydrated, bad anatomy, bad proportions, extra limbs, cloned face, disfigured, gross proportions, malformed limbs, missing arms, missing legs, extra arms, extra legs, fused fingers, too many fingers, long neck, incoherent, cloned face, cloned body, blur, blurry,(nsfw),(naked),(nude)",
- "none",
- "comfy++",
- 768,
- 512,
- 1,
- 302086422941626,
- "fixed"
+ "Save",
+ "Comfy",
+ 59660880884484,
+ "randomize"
],
- "color": "#222",
- "bgcolor": "#000"
+ "color": "#323",
+ "bgcolor": "#535"
},
{
- "id": 6,
- "type": "ttN pipeKSampler",
+ "id": 29,
+ "type": "Image Overlay",
"pos": [
- 397,
- 269
+ 1270,
+ 410
],
"size": [
- 337.29901354980484,
- 652.7944020996094
+ 315,
+ 290
],
"flags": {},
- "order": 3,
+ "order": 13,
"mode": 0,
"inputs": [
{
- "name": "pipe",
- "type": "PIPE_LINE",
- "link": 5
- },
- {
- "name": "optional_model",
- "type": "MODEL",
- "link": null
- },
- {
- "name": "optional_positive",
- "type": "CONDITIONING",
- "link": null
- },
- {
- "name": "optional_negative",
- "type": "CONDITIONING",
- "link": null
- },
- {
- "name": "optional_latent",
- "type": "LATENT",
- "link": null
- },
- {
- "name": "optional_vae",
- "type": "VAE",
- "link": null
- },
- {
- "name": "optional_clip",
- "type": "CLIP",
- "link": null
+ "name": "base_image",
+ "type": "IMAGE",
+ "link": 45
},
{
- "name": "script",
- "type": "SCRIPT",
- "link": null
+ "name": "overlay_image",
+ "type": "IMAGE",
+ "link": 46
},
{
- "name": "seed",
- "type": "INT",
- "link": null,
- "widget": {
- "name": "seed",
- "config": [
- "INT",
- {
- "default": 0,
- "min": 0,
- "max": 18446744073709552000
- }
- ]
- }
+ "name": "optional_mask",
+ "type": "MASK",
+ "link": 47
}
],
"outputs": [
{
- "name": "pipe",
- "type": "PIPE_LINE",
- "links": null,
- "shape": 3
- },
- {
- "name": "model",
- "type": "MODEL",
- "links": null,
- "shape": 3
- },
- {
- "name": "positive",
- "type": "CONDITIONING",
- "links": null,
- "shape": 3
- },
- {
- "name": "negative",
- "type": "CONDITIONING",
- "links": null,
- "shape": 3
- },
- {
- "name": "latent",
- "type": "LATENT",
- "links": null,
- "shape": 3
- },
- {
- "name": "vae",
- "type": "VAE",
- "links": null,
- "shape": 3
- },
+ "name": "IMAGE",
+ "type": "IMAGE",
+ "links": [
+ 51
+ ],
+ "shape": 3,
+ "slot_index": 0
+ }
+ ],
+ "properties": {
+ "Node name for S&R": "Image Overlay"
+ },
+ "widgets_values": [
+ "Resize by rescale_factor",
+ "nearest-exact",
+ 0.7,
+ 512,
+ 512,
+ 270,
+ 140,
+ 0,
+ 0
+ ],
+ "color": "#232",
+ "bgcolor": "#353"
+ },
+ {
+ "id": 30,
+ "type": "ttN imageOutput",
+ "pos": [
+ 1600,
+ 410
+ ],
+ "size": [
+ 300,
+ 290
+ ],
+ "flags": {},
+ "order": 14,
+ "mode": 0,
+ "inputs": [
{
- "name": "clip",
- "type": "CLIP",
- "links": null,
- "shape": 3
- },
+ "name": "image",
+ "type": "IMAGE",
+ "link": 51
+ }
+ ],
+ "outputs": [
{
"name": "image",
"type": "IMAGE",
"links": [
- 7
+ 50
],
"shape": 3,
- "slot_index": 7
- },
- {
- "name": "seed",
- "type": "INT",
- "links": null,
- "shape": 3
+ "slot_index": 0
}
],
"properties": {
- "ttNbgOverride": "black"
+ "Node name for S&R": "ttN imageOutput",
+ "ttNnodeVersion": "1.0.0"
},
"widgets_values": [
- "None",
- 1,
- 1,
- "None",
- 2,
- "disabled",
- "Sample",
- "24",
- 8,
- "ddim",
- "karras",
- 1,
- "Save",
+ "Preview",
"Comfy",
- 543250852522365,
- "randomize"
+ "True",
+ "T:\\AI\\ComfyNew\\ComfyUI\\output"
],
- "color": "#323",
- "bgcolor": "#535"
+ "color": "#232",
+ "bgcolor": "#353"
},
{
- "id": 8,
+ "id": 27,
"type": "Reroute",
"pos": [
- 783,
- 39
+ 1180,
+ 40
],
"size": [
82,
26
],
"flags": {},
- "order": 6,
+ "order": 9,
"mode": 0,
"inputs": [
{
"name": "",
"type": "*",
- "link": 7
+ "link": 39
}
],
"outputs": [
@@ -838,7 +849,7 @@
"name": "IMAGE",
"type": "IMAGE",
"links": [
- 18
+ 45
],
"slot_index": 0
}
@@ -851,126 +862,194 @@
"bgcolor": "#3f5159"
},
{
- "id": 5,
- "type": "BNK_CLIPTextEncodeAdvanced",
+ "id": 35,
+ "type": "ttN hiresfixScale",
"pos": [
- 438,
- 63
+ 1920,
+ 360
],
"size": [
- 278.2790103125003,
- 150.91457917968762
+ 315,
+ 340
],
"flags": {},
- "order": 4,
+ "order": 15,
"mode": 0,
"inputs": [
{
- "name": "clip",
- "type": "CLIP",
- "link": 4
+ "name": "image",
+ "type": "IMAGE",
+ "link": 50
+ },
+ {
+ "name": "vae",
+ "type": "VAE",
+ "link": 52
}
],
"outputs": [
{
- "name": "CONDITIONING",
- "type": "CONDITIONING",
+ "name": "latent",
+ "type": "LATENT",
"links": [
- 26
+ 54
],
"shape": 3,
"slot_index": 0
+ },
+ {
+ "name": "image",
+ "type": "IMAGE",
+ "links": null,
+ "shape": 3
}
],
"properties": {
- "ttNbgOverride": "black"
+ "infoWidgetHidden": false,
+ "Node name for S&R": "ttN hiresfixScale",
+ "ttNnodeVersion": "1.0.0"
},
"widgets_values": [
- "(RAW photo:1.2), perfect composition, (masterpiece, 8k, absurdres, best quality, intricate), realistic, raytracing, horse, wide shot, side on, full body",
- "none",
- "comfy++"
+ "DF2K_JPEG.pth",
+ "Rescale based on model upscale image size ⬇",
+ true,
+ "nearest-exact",
+ "by percentage",
+ 50,
+ 512,
+ 512,
+ 1024,
+ "disabled",
+ "Hide",
+ "ComfyUI",
+ true
],
- "color": "#222",
- "bgcolor": "#000"
+ "color": "#233",
+ "bgcolor": "#355"
},
{
- "id": 11,
- "type": "Image Overlay",
+ "id": 19,
+ "type": "ttN pipeLoader",
"pos": [
- 1274,
- 411
+ 20,
+ 30
],
"size": [
- 314.4465454101562,
- 290
+ 370,
+ 815
],
"flags": {},
- "order": 13,
+ "order": 0,
"mode": 0,
- "inputs": [
+ "outputs": [
{
- "name": "base_image",
- "type": "IMAGE",
- "link": 28
+ "name": "pipe",
+ "type": "PIPE_LINE",
+ "links": [
+ 33,
+ 35
+ ],
+ "shape": 3,
+ "slot_index": 0
},
{
- "name": "overlay_image",
- "type": "IMAGE",
- "link": 29
+ "name": "model",
+ "type": "MODEL",
+ "links": null,
+ "shape": 3
},
{
- "name": "optional_mask",
- "type": "MASK",
- "link": 11
- }
- ],
- "outputs": [
+ "name": "positive",
+ "type": "CONDITIONING",
+ "links": null,
+ "shape": 3
+ },
{
- "name": "IMAGE",
- "type": "IMAGE",
+ "name": "negative",
+ "type": "CONDITIONING",
+ "links": null,
+ "shape": 3
+ },
+ {
+ "name": "latent",
+ "type": "LATENT",
+ "links": null,
+ "shape": 3
+ },
+ {
+ "name": "vae",
+ "type": "VAE",
+ "links": null,
+ "shape": 3
+ },
+ {
+ "name": "clip",
+ "type": "CLIP",
"links": [
- 12
+ 34
],
"shape": 3,
- "slot_index": 0
+ "slot_index": 6
+ },
+ {
+ "name": "seed",
+ "type": "INT",
+ "links": null,
+ "shape": 3
}
],
"properties": {
- "ttNbgOverride": "black"
+ "Node name for S&R": "ttN pipeLoader",
+ "ttNbgOverride": "",
+ "ttNnodeVersion": "1.0.0"
},
"widgets_values": [
- "Resize by rescale_factor",
- "nearest-exact",
- 0.7,
- 512,
+ "Good\\deliberate_v2.safetensors",
+ "vae-ft-mse-840000-ema-pruned.safetensors",
+ -1,
+ "None",
+ 1,
+ 1,
+ "None",
+ 1,
+ 1,
+ "None",
+ 1,
+ 1,
+ "(RAW photo:1.2), perfect composition, (masterpiece, 8k, absurdres, best quality, intricate), realistic, raytracing, dark forest, mystical, pathway",
+ "none",
+ "comfy++",
+ "(semi-realistic, cgi, 3d, render, sketch, cartoon, drawing, digital art, anime, manga:1.3), out of frame, cropped, cut off, poorly made, username, signature, watermark, unattractive, blurry, boring, sketch, lacklustre, repetitive, worst quality, low quality, jpeg artefacts, poorly lit, overexposed, underexposed, glitch, error, out of focus, amateur, (poorly drawn hands, poorly drawn face:1.2), deformed iris, deformed pupils, morbid, duplicate, mutilated, extra fingers, mutated hands, poorly drawn eyes, mutation, deformed, dehydrated, bad anatomy, bad proportions, extra limbs, cloned face, disfigured, gross proportions, malformed limbs, missing arms, missing legs, extra arms, extra legs, fused fingers, too many fingers, long neck, incoherent, cloned face, cloned body, blur, blurry,(nsfw),(naked),(nude)",
+ "none",
+ "comfy++",
+ 768,
512,
- 270,
- 140,
- 0,
- 0
+ 1,
+ 302086422941626,
+ "fixed"
],
- "color": "#232",
- "bgcolor": "#353"
+ "color": "#222",
+ "bgcolor": "#000"
},
{
- "id": 12,
- "type": "ttN imageOutput",
+ "id": 28,
+ "type": "ttN imageREMBG",
"pos": [
- 1612,
- 416
+ 1040,
+ 410
],
"size": [
- 275.3463745117185,
- 281.14616737365714
+ 210,
+ 290
],
"flags": {},
- "order": 14,
+ "order": 12,
"mode": 0,
"inputs": [
{
"name": "image",
"type": "IMAGE",
- "link": 12
+ "link": 48
}
],
"outputs": [
@@ -978,187 +1057,164 @@
"name": "image",
"type": "IMAGE",
"links": [
- 30
+ 46
],
"shape": 3,
"slot_index": 0
+ },
+ {
+ "name": "mask",
+ "type": "MASK",
+ "links": [
+ 47
+ ],
+ "shape": 3,
+ "slot_index": 1
}
],
"properties": {
- "ttNbgOverride": "black"
+ "Node name for S&R": "ttN imageREMBG",
+ "ttNnodeVersion": "1.0.0"
},
"widgets_values": [
"Preview",
- "Comfy"
+ "ComfyUI"
],
"color": "#232",
"bgcolor": "#353"
},
{
- "id": 15,
- "type": "ttN pipeOUT",
+ "id": 31,
+ "type": "BNK_CLIPTextEncodeAdvanced",
"pos": [
- 1654,
- 15
- ],
- "size": {
- "0": 210,
- "1": 186
- },
- "flags": {
- "collapsed": true
- },
- "order": 11,
- "mode": 0,
- "inputs": [
- {
- "name": "pipe",
- "type": "PIPE_LINE",
- "link": 21
- }
- ],
- "outputs": [
- {
- "name": "model",
- "type": "MODEL",
- "links": null,
- "shape": 3
- },
- {
- "name": "pos",
- "type": "CONDITIONING",
- "links": null,
- "shape": 3
- },
- {
- "name": "neg",
- "type": "CONDITIONING",
- "links": null,
- "shape": 3
- },
- {
- "name": "latent",
- "type": "LATENT",
- "links": null,
- "shape": 3
- },
- {
- "name": "vae",
- "type": "VAE",
- "links": [
- 31
- ],
- "shape": 3,
- "slot_index": 4
- },
+ 1330,
+ 70
+ ],
+ "size": [
+ 280,
+ 150
+ ],
+ "flags": {},
+ "order": 10,
+ "mode": 0,
+ "inputs": [
{
"name": "clip",
"type": "CLIP",
- "links": null,
- "shape": 3
- },
- {
- "name": "image",
- "type": "IMAGE",
- "links": null,
- "shape": 3
- },
- {
- "name": "seed",
- "type": "INT",
- "links": null,
- "shape": 3
- },
+ "link": 43
+ }
+ ],
+ "outputs": [
{
- "name": "pipe",
- "type": "PIPE_LINE",
+ "name": "CONDITIONING",
+ "type": "CONDITIONING",
"links": [
- 23
+ 53
],
"shape": 3,
- "slot_index": 8
+ "slot_index": 0
}
],
"properties": {
- "ttNbgOverride": "black"
+ "Node name for S&R": "BNK_CLIPTextEncodeAdvanced"
},
- "color": "#222",
- "bgcolor": "#000"
+ "widgets_values": [
+ "(RAW photo:1.2), perfect composition, (masterpiece, 8k, absurdres, best quality, intricate), realistic, raytracing, dark forest, mystical, pathway, horse, wide shot, side on, full body",
+ "none",
+ "comfy++"
+ ],
+ "color": "#432",
+ "bgcolor": "#653"
},
{
- "id": 18,
- "type": "ttN hiresfixScale",
+ "id": 23,
+ "type": "BNK_CLIPTextEncodeAdvanced",
"pos": [
- 1917,
- 193
+ 440,
+ 70
],
"size": [
- 315,
- 342
+ 280,
+ 150
],
"flags": {},
- "order": 15,
+ "order": 4,
"mode": 0,
"inputs": [
{
- "name": "image",
- "type": "IMAGE",
- "link": 30
- },
+ "name": "clip",
+ "type": "CLIP",
+ "link": 34
+ }
+ ],
+ "outputs": [
{
- "name": "vae",
- "type": "VAE",
- "link": 31
+ "name": "CONDITIONING",
+ "type": "CONDITIONING",
+ "links": [
+ 41
+ ],
+ "shape": 3,
+ "slot_index": 0
}
],
+ "properties": {
+ "Node name for S&R": "BNK_CLIPTextEncodeAdvanced"
+ },
+ "widgets_values": [
+ "(RAW photo:1.2), perfect composition, (masterpiece, 8k, absurdres, best quality, intricate), realistic, raytracing, horse, wide shot, side on, full body",
+ "none",
+ "comfy++"
+ ],
+ "color": "#432",
+ "bgcolor": "#653"
+ },
+ {
+ "id": 34,
+ "type": "EmptyLatentImage",
+ "pos": [
+ 780,
+ 110
+ ],
+ "size": [
+ 210,
+ 106
+ ],
+ "flags": {},
+ "order": 1,
+ "mode": 0,
"outputs": [
{
- "name": "latent",
+ "name": "LATENT",
"type": "LATENT",
"links": [
- 32
+ 37
],
"shape": 3,
"slot_index": 0
- },
- {
- "name": "image",
- "type": "IMAGE",
- "links": null,
- "shape": 3
}
],
"properties": {
- "infoWidgetHidden": false,
- "ttNbgOverride": "black"
+ "Node name for S&R": "EmptyLatentImage"
},
"widgets_values": [
- "DF2K_JPEG.pth",
- "Rescale based on model upscale image size ⬇",
- true,
- "nearest-exact",
- "by percentage",
- 50,
512,
512,
- "disabled",
- "Hide",
- "ComfyUI",
- true
+ 1
],
- "color": "#222",
- "bgcolor": "#000"
+ "color": "#233",
+ "bgcolor": "#355"
},
{
- "id": 16,
+ "id": 33,
"type": "ttN pipeKSampler",
"pos": [
- 2265,
- -16
+ 2250,
+ 0
],
"size": [
- 678.1099043757813,
- 931.5864779539062
+ 680,
+ 930
],
"flags": {},
"order": 16,
@@ -1167,7 +1223,7 @@
{
"name": "pipe",
"type": "PIPE_LINE",
- "link": 23
+ "link": 49
},
{
"name": "optional_model",
@@ -1177,7 +1233,7 @@
{
"name": "optional_positive",
"type": "CONDITIONING",
- "link": 25
+ "link": 53
},
{
"name": "optional_negative",
@@ -1187,7 +1243,7 @@
{
"name": "optional_latent",
"type": "LATENT",
- "link": 32
+ "link": 54
},
{
"name": "optional_vae",
@@ -1200,8 +1256,8 @@
"link": null
},
{
- "name": "script",
- "type": "SCRIPT",
+ "name": "xyPlot",
+ "type": "XYPLOT",
"link": null
},
{
@@ -1267,9 +1323,8 @@
{
"name": "image",
"type": "IMAGE",
- "links": [],
- "shape": 3,
- "slot_index": 7
+ "links": null,
+ "shape": 3
},
{
"name": "seed",
@@ -1279,7 +1334,8 @@
}
],
"properties": {
- "ttNbgOverride": "black"
+ "Node name for S&R": "ttN pipeKSampler",
+ "ttNnodeVersion": "1.0.0"
},
"widgets_values": [
"add_detail.safetensors",
@@ -1296,225 +1352,187 @@
0.55,
"Save",
"Comfy",
- 943608960816357,
+ 171098745667926,
"randomize"
],
"color": "#323",
"bgcolor": "#535"
- },
- {
- "id": 9,
- "type": "Reroute",
- "pos": [
- 1193,
- 40
- ],
- "size": [
- 82,
- 26
- ],
- "flags": {},
- "order": 9,
- "mode": 0,
- "inputs": [
- {
- "name": "",
- "type": "*",
- "link": 18
- }
- ],
- "outputs": [
- {
- "name": "IMAGE",
- "type": "IMAGE",
- "links": [
- 28
- ],
- "slot_index": 0
- }
- ],
- "properties": {
- "showOutputText": true,
- "horizontal": false
- },
- "color": "#2a363b",
- "bgcolor": "#3f5159"
}
],
"links": [
[
- 1,
- 1,
+ 33,
+ 19,
0,
- 2,
+ 20,
0,
"PIPE_LINE"
],
[
- 2,
- 2,
- 8,
- 3,
+ 34,
+ 19,
+ 6,
+ 23,
0,
- "PIPE_LINE"
+ "CLIP"
],
[
- 3,
- 3,
- 8,
- 4,
+ 35,
+ 19,
+ 0,
+ 24,
0,
"PIPE_LINE"
],
[
- 4,
- 1,
- 6,
- 5,
+ 36,
+ 24,
+ 7,
+ 26,
0,
- "CLIP"
+ "*"
],
[
- 5,
- 1,
- 0,
- 6,
+ 37,
+ 34,
0,
- "PIPE_LINE"
+ 25,
+ 4,
+ "LATENT"
],
[
- 6,
- 3,
+ 38,
+ 21,
8,
- 7,
+ 22,
0,
"PIPE_LINE"
],
[
- 7,
- 6,
- 7,
- 8,
+ 39,
+ 26,
+ 0,
+ 27,
0,
"*"
],
[
- 9,
- 7,
- 7,
- 10,
+ 40,
+ 20,
+ 8,
+ 21,
0,
- "IMAGE"
+ "PIPE_LINE"
],
[
- 11,
- 10,
- 1,
- 11,
+ 41,
+ 23,
+ 0,
+ 25,
2,
- "MASK"
+ "CONDITIONING"
],
[
- 12,
- 11,
- 0,
- 12,
+ 42,
+ 21,
+ 8,
+ 25,
0,
- "IMAGE"
+ "PIPE_LINE"
],
[
- 13,
- 4,
+ 43,
+ 22,
5,
- 13,
+ 31,
0,
"CLIP"
],
[
- 18,
+ 44,
+ 22,
8,
+ 32,
0,
- 9,
- 0,
- "*"
+ "PIPE_LINE"
],
[
- 21,
- 4,
- 8,
- 15,
+ 45,
+ 27,
0,
- "PIPE_LINE"
+ 29,
+ 0,
+ "IMAGE"
],
[
- 23,
- 15,
- 8,
- 16,
+ 46,
+ 28,
0,
- "PIPE_LINE"
+ 29,
+ 1,
+ "IMAGE"
],
[
- 25,
- 13,
- 0,
- 16,
+ 47,
+ 28,
+ 1,
+ 29,
2,
- "CONDITIONING"
+ "MASK"
],
[
- 26,
- 5,
- 0,
+ 48,
+ 25,
7,
- 2,
- "CONDITIONING"
+ 28,
+ 0,
+ "IMAGE"
],
[
- 27,
- 17,
+ 49,
+ 32,
+ 8,
+ 33,
0,
- 7,
- 4,
- "LATENT"
+ "PIPE_LINE"
],
[
- 28,
- 9,
+ 50,
+ 30,
0,
- 11,
+ 35,
0,
"IMAGE"
],
[
+ 51,
29,
- 10,
0,
- 11,
- 1,
- "IMAGE"
- ],
- [
30,
- 12,
- 0,
- 18,
0,
"IMAGE"
],
[
- 31,
- 15,
+ 52,
+ 32,
4,
- 18,
+ 35,
1,
"VAE"
],
[
- 32,
- 18,
+ 53,
+ 31,
+ 0,
+ 33,
+ 2,
+ "CONDITIONING"
+ ],
+ [
+ 54,
+ 35,
0,
- 16,
+ 33,
4,
"LATENT"
]
diff --git a/workflows/tinyterra_imagebash.png b/workflows/tinyterra_imagebash.png
index 0ccd696..fd1f3a3 100644
Binary files a/workflows/tinyterra_imagebash.png and b/workflows/tinyterra_imagebash.png differ
diff --git a/workflows/tinyterra_prefixParsing.png b/workflows/tinyterra_prefixParsing.png
new file mode 100644
index 0000000..310ac6f
Binary files /dev/null and b/workflows/tinyterra_prefixParsing.png differ
diff --git a/workflows/tinyterra_trueHRFix.json b/workflows/tinyterra_trueHRFix.json
index 8e4b5f7..ebf7d14 100644
--- a/workflows/tinyterra_trueHRFix.json
+++ b/workflows/tinyterra_trueHRFix.json
@@ -1,26 +1,26 @@
{
- "last_node_id": 4,
- "last_link_id": 5,
+ "last_node_id": 20,
+ "last_link_id": 10,
"nodes": [
{
- "id": 2,
+ "id": 19,
"type": "ttN pipeKSampler",
"pos": [
- 441,
+ 1222,
42
],
"size": {
- "0": 347.827392578125,
- "1": 634.4051513671875
+ "0": 496.76678466796875,
+ "1": 807.2686157226562
},
"flags": {},
- "order": 1,
+ "order": 3,
"mode": 0,
"inputs": [
{
"name": "pipe",
"type": "PIPE_LINE",
- "link": 1
+ "link": 10
},
{
"name": "optional_model",
@@ -40,7 +40,7 @@
{
"name": "optional_latent",
"type": "LATENT",
- "link": null
+ "link": 9
},
{
"name": "optional_vae",
@@ -53,8 +53,8 @@
"link": null
},
{
- "name": "script",
- "type": "SCRIPT",
+ "name": "xyPlot",
+ "type": "XYPLOT",
"link": null
},
{
@@ -78,11 +78,8 @@
{
"name": "pipe",
"type": "PIPE_LINE",
- "links": [
- 2
- ],
- "shape": 3,
- "slot_index": 0
+ "links": null,
+ "shape": 3
},
{
"name": "model",
@@ -111,11 +108,8 @@
{
"name": "vae",
"type": "VAE",
- "links": [
- 5
- ],
- "shape": 3,
- "slot_index": 5
+ "links": null,
+ "shape": 3
},
{
"name": "clip",
@@ -126,11 +120,8 @@
{
"name": "image",
"type": "IMAGE",
- "links": [
- 3
- ],
- "shape": 3,
- "slot_index": 7
+ "links": null,
+ "shape": 3
},
{
"name": "seed",
@@ -140,10 +131,11 @@
}
],
"properties": {
- "ttNbgOverride": "black"
+ "Node name for S&R": "ttN pipeKSampler",
+ "ttNnodeVersion": "1.0.0"
},
"widgets_values": [
- "None",
+ "add_detail.safetensors",
1,
1,
"None",
@@ -154,93 +146,38 @@
"7.000",
"dpmpp_2m",
"karras",
- 1,
- "Preview",
+ 0.5,
+ "Save",
"Comfy",
- 842792688096800,
+ 996304000155359,
"randomize"
],
"color": "#323",
"bgcolor": "#535"
},
{
- "id": 3,
- "type": "ttN pipeKSampler",
+ "id": 20,
+ "type": "ttN pipeLoader",
"pos": [
- 1222,
- 42
+ 12,
+ 40
],
"size": {
- "0": 496.76678466796875,
- "1": 807.2686157226562
+ "0": 400,
+ "1": 726
},
"flags": {},
- "order": 3,
+ "order": 0,
"mode": 0,
- "inputs": [
- {
- "name": "pipe",
- "type": "PIPE_LINE",
- "link": 2
- },
- {
- "name": "optional_model",
- "type": "MODEL",
- "link": null
- },
- {
- "name": "optional_positive",
- "type": "CONDITIONING",
- "link": null
- },
- {
- "name": "optional_negative",
- "type": "CONDITIONING",
- "link": null
- },
- {
- "name": "optional_latent",
- "type": "LATENT",
- "link": 4
- },
- {
- "name": "optional_vae",
- "type": "VAE",
- "link": null
- },
- {
- "name": "optional_clip",
- "type": "CLIP",
- "link": null
- },
- {
- "name": "script",
- "type": "SCRIPT",
- "link": null
- },
- {
- "name": "seed",
- "type": "INT",
- "link": null,
- "widget": {
- "name": "seed",
- "config": [
- "INT",
- {
- "default": 0,
- "min": 0,
- "max": 18446744073709552000
- }
- ]
- }
- }
- ],
"outputs": [
{
"name": "pipe",
"type": "PIPE_LINE",
- "links": null,
- "shape": 3
+ "links": [
+ 6
+ ],
+ "shape": 3,
+ "slot_index": 0
},
{
"name": "model",
@@ -278,12 +215,6 @@
"links": null,
"shape": 3
},
- {
- "name": "image",
- "type": "IMAGE",
- "links": null,
- "shape": 3
- },
{
"name": "seed",
"type": "INT",
@@ -292,31 +223,39 @@
}
],
"properties": {
- "ttNbgOverride": "black"
+ "Node name for S&R": "ttN pipeLoader",
+ "ttNnodeVersion": "1.0.0"
},
"widgets_values": [
- "add_detail.safetensors",
+ "dreamshaper_6NoVae.safetensors",
+ "vae-ft-mse-840000-ema-pruned.safetensors",
+ -1,
+ "LowRA.safetensors",
+ 0.2,
+ 0,
+ "None",
1,
1,
"None",
- 2,
- "disabled",
- "Sample",
- 24,
- "7.000",
- "dpmpp_2m",
- "karras",
- 0.5,
- "Save",
- "Comfy",
- 5130918943785,
- "randomize"
+ 1,
+ 1,
+ "warm sunrays, mountains in the background, lush green plants, hobbit city (RAW photo:1.2), perfect composition, (masterpiece, 8k, absurdres, best quality, intricate), realistic, raytracing, sharp,",
+ "none",
+ "comfy++",
+ "disconnected, merged, weird, ugly, text, signature, watermark, blurry, blurred",
+ "none",
+ "comfy++",
+ 768,
+ 512,
+ 1,
+ 278267302676039,
+ "fixed"
],
- "color": "#323",
- "bgcolor": "#535"
+ "color": "#222",
+ "bgcolor": "#000"
},
{
- "id": 4,
+ "id": 17,
"type": "ttN hiresfixScale",
"pos": [
824,
@@ -333,12 +272,14 @@
{
"name": "image",
"type": "IMAGE",
- "link": 3
+ "link": 7,
+ "slot_index": 0
},
{
"name": "vae",
"type": "VAE",
- "link": 5
+ "link": 8,
+ "slot_index": 1
}
],
"outputs": [
@@ -346,7 +287,7 @@
"name": "latent",
"type": "LATENT",
"links": [
- 4
+ 9
],
"shape": 3,
"slot_index": 0
@@ -360,7 +301,8 @@
],
"properties": {
"infoWidgetHidden": false,
- "ttNbgOverride": "black"
+ "Node name for S&R": "ttN hiresfixScale",
+ "ttNnodeVersion": "1.0.0"
},
"widgets_values": [
"DF2K_JPEG.pth",
@@ -371,6 +313,7 @@
50,
512,
512,
+ 1024,
"disabled",
"Preview",
"ComfyUI",
@@ -380,25 +323,83 @@
"bgcolor": "#355"
},
{
- "id": 1,
- "type": "ttN pipeLoader",
+ "id": 18,
+ "type": "ttN pipeKSampler",
"pos": [
- 12,
- 40
+ 441,
+ 42
],
"size": {
- "0": 400,
- "1": 726
+ "0": 347.827392578125,
+ "1": 634.4051513671875
},
"flags": {},
- "order": 0,
+ "order": 1,
"mode": 0,
+ "inputs": [
+ {
+ "name": "pipe",
+ "type": "PIPE_LINE",
+ "link": 6
+ },
+ {
+ "name": "optional_model",
+ "type": "MODEL",
+ "link": null
+ },
+ {
+ "name": "optional_positive",
+ "type": "CONDITIONING",
+ "link": null
+ },
+ {
+ "name": "optional_negative",
+ "type": "CONDITIONING",
+ "link": null
+ },
+ {
+ "name": "optional_latent",
+ "type": "LATENT",
+ "link": null
+ },
+ {
+ "name": "optional_vae",
+ "type": "VAE",
+ "link": null
+ },
+ {
+ "name": "optional_clip",
+ "type": "CLIP",
+ "link": null
+ },
+ {
+ "name": "xyPlot",
+ "type": "XYPLOT",
+ "link": null
+ },
+ {
+ "name": "seed",
+ "type": "INT",
+ "link": null,
+ "widget": {
+ "name": "seed",
+ "config": [
+ "INT",
+ {
+ "default": 0,
+ "min": 0,
+ "max": 18446744073709552000
+ }
+ ]
+ }
+ }
+ ],
"outputs": [
{
"name": "pipe",
"type": "PIPE_LINE",
"links": [
- 1
+ 10
],
"shape": 3,
"slot_index": 0
@@ -430,7 +431,9 @@
{
"name": "vae",
"type": "VAE",
- "links": null,
+ "links": [
+ 8
+ ],
"shape": 3
},
{
@@ -439,6 +442,14 @@
"links": null,
"shape": 3
},
+ {
+ "name": "image",
+ "type": "IMAGE",
+ "links": [
+ 7
+ ],
+ "shape": 3
+ },
{
"name": "seed",
"type": "INT",
@@ -447,77 +458,71 @@
}
],
"properties": {
- "ttNbgOverride": "black"
+ "Node name for S&R": "ttN pipeKSampler",
+ "ttNnodeVersion": "1.0.0"
},
"widgets_values": [
- "dreamshaper_6NoVae.safetensors",
- "vae-ft-mse-840000-ema-pruned.safetensors",
- -1,
- "LowRA.safetensors",
- 0.2,
- 0,
"None",
1,
1,
"None",
+ 2,
+ "disabled",
+ "Sample",
+ 24,
+ "7.000",
+ "dpmpp_2m",
+ "karras",
1,
- 1,
- "warm sunrays, mountains in the background, lush green plants, hobbit city (RAW photo:1.2), perfect composition, (masterpiece, 8k, absurdres, best quality, intricate), realistic, raytracing, sharp,",
- "none",
- "comfy++",
- "disconnected, merged, weird, ugly, text, signature, watermark, blurry, blurred",
- "none",
- "comfy++",
- 768,
- 512,
- 1,
- 278267302676039,
- "fixed"
+ "Preview",
+ "Comfy",
+ 641647281324617,
+ "randomize"
],
- "color": "#222",
- "bgcolor": "#000"
+ "color": "#323",
+ "bgcolor": "#535"
}
],
"links": [
[
- 1,
- 1,
- 0,
- 2,
- 0,
- "PIPE_LINE"
- ],
- [
- 2,
- 2,
+ 6,
+ 20,
0,
- 3,
+ 18,
0,
"PIPE_LINE"
],
[
- 3,
- 2,
7,
- 4,
+ 18,
+ 7,
+ 17,
0,
"IMAGE"
],
[
- 4,
- 4,
+ 8,
+ 18,
+ 5,
+ 17,
+ 1,
+ "VAE"
+ ],
+ [
+ 9,
+ 17,
0,
- 3,
+ 19,
4,
"LATENT"
],
[
- 5,
- 2,
- 5,
- 4,
- 1,
- "VAE"
+ 10,
+ 18,
+ 0,
+ 19,
+ 0,
+ "PIPE_LINE"
]
],
"groups": [],
diff --git a/workflows/tinyterra_trueHRFix.png b/workflows/tinyterra_trueHRFix.png
index 4352e32..a1a3a69 100644
Binary files a/workflows/tinyterra_trueHRFix.png and b/workflows/tinyterra_trueHRFix.png differ
diff --git a/workflows/tinyterra_xyPlot.json b/workflows/tinyterra_xyPlot.json
new file mode 100644
index 0000000..250d008
--- /dev/null
+++ b/workflows/tinyterra_xyPlot.json
@@ -0,0 +1,321 @@
+{
+ "last_node_id": 23,
+ "last_link_id": 15,
+ "nodes": [
+ {
+ "id": 21,
+ "type": "ttN xyPlot",
+ "pos": [
+ 650,
+ 210
+ ],
+ "size": {
+ "0": 300,
+ "1": 300
+ },
+ "flags": {},
+ "order": 0,
+ "mode": 0,
+ "outputs": [
+ {
+ "name": "xyPlot",
+ "type": "XYPLOT",
+ "links": [
+ 15
+ ],
+ "shape": 3,
+ "slot_index": 0
+ }
+ ],
+ "properties": {
+ "Node name for S&R": "ttN xyPlot",
+ "ttNnodeVersion": "1.0.0"
+ },
+ "widgets_values": [
+ 0,
+ 0,
+ "False",
+ "loader: ckpt_name",
+ "Good\\deliberate_v2.safetensors; dreamshaper_6NoVae.safetensors; landscapePhotoreal_v1.safetensors; ",
+ "sampler: seed",
+ "increment; increment; increment; increment; "
+ ],
+ "color": "#233",
+ "bgcolor": "#355"
+ },
+ {
+ "id": 22,
+ "type": "ttN pipeLoader",
+ "pos": [
+ 200,
+ 70
+ ],
+ "size": [
+ 400,
+ 740
+ ],
+ "flags": {},
+ "order": 1,
+ "mode": 0,
+ "outputs": [
+ {
+ "name": "pipe",
+ "type": "PIPE_LINE",
+ "links": [
+ 13
+ ],
+ "shape": 3,
+ "slot_index": 0
+ },
+ {
+ "name": "model",
+ "type": "MODEL",
+ "links": null,
+ "shape": 3
+ },
+ {
+ "name": "positive",
+ "type": "CONDITIONING",
+ "links": null,
+ "shape": 3
+ },
+ {
+ "name": "negative",
+ "type": "CONDITIONING",
+ "links": null,
+ "shape": 3
+ },
+ {
+ "name": "latent",
+ "type": "LATENT",
+ "links": null,
+ "shape": 3
+ },
+ {
+ "name": "vae",
+ "type": "VAE",
+ "links": null,
+ "shape": 3
+ },
+ {
+ "name": "clip",
+ "type": "CLIP",
+ "links": null,
+ "shape": 3
+ },
+ {
+ "name": "seed",
+ "type": "INT",
+ "links": null,
+ "shape": 3
+ }
+ ],
+ "properties": {
+ "Node name for S&R": "ttN pipeLoader",
+ "ttNnodeVersion": "1.0.0"
+ },
+ "widgets_values": [
+ "dreamshaper_6NoVae.safetensors",
+ "vae-ft-mse-840000-ema-pruned.safetensors",
+ -1,
+ "None",
+ 0.2,
+ 0.2,
+ "None",
+ 1,
+ 1,
+ "None",
+ 1,
+ 1,
+ "warm sunrays, mountains in the background, lush green plants, hobbit city (RAW photo:1.2), perfect composition, (masterpiece, 8k, absurdres, best quality, intricate), realistic, raytracing, sharp,",
+ "none",
+ "comfy++",
+ "disconnected, merged, weird, ugly, text, signature, watermark, blurry, blurred",
+ "none",
+ "comfy++",
+ 768,
+ 512,
+ 1,
+ 278267302676039,
+ "fixed"
+ ],
+ "color": "#222",
+ "bgcolor": "#000"
+ },
+ {
+ "id": 23,
+ "type": "ttN pipeKSampler",
+ "pos": [
+ 1000,
+ 70
+ ],
+ "size": [
+ 720,
+ 1080
+ ],
+ "flags": {},
+ "order": 2,
+ "mode": 0,
+ "inputs": [
+ {
+ "name": "pipe",
+ "type": "PIPE_LINE",
+ "link": 13
+ },
+ {
+ "name": "optional_model",
+ "type": "MODEL",
+ "link": null
+ },
+ {
+ "name": "optional_positive",
+ "type": "CONDITIONING",
+ "link": null
+ },
+ {
+ "name": "optional_negative",
+ "type": "CONDITIONING",
+ "link": null
+ },
+ {
+ "name": "optional_latent",
+ "type": "LATENT",
+ "link": null
+ },
+ {
+ "name": "optional_vae",
+ "type": "VAE",
+ "link": null
+ },
+ {
+ "name": "optional_clip",
+ "type": "CLIP",
+ "link": null
+ },
+ {
+ "name": "xyPlot",
+ "type": "XYPLOT",
+ "link": 15
+ },
+ {
+ "name": "seed",
+ "type": "INT",
+ "link": null,
+ "widget": {
+ "name": "seed",
+ "config": [
+ "INT",
+ {
+ "default": 0,
+ "min": 0,
+ "max": 18446744073709552000
+ }
+ ]
+ }
+ }
+ ],
+ "outputs": [
+ {
+ "name": "pipe",
+ "type": "PIPE_LINE",
+ "links": null,
+ "shape": 3
+ },
+ {
+ "name": "model",
+ "type": "MODEL",
+ "links": null,
+ "shape": 3
+ },
+ {
+ "name": "positive",
+ "type": "CONDITIONING",
+ "links": null,
+ "shape": 3
+ },
+ {
+ "name": "negative",
+ "type": "CONDITIONING",
+ "links": null,
+ "shape": 3
+ },
+ {
+ "name": "latent",
+ "type": "LATENT",
+ "links": null,
+ "shape": 3
+ },
+ {
+ "name": "vae",
+ "type": "VAE",
+ "links": null,
+ "shape": 3
+ },
+ {
+ "name": "clip",
+ "type": "CLIP",
+ "links": null,
+ "shape": 3
+ },
+ {
+ "name": "image",
+ "type": "IMAGE",
+ "links": null,
+ "shape": 3
+ },
+ {
+ "name": "seed",
+ "type": "INT",
+ "links": null,
+ "shape": 3
+ }
+ ],
+ "properties": {
+ "Node name for S&R": "ttN pipeKSampler",
+ "ttNnodeVersion": "1.0.0"
+ },
+ "widgets_values": [
+ "None",
+ 1,
+ 1,
+ "None",
+ 2,
+ "disabled",
+ "Sample",
+ 24,
+ "7.000",
+ "dpmpp_2m",
+ "karras",
+ 1,
+ "Save",
+ "Comfy",
+ 861410848433374,
+ "randomize"
+ ],
+ "color": "#323",
+ "bgcolor": "#535"
+ }
+ ],
+ "links": [
+ [
+ 13,
+ 22,
+ 0,
+ 23,
+ 0,
+ "PIPE_LINE"
+ ],
+ [
+ 15,
+ 21,
+ 0,
+ 23,
+ 7,
+ "XYPLOT"
+ ]
+ ],
+ "groups": [],
+ "config": {},
+ "extra": {},
+ "version": 0.4
+}
\ No newline at end of file
diff --git a/workflows/tinyterra_xyPlot.png b/workflows/tinyterra_xyPlot.png
new file mode 100644
index 0000000..53d3655
Binary files /dev/null and b/workflows/tinyterra_xyPlot.png differ