Skip to content

Commit

Permalink
v0.2.6
Browse files Browse the repository at this point in the history
  • Loading branch information
shadowcz007 committed Dec 6, 2023
1 parent f847dcc commit a6cc907
Show file tree
Hide file tree
Showing 11 changed files with 1,709 additions and 260 deletions.
13 changes: 11 additions & 2 deletions README.md
Original file line number Diff line number Diff line change
@@ -1,11 +1,20 @@
##

v0.2.6 🚀🚗🚚🏃‍

- [Add getting camera video stream](./workflow/7-camera-workflow.json)
- Add a slider to the floating window, which can be used as input for denoise
- Support for calling multiple GPTs


![screenshare](./assets/screenshare.png)


### ScreenShareNode & FloatingVideoNode
> Now comfyui supports capturing screen pixel streams from any software and can be used for LCM-Lora integration. Let's get started with implementation and design! 💻🌐
>
https://github.com/shadowcz007/comfyui-mixlab-nodes/assets/12645064/e7e77f90-e43e-410a-ab3a-1952b7b4e7da


Expand All @@ -16,15 +25,15 @@ https://github.com/shadowcz007/comfyui-mixlab-nodes/assets/12645064/e7e77f90-e43


### LoadImagesFromLocal
> Monitor changes to images in a local folder, and trigger real-time execution of workflows, supporting common image formats, especially PSD format, in conjunction with Photoshop. Q: Translate into English
> Monitor changes to images in a local folder, and trigger real-time execution of workflows, supporting common image formats, especially PSD format, in conjunction with Photoshop.
![watch](./assets/4-loadfromlocal-watcher-workflow.svg)

[workflow-4](./workflow/4-loadfromlocal-watcher-workflow.json)


### GPT
> ChatGPT、ChatGLM3 , Some code provided by rui. If you are using OpenAI's service, fill in https://api.openai.com/v1 . If you are using a local LLM service, fill in http://127.0.0.1:xxxx/v1
> Support for calling multiple GPTs: ChatGPT, ChatGLM3 (some code provided by rui). If you are using OpenAI's service, fill in https://api.openai.com/v1 . If you are using a local LLM service, fill in http://127.0.0.1:xxxx/v1 . Azure OpenAI: https://xxxx.openai.azure.com

![gpt-workflow.svg](./assets/gpt-workflow.svg)
Expand Down
24 changes: 18 additions & 6 deletions __init__.py
Original file line number Diff line number Diff line change
Expand Up @@ -64,6 +64,18 @@ def is_installed(package, package_overwrite=None):
sys.exit()



def install_openai(package='openai'):
    """Ensure *package* is importable, installing it with pip if missing.

    Defaults to the 'openai' package required by the ChatGPT nodes.

    Args:
        package: Importable module/distribution name to check and install.

    Returns:
        None. Raises ``subprocess.CalledProcessError`` if the install fails.
    """
    try:
        importlib.import_module(package)
    except ImportError:
        import subprocess
        import sys
        # pip.main([...]) (the original approach) is pip's internal API and is
        # explicitly unsupported; the documented way to drive pip from code is
        # a subprocess running ``python -m pip`` with the current interpreter.
        subprocess.check_call([sys.executable, '-m', 'pip', 'install', package])

install_openai()


current_path = os.path.abspath(os.path.dirname(__file__))


Expand Down Expand Up @@ -281,21 +293,21 @@ def new_add_routes(self):
"FloatingVideo":FloatingVideo,
"CLIPSeg_":CLIPSeg,
"CombineMasks_":CombineMasks,
"ChatGPT":ChatGPTNode,
"ChatGPTOpenAI":ChatGPTNode,
"ShowTextForGPT":ShowTextForGPT,
"CharacterInText":CharacterInText
}

# 一个包含节点友好/可读的标题的字典
NODE_DISPLAY_NAME_MAPPINGS = {
"RandomPrompt": "Random Prompt #Mixlab",
"RandomPrompt": "Random Prompt ♾️Mixlab",
"SplitLongMask":"Splitting a long image into sections",
"VAELoaderConsistencyDecoder":"Consistency Decoder Loader",
"VAEDecodeConsistencyDecoder":"Consistency Decoder Decode",
"ScreenShare":"ScreenShare #Mixlab",
"FloatingVideo":"FloatingVideo #Mixlab",
"ChatGPT":"ChatGPT #Mixlab",
"ShowTextForGPT":"ShowTextForGPT #Mixlab"
"ScreenShare":"ScreenShare ♾️Mixlab",
"FloatingVideo":"FloatingVideo ♾️Mixlab",
"ChatGPTOpenAI":"ChatGPT ♾️Mixlab",
"ShowTextForGPT":"ShowTextForGPT ♾️Mixlab"
}

# web ui的节点功能
Expand Down
23 changes: 20 additions & 3 deletions nodes/ChatGPT.py
Original file line number Diff line number Diff line change
Expand Up @@ -75,17 +75,21 @@ def INPUT_TYPES(cls):
"required": {
"api_key":("KEY", {"default": "", "multiline": True}),
"api_url":("URL", {"default": "", "multiline": True}),
"prompt": ("STRING", {"default": "", "multiline": True}),
"prompt": ("STRING", {"multiline": True}),
"system_content": ("STRING",
{
"default": "You are ChatGPT, a large language model trained by OpenAI. Answer as concisely as possible.",
"multiline": True
}),
"model": (["gpt-3.5-turbo", "gpt-3.5-turbo-16k-0613", "gpt-4-0613","gpt-4-1106-preview"],
"model": (["gpt-3.5-turbo","gpt-35-turbo","gpt-3.5-turbo-16k", "gpt-3.5-turbo-16k-0613", "gpt-4-0613","gpt-4-1106-preview"],
{"default": "gpt-3.5-turbo"}),
"seed": ("INT", {"default": 0, "min": 0, "max": 10000, "step": 1}),
"context_size":("INT", {"default": 1, "min": 0, "max":30, "step": 1}),
},
"hidden": {
"unique_id": "UNIQUE_ID",
"extra_pnginfo": "EXTRA_PNGINFO",
},
}

RETURN_TYPES = ("STRING","STRING","STRING",)
Expand All @@ -102,7 +106,7 @@ def generate_contextual_text(self,
prompt,
system_content,
model,
seed,context_size):
seed,context_size,unique_id = None, extra_pnginfo=None):
# print(api_key!='',api_url,prompt,system_content,model,seed)
# 可以选择保留会话历史以维持上下文记忆
# 或者在此处清除会话历史 self.session_history.clear()
Expand Down Expand Up @@ -140,6 +144,19 @@ def crop_list_tail(lst, size):
response_content = chat(client,model,messages)

self.session_history=self.session_history+[{"role": "user", "content": prompt}]+[{'role':'assistant',"content":response_content}]


# if unique_id and extra_pnginfo and "workflow" in extra_pnginfo[0]:
# workflow = extra_pnginfo[0]["workflow"]
# node = next((x for x in workflow["nodes"] if str(x["id"]) == unique_id[0]), None)
# if node:
# node["widgets_values"] = ["",
# api_url,
# prompt,
# system_content,
# model,
# seed,
# context_size]

return (response_content,json.dumps(messages, indent=4),json.dumps(self.session_history, indent=4),)

Expand Down
23 changes: 18 additions & 5 deletions nodes/ScreenShareNode.py
Original file line number Diff line number Diff line change
Expand Up @@ -82,23 +82,24 @@ def INPUT_TYPES(s):
},
"optional":{
"prompt": ("PROMPT",),
"slide": ("SLIDE",),
# "seed": ("INT", {"default": 1, "min": 0, "max": 0xffffffffffffffff}),
} }

RETURN_TYPES = ('IMAGE','STRING')

RETURN_TYPES = ('IMAGE','STRING','FLOAT')
RETURN_NAMES = ("IMAGE","PROMPT","FLOAT",)
FUNCTION = "run"

CATEGORY = "♾️Mixlab/image"

# INPUT_IS_LIST = True
OUTPUT_IS_LIST = (False,False,False)
OUTPUT_IS_LIST = (False,False,False,False)

# 运行的函数
def run(self,image_base64,prompt):
def run(self,image_base64,prompt,slide):
im,mask=base64_save(image_base64)
# print('##########prompt',prompt)
return (im,prompt)
return (im,prompt,slide)


class FloatingVideo:
Expand Down Expand Up @@ -137,3 +138,15 @@ def run(self,images):

return { "ui": { "images_": results } }



# class SildeNode:
# CATEGORY = "quicknodes"
# @classmethod
# def INPUT_TYPES(s):
# return { "required":{} }
# RETURN_TYPES = ()
# RETURN_NAMES = ()
# FUNCTION = "func"
# def func(self):
# return ()
12 changes: 8 additions & 4 deletions web/javascript/checkVersion_mixlab.js
Original file line number Diff line number Diff line change
Expand Up @@ -3,16 +3,20 @@ import { app } from '../../../scripts/app.js'
const repoOwner = 'shadowcz007' // 替换为仓库的所有者
const repoName = 'comfyui-mixlab-nodes' // 替换为仓库的名称

const version = 'v0.2.5.2'
const version = 'v0.2.6'

fetch(`https://api.github.com/repos/${repoOwner}/${repoName}/releases/latest`)
.then(response => response.json())
.then(data => {
const latestVersion = data.tag_name
console.log('Latest release version:', latestVersion)
// if (latestVersion === localStorage.getItem('_mixlab_nodes_vesion')) return
if (latestVersion != version) {
// localStorage.setItem('_mixlab_nodes_vesion', latestVersion)
if (
latestVersion &&
latestVersion === localStorage.getItem('_mixlab_nodes_vesion')
)
return
if (latestVersion && latestVersion != version) {
localStorage.setItem('_mixlab_nodes_vesion', latestVersion)
app.ui.dialog.show(`<h4 style="font-size: 18px;">${repoName} <br>
Latest release version: ${latestVersion}</h4>
<p>Please proceed to the official repository to download the latest version.</p>
Expand Down
Loading

0 comments on commit a6cc907

Please sign in to comment.