@@ -363,25 +363,29 @@ def __init__(self):
363
363
self .embedding_provider = "sentence_transformers"
364
364
self .llm_model = ""
365
365
self .embedding_model = ""
366
- self .assistant = "Cortana "
366
+ self .assistant = "None "
367
367
self .random = False
368
- self . rag_dir = os . path . join ( folder_paths . base_path , "custom_nodes" , "ComfyUI_IF_AI_tools" , "IF_AI" , "rag" )
368
+
369
369
self .comfy_dir = os .path .dirname (os .path .dirname (os .path .dirname (os .path .abspath (__file__ ))))
370
+ self .rag_dir = os .path .join (folder_paths .base_path , "custom_nodes" , "ComfyUI_IF_AI_tools" , "IF_AI" , "rag" )
370
371
self .presets_dir = os .path .join (folder_paths .base_path , "custom_nodes" , "ComfyUI_IF_AI_tools" , "IF_AI" , "presets" )
372
+
371
373
self .stop_file = os .path .join (self .presets_dir , "stop_strings.json" )
372
374
self .assistants_file = os .path .join (self .presets_dir , "assistants.json" )
373
375
self .neg_prompts_file = os .path .join (self .presets_dir , "neg_prompts.json" )
374
376
self .embellish_prompts_file = os .path .join (self .presets_dir , "embellishments.json" )
375
377
self .style_prompts_file = os .path .join (self .presets_dir , "style_prompts.json" )
376
378
self .tasks_file = os .path .join (self .presets_dir , "florence_prompts.json" )
377
379
self .agents_dir = os .path .join (self .presets_dir , "agents" )
380
+
378
381
self .agent_tools = self .load_agent_tools ()
379
382
self .stop_strings = self .load_presets (self .stop_file )
380
383
self .assistants = self .load_presets (self .assistants_file )
381
384
self .neg_prompts = self .load_presets (self .neg_prompts_file )
382
385
self .embellish_prompts = self .load_presets (self .embellish_prompts_file )
383
386
self .style_prompts = self .load_presets (self .style_prompts_file )
384
387
self .florence_prompts = self .load_presets (self .tasks_file )
388
+
385
389
self .keep_alive = False
386
390
self .seed = 94687328150
387
391
self .messages = []
@@ -394,8 +398,6 @@ def __init__(self):
394
398
self .colpali_app = colpaliRAGapp ()
395
399
self .fix_json = True
396
400
self .cached_colpali_model = None
397
- #self.transformers_manager = TransformersModelManager()
398
- #self.transformers_app = self.transformers_manager.send_transformers_request
399
401
self .florence_app = FlorenceModule ()
400
402
self .florence_models = {}
401
403
self .query_type = "global"
@@ -411,9 +413,8 @@ def __init__(self):
411
413
self .top_k_search = 3
412
414
413
415
self .placeholder_image_path = os .path .join (folder_paths .base_path , "custom_nodes" , "ComfyUI_IF_AI_tools" , "IF_AI" , "placeholder.png" )
414
- # Ensure the placeholder image exists
416
+
415
417
if not os .path .exists (self .placeholder_image_path ):
416
- # Create a proper RGB placeholder image
417
418
placeholder = Image .new ('RGB' , (512 , 512 ), color = (73 , 109 , 137 ))
418
419
os .makedirs (os .path .dirname (self .placeholder_image_path ), exist_ok = True )
419
420
placeholder .save (self .placeholder_image_path )
@@ -424,29 +425,34 @@ def load_presets(self, file_path):
424
425
return presets
425
426
426
427
def load_agent_tools(self):
    """Scan ``self.agents_dir`` for agent-tool JSON definitions and load them.

    Each ``*.json`` file is parsed and used to construct an ``AgentTool``;
    a tool is kept only when it produces a class instance and exposes a
    ``python_function``.  Every per-file failure is reported to stdout and
    skipped so one broken definition cannot block the remaining tools.

    Returns:
        dict: mapping of tool name -> loaded ``AgentTool``; ``{}`` when the
        agents directory cannot be accessed at all.
    """
    # Ensure the directory exists so a fresh install does not crash here.
    os.makedirs(self.agents_dir, exist_ok=True)
    agent_tools = {}
    try:
        for filename in os.listdir(self.agents_dir):
            if not filename.endswith('.json'):
                continue
            full_path = os.path.join(self.agents_dir, filename)
            # Explicit encoding: the default is platform-dependent and the
            # preset files are shipped as UTF-8.
            with open(full_path, 'r', encoding='utf-8') as f:
                try:
                    data = json.load(f)
                    # Older definitions may omit 'output_type'; default it.
                    data.setdefault('output_type', None)
                    agent_tool = AgentTool(**data)
                    agent_tool.load()
                    if agent_tool._class_instance is not None:
                        if agent_tool.python_function:
                            agent_tools[agent_tool.name] = agent_tool
                        else:
                            print(f"Warning: Agent tool {agent_tool.name} in {filename} does not have a python_function defined.")
                    else:
                        print(f"Failed to create class instance for {filename}")
                except json.JSONDecodeError:
                    print(f"Error: Invalid JSON in {filename}")
                except Exception as e:
                    print(f"Error loading {filename}: {str(e)}")
        return agent_tools
    except Exception as e:
        # Best-effort: an unreadable directory yields no tools rather than
        # aborting node initialization.
        print(f"Warning: Error accessing agent tools directory: {str(e)}")
        return {}
450
456
451
457
async def process_chat (
452
458
self ,
@@ -648,7 +654,7 @@ async def process_chat(
648
654
)
649
655
650
656
generated_text = response .get ("Response" )
651
- selected_neg_prompt_name = neg_prompt # The name/key selected in the UI
657
+ selected_neg_prompt_name = neg_prompt
652
658
omni = response .get ("Tool_Output" )
653
659
retrieved_image = response .get ("Retrieved_Image" )
654
660
retrieved_mask = response .get ("Mask" )
@@ -677,21 +683,17 @@ async def process_chat(
677
683
# Handle negative prompts
678
684
if selected_neg_prompt_name == "AI_Fill" :
679
685
try :
680
- # Get the NegativePromptEngineer system message
681
686
neg_system_message = self .assistants .get ("NegativePromptEngineer" )
682
687
if not neg_system_message :
683
688
logger .error ("NegativePromptEngineer not found in assistants configuration" )
684
689
negative_prompt = "Error: NegativePromptEngineer not configured"
685
690
else :
686
- # Construct a clear prompt for negative generation
687
691
user_message = f"Generate negative prompts for the following prompt:\n { text_result } "
688
692
689
- # Ensure we have a valid system message
690
693
system_message_str = json .dumps (neg_system_message )
691
694
692
695
logger .info (f"Requesting negative prompts for prompt: { text_result [:100 ]} ..." )
693
696
694
- # Make the LLM request with proper parameter handling
695
697
neg_response = await send_request (
696
698
llm_provider = llm_provider ,
697
699
base_ip = base_ip ,
@@ -716,19 +718,16 @@ async def process_chat(
716
718
logger .debug (f"Received negative prompt response: { neg_response } " )
717
719
718
720
if neg_response :
719
- # Split the AI-generated negative prompts into lines
720
721
negative_lines = []
721
722
for line in neg_response .split ('\n ' ):
722
723
line = line .strip ()
723
724
if line :
724
725
negative_lines .append (line )
725
726
726
- # Match number of negative prompts to positive prompts
727
727
while len (negative_lines ) < len (lines ):
728
728
negative_lines .append (negative_lines [- 1 ] if negative_lines else "" )
729
729
negative_lines = negative_lines [:len (lines )]
730
730
731
- # Create multiline string with explicit newlines
732
731
negative_prompt = "\n " .join (negative_lines )
733
732
else :
734
733
negative_prompt = "Error: Empty response from LLM"
@@ -737,7 +736,6 @@ async def process_chat(
737
736
negative_prompt = f"Error generating negative prompts: { str (e )} "
738
737
739
738
elif neg_prompt != "None" :
740
- # Create a negative prompt for each line
741
739
neg_content = self .neg_prompts .get (neg_prompt , "" ).strip ()
742
740
negative_lines = [neg_content for _ in range (len (lines ))]
743
741
negative_prompt = "\n " .join (negative_lines )
@@ -749,7 +747,6 @@ async def process_chat(
749
747
negative_prompt = ""
750
748
751
749
try :
752
- # Check if retrieved_image is already a tensor in ComfyUI format
753
750
if isinstance (retrieved_image , torch .Tensor ):
754
751
# Ensure it's in the correct format (B, C, H, W)
755
752
if retrieved_image .dim () == 3 : # Single image (C, H, W)
@@ -778,18 +775,14 @@ async def process_chat(
778
775
# Process retrieved_mask if it's not a tensor
779
776
mask_tensor = process_mask (retrieved_mask , image_tensor )
780
777
else :
781
- # Process the retrieved image using process_images_for_comfy
782
778
image_tensor , default_mask_tensor = process_images_for_comfy (
783
779
retrieved_image ,
784
780
self .placeholder_image_path
785
781
)
786
782
mask_tensor = default_mask_tensor
787
783
788
784
if retrieved_mask is not None :
789
- # Process retrieved_mask to ensure it's in the correct format
790
785
mask_tensor = process_mask (retrieved_mask , image_tensor )
791
-
792
- # Now image_tensor and mask_tensor are ready to be used
793
786
return (
794
787
prompt ,
795
788
combined_prompt ,
0 commit comments