clip_combiner.py
import comfy.sd
import folder_paths
import torch
import copy
import comfy.model_management

class CLIPSplitter:
    @classmethod
    def INPUT_TYPES(s):
        return {"required": {
            "clip": ("CLIP",),
        }}

    RETURN_TYPES = ("CLIP_L", "CLIP_G", "CLIP_T5")
    RETURN_NAMES = ("CLIP_L", "CLIP_G", "T5")
    FUNCTION = "split_clip"
    CATEGORY = "cel_model/clip"
    def split_clip(self, clip):
        # Determine which kind of CLIP stack this is
        if hasattr(clip.cond_stage_model, 'clip_l') and hasattr(clip.cond_stage_model, 'clip_g'):
            if hasattr(clip.cond_stage_model, 't5xxl'):  # SD3
                splits = [
                    self._create_single_clip(clip, 'clip_l'),
                    self._create_single_clip(clip, 'clip_g'),
                    self._create_single_clip(clip, 't5xxl')
                ]
            else:  # SDXL
                splits = [
                    self._create_single_clip(clip, 'clip_l'),
                    self._create_single_clip(clip, 'clip_g'),
                    None
                ]
        else:
            # Single CLIP model: pass it through unchanged
            splits = [clip, None, None]

        # Log the type of each returned CLIP
        for i, split in enumerate(splits):
            if split is not None:
                print(f"Returned CLIP {i+1}: {type(split.cond_stage_model)}")
        return tuple(splits)
    def _create_single_clip(self, clip, model_type):
        if getattr(clip.cond_stage_model, model_type, None) is None:
            return None
        new_clip = clip.clone()
        new_clip.cond_stage_model = getattr(clip.cond_stage_model, model_type)
        return new_clip

class CLIPCombiner:
    @classmethod
    def INPUT_TYPES(s):
        return {"required": {
            "clip_l": ("CLIP_L",),
            "model_type": (["sdxl", "sd3"], {"default": "sdxl"}),
        },
        "optional": {
            "clip_g": ("CLIP_G",),
            "clip_t5": ("CLIP_T5",),
        }}

    RETURN_TYPES = ("CLIP",)
    FUNCTION = "combine_clip"
    CATEGORY = "cel_model/clip"
    def combine_clip(self, clip_l, model_type, clip_g=None, clip_t5=None):
        device = "cpu"  # build the combined model on CPU first
        try:
            # Recover the embedding directory from the incoming tokenizer
            embedding_directory = None
            if hasattr(clip_l.tokenizer, 'clip_l'):
                embedding_directory = clip_l.tokenizer.clip_l.embedding_directory
            elif hasattr(clip_l.tokenizer, 'l'):
                embedding_directory = clip_l.tokenizer.l.embedding_directory

            if model_type == "sdxl":
                if clip_g is None:
                    raise ValueError("SDXL requires both CLIP_L and CLIP_G")
                from comfy.sdxl_clip import SDXLClipModel, SDXLTokenizer
                combined_model = SDXLClipModel(device=device)
                combined_model.clip_l = clip_l.cond_stage_model
                combined_model.clip_g = clip_g.cond_stage_model
                # Wrap the combined model with an SDXL tokenizer
                new_clip = clip_l.clone()
                new_clip.cond_stage_model = combined_model
                new_clip.tokenizer = SDXLTokenizer(embedding_directory=embedding_directory)
            elif model_type == "sd3":
                from comfy.text_encoders.sd3_clip import SD3ClipModel, SD3Tokenizer
                # Temporarily unload all models to free memory
                comfy.model_management.unload_all_models()
                has_t5 = clip_t5 is not None
                combined_model = SD3ClipModel(
                    clip_l=True,
                    clip_g=clip_g is not None,
                    t5=has_t5,
                    device=device
                )
                combined_model.clip_l = clip_l.cond_stage_model
                if clip_g is not None:
                    combined_model.clip_g = clip_g.cond_stage_model
                if has_t5:
                    combined_model.t5xxl = clip_t5.cond_stage_model
                # Create a new CLIP object for SD3
                new_clip = clip_l.clone()
                new_clip.cond_stage_model = combined_model
                new_clip.tokenizer = SD3Tokenizer(embedding_directory=embedding_directory)
            else:
                raise ValueError(f"Unknown model_type: {model_type}")

            # Move to the target device if needed
            target_device = comfy.model_management.intermediate_device()
            if target_device != torch.device("cpu"):
                new_clip.cond_stage_model.to(target_device)
            return (new_clip,)
        except Exception:
            comfy.model_management.unload_all_models()
            raise
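

# Minimal registration sketch, assuming these nodes are not already registered
# in the package's __init__.py. ComfyUI discovers custom nodes through
# NODE_CLASS_MAPPINGS; the mapping keys and display names below are assumptions,
# not part of the original module.
NODE_CLASS_MAPPINGS = {
    "CLIPSplitter": CLIPSplitter,
    "CLIPCombiner": CLIPCombiner,
}

NODE_DISPLAY_NAME_MAPPINGS = {
    "CLIPSplitter": "CLIP Splitter",
    "CLIPCombiner": "CLIP Combiner",
}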