openfree committed
Commit 3349f3f · verified · 1 Parent(s): db40b19

Delete app-backup.py

Files changed (1)
  1. app-backup.py +0 -450
app-backup.py DELETED
@@ -1,450 +0,0 @@
import os
import gc
import uuid
import random
import tempfile
import time
from datetime import datetime
from typing import Any
from huggingface_hub import login, hf_hub_download
import spaces

import gradio as gr
import numpy as np
import torch
from PIL import Image, ImageDraw, ImageFont
from diffusers import FluxPipeline
from transformers import pipeline

# Memory cleanup helper
def clear_memory():
    gc.collect()
    try:
        if torch.cuda.is_available():
            with torch.cuda.device(0):
                torch.cuda.empty_cache()
    except Exception:
        pass

# GPU configuration
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")

if torch.cuda.is_available():
    try:
        with torch.cuda.device(0):
            torch.cuda.empty_cache()
            torch.backends.cudnn.benchmark = True
            torch.backends.cuda.matmul.allow_tf32 = True
    except Exception:
        print("Warning: Could not configure CUDA settings")

# HF token setup
HF_TOKEN = os.getenv("HF_TOKEN")
if HF_TOKEN is None:
    raise ValueError("Please set the HF_TOKEN environment variable")

try:
    login(token=HF_TOKEN)
except Exception as e:
    raise ValueError(f"Failed to login to Hugging Face: {str(e)}")


translator = pipeline("translation", model="Helsinki-NLP/opus-mt-ko-en", device=-1)  # run on CPU

def translate_to_english(text: str) -> str:
    """Translate Korean text to English; pass non-Korean text through unchanged."""
    try:
        if any(ord('가') <= ord(char) <= ord('힣') for char in text):
            translated = translator(text, max_length=128)[0]['translation_text']
            print(f"Translated '{text}' to '{translated}'")
            return translated
        return text
    except Exception as e:
        print(f"Translation error: {str(e)}")
        return text
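
# Note: the Hangul check above only fires for characters in the '가'..'힣'
# syllable block, so a prompt like "달빛 아래 떠 있는 성" (hypothetical example)
# is routed through the ko->en model, while an already-English prompt is
# returned unchanged.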


# FLUX pipeline initialization
print("Initializing FLUX pipeline...")
try:
    pipe = FluxPipeline.from_pretrained(
        "black-forest-labs/FLUX.1-dev",
        torch_dtype=torch.float16,
        token=HF_TOKEN
    )
    print("FLUX pipeline initialized successfully")

    # Memory optimization settings
    pipe.enable_attention_slicing(slice_size=1)

    # GPU configuration
    if torch.cuda.is_available():
        pipe = pipe.to("cuda:0")
        torch.cuda.empty_cache()
        torch.backends.cudnn.benchmark = True
        torch.backends.cuda.matmul.allow_tf32 = True

    print("Pipeline optimization settings applied")

except Exception as e:
    print(f"Error initializing FLUX pipeline: {str(e)}")
    raise

# Load LoRA weights
print("Loading LoRA weights...")
try:
    # Resolve the absolute path of the local LoRA file
    current_dir = os.path.dirname(os.path.abspath(__file__))
    lora_path = os.path.join(current_dir, "myt-flux-fantasy.safetensors")

    if not os.path.exists(lora_path):
        raise FileNotFoundError(f"LoRA file not found at: {lora_path}")

    print(f"Loading LoRA weights from: {lora_path}")

    # Load and fuse the LoRA weights
    pipe.load_lora_weights(lora_path)
    pipe.fuse_lora(lora_scale=0.75)  # adjust lora_scale as needed

    # Free memory
    torch.cuda.empty_cache()
    gc.collect()

    print("LoRA weights loaded and fused successfully")
    print(f"Current device: {pipe.device}")

except Exception as e:
    print(f"Error loading LoRA weights: {str(e)}")
    print(f"Full error details: {repr(e)}")
    raise ValueError(f"Failed to load LoRA weights: {str(e)}")


@spaces.GPU(duration=60)
def generate_image(
    prompt: str,
    seed: int,
    randomize_seed: bool,
    width: int,
    height: int,
    guidance_scale: float,
    num_inference_steps: int,
    progress: gr.Progress = gr.Progress()
):
    try:
        clear_memory()

        translated_prompt = translate_to_english(prompt)
        print(f"Processing prompt: {translated_prompt}")

        if randomize_seed:
            seed = random.randint(0, MAX_SEED)

        generator = torch.Generator(device=device).manual_seed(seed)

        print(f"Current device: {pipe.device}")
        print("Starting image generation...")

        with torch.inference_mode(), torch.cuda.amp.autocast(enabled=True):
            image = pipe(
                prompt=translated_prompt,
                width=width,
                height=height,
                num_inference_steps=num_inference_steps,
                guidance_scale=guidance_scale,
                generator=generator,
                num_images_per_prompt=1,
            ).images[0]

        filepath = save_generated_image(image, translated_prompt)
        print(f"Image generated and saved to: {filepath}")
        return image, seed

    except Exception as e:
        print(f"Generation error: {str(e)}")
        print(f"Full error details: {repr(e)}")
        raise gr.Error(f"Image generation failed: {str(e)}")
    finally:
        clear_memory()
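
# Illustrative call (hypothetical values; in the app this function is invoked
# via the "Generate Image" button wired up below):
#   image, used_seed = generate_image("fantasy castle", seed=42, randomize_seed=False,
#                                     width=768, height=768, guidance_scale=7.5,
#                                     num_inference_steps=30)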

# Output directory setup
SAVE_DIR = "saved_images"
if not os.path.exists(SAVE_DIR):
    os.makedirs(SAVE_DIR, exist_ok=True)

MAX_SEED = np.iinfo(np.int32).max
MAX_IMAGE_SIZE = 1024

def save_generated_image(image, prompt):
    timestamp = datetime.now().strftime("%Y%m%d_%H%M%S")
    unique_id = str(uuid.uuid4())[:8]
    filename = f"{timestamp}_{unique_id}.png"
    filepath = os.path.join(SAVE_DIR, filename)
    image.save(filepath)
    return filepath
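
# Saved files follow the pattern saved_images/<YYYYmmdd_HHMMSS>_<8-char-uuid>.png,
# e.g. saved_images/20250101_120000_1a2b3c4d.png (illustrative name only).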


def add_text_with_stroke(draw, text, x, y, font, text_color, stroke_width):
    """Draw text with an outline (stroke) effect."""
    for adj_x in range(-stroke_width, stroke_width + 1):
        for adj_y in range(-stroke_width, stroke_width + 1):
            draw.text((x + adj_x, y + adj_y), text, font=font, fill=text_color)
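
# With stroke_width=1 the nested loops above draw the glyphs at all nine offsets
# in {-1, 0, 1} x {-1, 0, 1} (including the origin), producing a faux-bold /
# outline effect; the "Text Thickness" slider in the UI maps to this offset radius.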

def add_text_to_image(
    input_image,
    text,
    font_size,
    color,
    opacity,
    x_position,
    y_position,
    thickness,
    text_position_type,
    font_choice
):
    try:
        if input_image is None or text.strip() == "":
            return input_image

        if not isinstance(input_image, Image.Image):
            if isinstance(input_image, np.ndarray):
                image = Image.fromarray(input_image)
            else:
                raise ValueError("Unsupported image type")
        else:
            image = input_image.copy()

        if image.mode != 'RGBA':
            image = image.convert('RGBA')

        font_files = {
            "Default": "DejaVuSans.ttf",
            "Korean Regular": "ko-Regular.ttf"
        }

        try:
            font_file = font_files.get(font_choice, "DejaVuSans.ttf")
            font = ImageFont.truetype(font_file, int(font_size))
        except Exception as e:
            print(f"Font loading error ({font_choice}): {str(e)}")
            font = ImageFont.load_default()

        color_map = {
            'White': (255, 255, 255),
            'Black': (0, 0, 0),
            'Red': (255, 0, 0),
            'Green': (0, 255, 0),
            'Blue': (0, 0, 255),
            'Yellow': (255, 255, 0),
            'Purple': (128, 0, 128)
        }
        rgb_color = color_map.get(color, (255, 255, 255))

        temp_draw = ImageDraw.Draw(image)
        text_bbox = temp_draw.textbbox((0, 0), text, font=font)
        text_width = text_bbox[2] - text_bbox[0]
        text_height = text_bbox[3] - text_bbox[1]

        actual_x = int((image.width - text_width) * (x_position / 100))
        actual_y = int((image.height - text_height) * (y_position / 100))

        text_color = (*rgb_color, int(opacity))

        txt_overlay = Image.new('RGBA', image.size, (255, 255, 255, 0))
        draw = ImageDraw.Draw(txt_overlay)

        add_text_with_stroke(
            draw,
            text,
            actual_x,
            actual_y,
            font,
            text_color,
            int(thickness)
        )
        output_image = Image.alpha_composite(image, txt_overlay)

        output_image = output_image.convert('RGB')

        return output_image

    except Exception as e:
        print(f"Error in add_text_to_image: {str(e)}")
        return input_image


css = """
footer {display: none}
.main-title {
    text-align: center;
    margin: 1em 0;
    padding: 1.5em;
    background: linear-gradient(135deg, #f5f7fa 0%, #c3cfe2 100%);
    border-radius: 15px;
    box-shadow: 0 4px 6px rgba(0,0,0,0.1);
}
.main-title h1 {
    color: #2196F3;
    font-size: 2.8em;
    margin-bottom: 0.3em;
    font-weight: 700;
}
.main-title p {
    color: #555;
    font-size: 1.3em;
    line-height: 1.4;
}
.container {
    max-width: 1200px;
    margin: auto;
    padding: 20px;
}
.input-panel, .output-panel {
    background: white;
    padding: 1.5em;
    border-radius: 12px;
    box-shadow: 0 2px 8px rgba(0,0,0,0.08);
    margin-bottom: 1em;
}
"""

with gr.Blocks(theme=gr.themes.Soft(), css=css) as demo:
    gr.HTML("""
        <div class="main-title">
            <h1>🎨 Webtoon Studio</h1>
            <p>Generate webtoon-style images and add text with various styles and positions.</p>
        </div>
    """)

    with gr.Row():
        with gr.Column(scale=1):
            # Image generation section
            gen_prompt = gr.Textbox(
                label="Generation Prompt",
                placeholder="Enter your image generation prompt..."
            )
            with gr.Row():
                gen_width = gr.Slider(512, 1024, 768, step=64, label="Width")
                gen_height = gr.Slider(512, 1024, 768, step=64, label="Height")

            with gr.Row():
                guidance_scale = gr.Slider(1, 20, 7.5, step=0.5, label="Guidance Scale")
                num_steps = gr.Slider(1, 50, 30, step=1, label="Number of Steps")

            with gr.Row():
                seed = gr.Number(label="Seed", value=-1)
                randomize_seed = gr.Checkbox(label="Randomize Seed", value=True)

            generate_btn = gr.Button("Generate Image", variant="primary")

            output_image = gr.Image(
                label="Generated Image",
                type="pil",
                show_download_button=True
            )
            output_seed = gr.Number(label="Used Seed", interactive=False)

            # Text overlay section
            with gr.Accordion("Text Options", open=False):
                text_input = gr.Textbox(
                    label="Text Content",
                    placeholder="Enter text to add..."
                )
                text_position_type = gr.Radio(
                    choices=["Text Over Image"],
                    value="Text Over Image",
                    label="Text Position",
                    visible=True
                )
                with gr.Row():
                    font_choice = gr.Dropdown(
                        choices=["Default", "Korean Regular"],
                        value="Default",
                        label="Font Selection",
                        interactive=True
                    )
                    font_size = gr.Slider(
                        minimum=10,
                        maximum=200,
                        value=40,
                        step=5,
                        label="Font Size"
                    )
                with gr.Row():
                    color_dropdown = gr.Dropdown(
                        choices=["White", "Black", "Red", "Green", "Blue", "Yellow", "Purple"],
                        value="White",
                        label="Text Color"
                    )
                    thickness = gr.Slider(
                        minimum=0,
                        maximum=10,
                        value=1,
                        step=1,
                        label="Text Thickness"
                    )
                with gr.Row():
                    opacity_slider = gr.Slider(
                        minimum=0,
                        maximum=255,
                        value=255,
                        step=1,
                        label="Opacity"
                    )
                with gr.Row():
                    x_position = gr.Slider(
                        minimum=0,
                        maximum=100,
                        value=50,
                        step=1,
                        label="Left (0%) ~ Right (100%)"
                    )
                    y_position = gr.Slider(
                        minimum=0,
                        maximum=100,
                        value=50,
                        step=1,
                        label="Top (0%) ~ Bottom (100%)"
                    )
                add_text_btn = gr.Button("Apply Text", variant="primary")

    # Event bindings
    generate_btn.click(
        fn=generate_image,
        inputs=[
            gen_prompt,
            seed,
            randomize_seed,
            gen_width,
            gen_height,
            guidance_scale,
            num_steps,
        ],
        outputs=[output_image, output_seed]
    )

    add_text_btn.click(
        fn=add_text_to_image,
        inputs=[
            output_image,
            text_input,
            font_size,
            color_dropdown,
            opacity_slider,
            x_position,
            y_position,
            thickness,
            text_position_type,
            font_choice
        ],
        outputs=output_image
    )

demo.queue(max_size=5)
demo.launch(
    server_name="0.0.0.0",
    server_port=7860,
    share=False,
    max_threads=2
)