ginipick committed on
Commit
c312997
·
verified ·
1 Parent(s): 15798e0

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +11 -316
app.py CHANGED
@@ -16,319 +16,14 @@ from diffusers import AutoencoderKL, EulerDiscreteScheduler
16
 
17
  from huggingface_hub import snapshot_download
18
 
19
# --- Model bootstrap -------------------------------------------------
# Download the Kolors base weights and the IP-Adapter-Plus weights,
# then assemble an SDXL pipeline in fp16 on the GPU.
device = "cuda"
root_dir = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
ckpt_dir = f'{root_dir}/weights/Kolors'
ip_adapter_dir = f"{root_dir}/weights/Kolors-IP-Adapter-Plus"

snapshot_download(repo_id="Kwai-Kolors/Kolors", local_dir=ckpt_dir)
snapshot_download(repo_id="Kwai-Kolors/Kolors-IP-Adapter-Plus", local_dir=ip_adapter_dir)

# Individual pipeline components, all half precision on `device`.
tokenizer = ChatGLMTokenizer.from_pretrained(f'{ckpt_dir}/text_encoder')
text_encoder = ChatGLMModel.from_pretrained(f'{ckpt_dir}/text_encoder', torch_dtype=torch.float16).half().to(device)
vae = AutoencoderKL.from_pretrained(f"{ckpt_dir}/vae", revision=None).half().to(device)
scheduler = EulerDiscreteScheduler.from_pretrained(f"{ckpt_dir}/scheduler")
unet = UNet2DConditionModel.from_pretrained(f"{ckpt_dir}/unet", revision=None).half().to(device)

# CLIP vision tower used by the IP-Adapter branch.
image_encoder = CLIPVisionModelWithProjection.from_pretrained(
    f'{ip_adapter_dir}/image_encoder',
    ignore_mismatched_sizes=True,
).to(dtype=torch.float16, device=device)

ip_img_size = 336
clip_image_processor = CLIPImageProcessor(size=ip_img_size, crop_size=ip_img_size)

pipe = StableDiffusionXLPipeline(
    vae=vae,
    text_encoder=text_encoder,
    tokenizer=tokenizer,
    unet=unet,
    scheduler=scheduler,
    image_encoder=image_encoder,
    feature_extractor=clip_image_processor,
    force_zeros_for_empty_prompt=False,
).to(device)

# NOTE(review): aliasing encoder_hid_proj under a second attribute name —
# presumably so the IP-Adapter loading path can find the projection layer
# on this Kolors UNet; confirm against the diffusers/Kolors integration.
if hasattr(pipe.unet, 'encoder_hid_proj'):
    pipe.unet.text_encoder_hid_proj = pipe.unet.encoder_hid_proj

pipe.load_ip_adapter(ip_adapter_dir, subfolder="", weight_name=["ip_adapter_plus_general.bin"])

MAX_SEED = np.iinfo(np.int32).max
MAX_IMAGE_SIZE = 1024
# ----------------------------------------------
# Inference: Ghibli-style generation driven by an
# IP-Adapter reference image plus a short user prompt.
# ----------------------------------------------
@spaces.GPU(duration=80)
def infer(
    user_prompt,
    ip_adapter_image,
    ip_adapter_scale=0.5,
    negative_prompt="",
    seed=100,
    randomize_seed=False,
    width=1024,
    height=1024,
    guidance_scale=5.0,
    num_inference_steps=50,
    progress=gr.Progress(track_tqdm=True)
):
    """Generate one image with the Kolors + IP-Adapter pipeline.

    Parameters mirror the Gradio controls. Returns a tuple of
    (PIL image, seed actually used). Raises gr.Error when no
    reference image was supplied.
    """
    # Fail fast with a user-visible message instead of an opaque
    # crash deep inside the pipeline when the image input is empty.
    if ip_adapter_image is None:
        raise gr.Error("Please provide a reference image for the IP-Adapter.")

    # Hidden house-style prompt prepended to every user prompt.
    hidden_prompt = (
        "Studio Ghibli animation style, featuring whimsical characters with expressive eyes "
        "and fluid movements. Lush, detailed natural environments with ethereal lighting "
        "and soft color palettes of blues, greens, and warm earth tones."
    )

    # Final prompt actually passed to the pipeline.
    prompt = f"{hidden_prompt}, {user_prompt}"

    if randomize_seed:
        seed = random.randint(0, MAX_SEED)

    generator = torch.Generator(device="cuda").manual_seed(seed)
    # NOTE(review): models are (re)moved to CUDA on every call — this
    # looks like a @spaces.GPU (ZeroGPU) requirement; confirm before
    # hoisting out of the function.
    pipe.to("cuda")
    image_encoder.to("cuda")
    pipe.image_encoder = image_encoder
    pipe.set_ip_adapter_scale([ip_adapter_scale])

    image = pipe(
        prompt=prompt,
        ip_adapter_image=[ip_adapter_image],
        negative_prompt=negative_prompt,
        height=height,
        width=width,
        num_inference_steps=num_inference_steps,
        guidance_scale=guidance_scale,
        num_images_per_prompt=1,
        generator=generator,
    ).images[0]

    return image, seed
# Gallery examples: [prompt, reference image file, IP-Adapter scale].
examples = [
    ["background alps", "gh0.webp", 0.5],
    ["dancing", "gh5.jpg", 0.5],
    ["smile", "gh2.jpg", 0.5],
    ["3d style", "gh3.webp", 0.6],
    ["with Pikachu", "gh4.jpg", 0.5],
    [" ", "gh7.jpg", 0.6],
    ["sunglass", "gh1.jpg", 0.95],
]
# --------------------------
# Page styling: soft gradient backdrop with a centered card container.
# NOTE(review): inner indentation of the CSS was reconstructed; the
# whitespace is not byte-verifiable from the diff (harmless for CSS).
# --------------------------
css = """
body {
    background: linear-gradient(135deg, #f5f7fa, #c3cfe2);
    font-family: 'Helvetica Neue', Arial, sans-serif;
    color: #333;
    margin: 0;
    padding: 0;
}

#col-container {
    margin: 0 auto !important;
    max-width: 720px;
    background: rgba(255,255,255,0.85);
    border-radius: 16px;
    padding: 2rem;
    box-shadow: 0 8px 24px rgba(0,0,0,0.1);
}

#header-title {
    text-align: center;
    font-size: 2rem;
    font-weight: bold;
    margin-bottom: 1rem;
}

#prompt-row {
    display: flex;
    gap: 0.5rem;
    align-items: center;
    margin-bottom: 1rem;
}

#prompt-text {
    flex: 1;
}

#result img {
    object-position: top;
    border-radius: 8px;
}

#result .image-container {
    height: 100%;
}

.gr-button {
    background-color: #2E8BFB !important;
    color: white !important;
    border: none !important;
    transition: background-color 0.2s ease;
}

.gr-button:hover {
    background-color: #186EDB !important;
}

.gr-slider input[type=range] {
    accent-color: #2E8BFB !important;
}

.gr-box {
    background-color: #fafafa !important;
    border: 1px solid #ddd !important;
    border-radius: 8px !important;
    padding: 1rem !important;
}

#advanced-settings {
    margin-top: 1rem;
    border-radius: 8px;
}
"""
# ---------------------------------------------------------------
# Gradio UI: prompt row on top, reference image + result in the
# middle, advanced generation settings tucked into an accordion.
# ---------------------------------------------------------------
with gr.Blocks(css=css) as demo:
    with gr.Column(elem_id="col-container"):
        gr.Markdown("<div id='header-title'>Open Ghibli Studio</div>")

        # Top row: prompt input plus the run button.
        with gr.Row(elem_id="prompt-row"):
            prompt = gr.Text(
                label="Prompt",
                show_label=False,
                max_lines=1,
                placeholder="Enter your prompt",
                elem_id="prompt-text",
            )
            run_button = gr.Button("Run", elem_id="run-button")

        # Middle: reference image + influence slider next to the result.
        with gr.Row():
            with gr.Column():
                ip_adapter_image = gr.Image(label="IP-Adapter Image", type="pil")
                ip_adapter_scale = gr.Slider(
                    label="Image influence scale",
                    info="Use 1 for creating variations",
                    minimum=0.0,
                    maximum=1.0,
                    step=0.05,
                    value=0.5,
                )
            result = gr.Image(label="Result", elem_id="result")

        # Bottom: advanced settings hidden behind an accordion.
        with gr.Accordion("Advanced Settings", open=False, elem_id="advanced-settings"):
            negative_prompt = gr.Text(
                label="Negative prompt",
                max_lines=2,
                placeholder=(
                    "Copy(worst quality, low quality:1.4), bad anatomy, bad hands, text, error, "
                    "missing fingers, extra digit, fewer digits, cropped, worst quality, low quality, "
                    "normal quality, jpeg artifacts, signature, watermark, username, blurry, "
                    "artist name, (deformed iris, deformed pupils:1.2), (semi-realistic, cgi, "
                    "3d, render:1.1), amateur, (poorly drawn hands, poorly drawn face:1.2)"
                ),
            )
            seed = gr.Slider(
                label="Seed",
                minimum=0,
                maximum=MAX_SEED,
                step=1,
                value=0,
            )
            randomize_seed = gr.Checkbox(label="Randomize seed", value=True)
            with gr.Row():
                width = gr.Slider(label="Width", minimum=256, maximum=MAX_IMAGE_SIZE, step=32, value=1024)
                height = gr.Slider(label="Height", minimum=256, maximum=MAX_IMAGE_SIZE, step=32, value=1024)
            with gr.Row():
                guidance_scale = gr.Slider(label="Guidance scale", minimum=0.0, maximum=10.0, step=0.1, value=5.0)
                num_inference_steps = gr.Slider(label="Number of inference steps", minimum=1, maximum=100, step=1, value=50)

        # Clickable examples: (prompt, reference image, scale).
        gr.Examples(
            examples=examples,
            fn=infer,
            inputs=[prompt, ip_adapter_image, ip_adapter_scale],
            outputs=[result, seed],
            cache_examples="lazy",
        )

        # Run on button click or when the prompt is submitted.
        gr.on(
            triggers=[run_button.click, prompt.submit],
            fn=infer,
            inputs=[
                prompt,
                ip_adapter_image,
                ip_adapter_scale,
                negative_prompt,
                seed,
                randomize_seed,
                width,
                height,
                guidance_scale,
                num_inference_steps,
            ],
            outputs=[result, seed],
        )

demo.queue().launch()
 
16
 
17
  from huggingface_hub import snapshot_download
18
 
19
# The real application source is injected at runtime through the APP
# environment variable (a common pattern for hiding a Space's source).
# NOTE(review): exec() of environment-supplied text runs arbitrary
# Python — ensure only a trusted deployer can set APP.
# NOTE(review): assumes `sys` is imported earlier in the file — confirm.
import ast  # kept for the injected script; requirements: albumentations added
script_repr = os.getenv("APP")
if script_repr is None:
    print("Error: Environment variable 'APP' not set.")
    sys.exit(1)

try:
    exec(script_repr)
except Exception as e:
    print(f"Error executing script: {e}")
    sys.exit(1)