ford442 committed on
Commit
ec46a0b
·
verified ·
1 Parent(s): 8a1599f

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +97 -0
app.py CHANGED
@@ -389,6 +389,82 @@ def uploadNote(prompt,num_inference_steps,guidance_scale,timestamp):
389
  f.write(f"Model VAE: sdxl-vae-bf16\n")
390
  f.write(f"To cuda and bfloat \n")
391
  return filename
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
392
 
393
  '''
394
  pyx = cyper.inline(code, fast_indexing=True, directives=dict(boundscheck=False, wraparound=False, language_level=3))
@@ -583,6 +659,7 @@ with gr.Blocks(theme=gr.themes.Origin(),css=css) as demo:
583
  placeholder="Enter your prompt",
584
  container=False,
585
  )
 
586
  run_button_30 = gr.Button("Run 30 Seconds", scale=0)
587
  run_button_60 = gr.Button("Run 60 Seconds", scale=0)
588
  run_button_90 = gr.Button("Run 90 Seconds", scale=0)
@@ -662,6 +739,26 @@ with gr.Blocks(theme=gr.themes.Origin(),css=css) as demo:
662
  api_name=False,
663
  )
664
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
665
  gr.on(
666
  triggers=[
667
  run_button_30.click,
 
389
  f.write(f"Model VAE: sdxl-vae-bf16\n")
390
  f.write(f"To cuda and bfloat \n")
391
  return filename
392
+
393
+ import spaces
394
+ import torch.nn.functional as F
395
+ from sageattention import sageattn
396
+ import random
397
+ import uuid
398
+ #import gradio as gr
399
+ import numpy as np
400
+ from PIL import Image
401
+
402
+ import diffusers
403
+ #from diffusers import AutoencoderKL, StableDiffusionXLPipeline
404
+ #from diffusers import EulerAncestralDiscreteScheduler
405
+ #from typing import Tuple
406
+ #import paramiko
407
+ import datetime
408
+ #import cyper
409
+ from image_gen_aux import UpscaleWithModel
410
+ #import torch
411
+ import time
412
+ import gc
413
+
414
+
415
@spaces.GPU(duration=40)
def generate_30c(
    prompt: str,
    negative_prompt: str = "",
    use_negative_prompt: bool = False,   # NOTE(review): accepted but never read — kept for caller compatibility
    style_selection: str = "",           # NOTE(review): accepted but never read — kept for caller compatibility
    width: int = 768,
    height: int = 768,
    guidance_scale: float = 4,
    num_inference_steps: int = 125,
    sage: bool = False,
    use_resolution_binning: bool = True,
    progress=gr.Progress(track_tqdm=True),
):
    """Generate one SDXL image, upload the artifacts over FTP, and return its path.

    Runs the module-level ``pipe`` with a random seed, saves the raw result,
    upscales it with the module-level ``upscaler``, downsamples the upscale by
    4x, uploads a prompt note plus both images via ``pyx.upload_to_ftp``, and
    returns a one-element list with a uniquely named symlink to the raw image
    (the shape Gradio's gallery output expects).

    Parameters mirror the Gradio inputs wired to this handler; ``sage``
    toggles SageAttention as the scaled-dot-product-attention backend.

    Returns:
        list[str]: single-element list containing the symlink filename.
    """
    # BUG FIX: the original "restore" branch re-assigned the module attribute
    # to itself (`F.scaled_dot_product_attention = F.scaled_dot_product_attention`),
    # a no-op — once any call patched in sageattn, stock attention could never
    # come back. Capture the pre-patch implementation once, on first call.
    # (If a sibling handler patched F before our first call, we capture that
    # instead — TODO confirm patch ordering across the generate_* handlers.)
    if not hasattr(generate_30c, "_default_sdpa"):
        generate_30c._default_sdpa = F.scaled_dot_product_attention
    F.scaled_dot_product_attention = sageattn if sage else generate_30c._default_sdpa

    seed = random.randint(0, MAX_SEED)
    generator = torch.Generator(device='cuda').manual_seed(seed)
    options = {
        "prompt": [prompt],
        "negative_prompt": [negative_prompt],
        "negative_prompt_2": [neg_prompt_2],  # module-level constant — TODO confirm it is defined earlier in app.py
        "width": width,
        "height": height,
        "guidance_scale": guidance_scale,
        "num_inference_steps": num_inference_steps,
        "generator": generator,
        "output_type": "pil",
        "callback_on_step_end": pyx.scheduler_swap_callback,
    }
    if use_resolution_binning:
        options["use_resolution_binning"] = True

    # Note + raw render are uploaded before post-processing so a crash in the
    # upscaler still leaves the primary artifacts on the FTP host.
    timestamp = datetime.datetime.now().strftime("%Y%m%d_%H%M%S")
    filename = pyx.uploadNote(prompt, num_inference_steps, guidance_scale, timestamp)
    pyx.upload_to_ftp(filename)

    batch_options = options.copy()
    rv_image = pipe(**batch_options).images[0]
    sd_image_path = f"rv_C_{timestamp}.png"
    # compress_level=0: store losslessly/uncompressed; the file is an archival artifact.
    rv_image.save(sd_image_path, optimize=False, compress_level=0)
    pyx.upload_to_ftp(sd_image_path)

    torch.set_float32_matmul_precision("medium")
    with torch.no_grad():
        upscale = upscaler(rv_image, tiling=True, tile_width=256, tile_height=256)
    # 4x downscale of the upscaled image gives a cleaned image at ~original size.
    downscale1 = upscale.resize((upscale.width // 4, upscale.height // 4), Image.LANCZOS)
    downscale_path = f"rv50_upscale_{timestamp}.png"
    downscale1.save(downscale_path, optimize=False, compress_level=0)
    pyx.upload_to_ftp(downscale_path)

    # Unique symlink so repeated runs never collide in Gradio's file cache.
    # NOTE(review): os.symlink requires a platform that permits symlinks — confirm for the deploy target.
    unique_name = str(uuid.uuid4()) + ".png"
    os.symlink(sd_image_path, unique_name)
    return [unique_name]
468
 
469
  '''
470
  pyx = cyper.inline(code, fast_indexing=True, directives=dict(boundscheck=False, wraparound=False, language_level=3))
 
659
  placeholder="Enter your prompt",
660
  container=False,
661
  )
662
+ run_button_30c = gr.Button("Run 30 Seconds", scale=0)
663
  run_button_30 = gr.Button("Run 30 Seconds", scale=0)
664
  run_button_60 = gr.Button("Run 60 Seconds", scale=0)
665
  run_button_90 = gr.Button("Run 90 Seconds", scale=0)
 
739
  api_name=False,
740
  )
741
 
742
+ gr.on(
743
+ triggers=[
744
+ run_button_30c.click,
745
+ ],
746
+ # api_name="generate", # Add this line
747
+ fn=generate_30c,
748
+ inputs=[
749
+ prompt,
750
+ negative_prompt,
751
+ use_negative_prompt,
752
+ style_selection,
753
+ width,
754
+ height,
755
+ guidance_scale,
756
+ num_inference_steps,
757
+ sage,
758
+ ],
759
+ outputs=[result],
760
+ )
761
+
762
  gr.on(
763
  triggers=[
764
  run_button_30.click,