Spaces: Running on Zero

Update app.py

app.py CHANGED
@@ -9,6 +9,11 @@
 import os
 #subprocess.run(['sh', './torch.sh'])
 
+os.putenv('TORCH_LINALG_PREFER_CUSOLVER','1')
+os.putenv('PYTORCH_CUDA_ALLOC_CONF','max_split_size_mb:128')
+os.environ["SAFETENSORS_FAST_GPU"] = "1"
+os.putenv('HF_HUB_ENABLE_HF_TRANSFER','1')
+
 import gradio as gr
 import numpy as np
 from PIL import Image
@@ -58,15 +63,13 @@ styles = {k["name"]: (k["prompt"], k["negative_prompt"]) for k in style_list}
 DEFAULT_STYLE_NAME = "Style Zero"
 STYLE_NAMES = list(styles.keys())
 
-# os.putenv('TORCH_LINALG_PREFER_CUSOLVER','1')
-
 MAX_SEED = np.iinfo(np.int32).max
 
 code = r'''
+import os
 
 import torch
 import paramiko
-import os
 
 torch.backends.cuda.matmul.allow_tf32 = False
 torch.backends.cuda.matmul.allow_bf16_reduced_precision_reduction = False
@@ -74,12 +77,11 @@ torch.backends.cuda.matmul.allow_fp16_reduced_precision_reduction = False
 torch.backends.cudnn.allow_tf32 = False
 torch.backends.cudnn.deterministic = False
 torch.backends.cudnn.benchmark = False
-
-
+torch.backends.cuda.preferred_blas_library="cublas"
+torch.backends.cuda.preferred_linalg_library="cusolver"
 torch.set_float32_matmul_precision("highest")
 
 HF_TOKEN = os.getenv("HF_TOKEN")
-
 FTP_HOST = os.getenv("FTP_HOST")
 FTP_USER = os.getenv("FTP_USER")
 FTP_PASS = os.getenv("FTP_PASS")
@@ -173,9 +175,7 @@ import gc
 
 MAX_SEED = np.iinfo(np.int32).max
 
-os.putenv('HF_HUB_ENABLE_HF_TRANSFER','1')
 device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
-os.environ["SAFETENSORS_FAST_GPU"] = "1"
 
 #accelerator = Accelerator(mixed_precision="bf16") # Example
 
@@ -197,7 +197,7 @@ def load_and_prepare_model():
     )
     pipe.vae = vaeXL #.to(torch.bfloat16)
     #pipe.scheduler = sched
-    pipe.vae.do_resize = False
+    #pipe.vae.do_resize = False
     pipe.vae.do_convert_rgb = True
     pipe.vae.set_default_attn_processor()
     print(f'init noise scale: {pipe.scheduler.init_noise_sigma}')
@@ -253,7 +253,7 @@ def generate_30(
     np.random.seed(seed)
     torch.manual_seed(seed)
     torch.cuda.manual_seed_all(seed)
-    generator = torch.Generator(device='
+    generator = torch.Generator(device='cpu').manual_seed(seed)
     options = {
         "prompt": [prompt],
         "negative_prompt": [negative_prompt],
@@ -311,7 +311,7 @@ def generate_60(
     np.random.seed(seed)
     torch.manual_seed(seed)
     torch.cuda.manual_seed_all(seed)
-    generator = torch.Generator(device='
+    generator = torch.Generator(device='cpu').manual_seed(seed)
     options = {
         "prompt": [prompt],
         "negative_prompt": [negative_prompt],
@@ -369,7 +369,7 @@ def generate_90(
     np.random.seed(seed)
     torch.manual_seed(seed)
     torch.cuda.manual_seed_all(seed)
-    generator = torch.Generator(device='
+    generator = torch.Generator(device='cpu').manual_seed(seed)
     options = {
         "prompt": [prompt],
         "negative_prompt": [negative_prompt],
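Editor's note on the first hunk, which moves the environment tweaks ahead of every import: os.putenv updates only the C-level environment, not os.environ, so Python code that reads os.getenv (as huggingface_hub does for HF_HUB_ENABLE_HF_TRANSFER) may not see values set that way. A minimal sketch of the intended ordering, using os.environ throughout; the variable values are copied from the diff, the comments are editorial:

import os

# Set before importing torch / huggingface_hub, which read these at import
# or initialization time; os.environ (unlike os.putenv alone) also updates
# the in-process copy that os.getenv consults.
os.environ['PYTORCH_CUDA_ALLOC_CONF'] = 'max_split_size_mb:128'  # CUDA caching-allocator tuning
os.environ['HF_HUB_ENABLE_HF_TRANSFER'] = '1'                    # hf_transfer download path
os.environ['SAFETENSORS_FAST_GPU'] = '1'                         # faster safetensors GPU loads
os.environ['TORCH_LINALG_PREFER_CUSOLVER'] = '1'                 # prefer the cuSOLVER backend

import torch  # noqa: E402 - deliberately imported after the environment is set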
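On the third hunk, which pins the CUDA BLAS and linalg backends next to the existing TF32 switches: torch.backends.cuda.preferred_blas_library and preferred_linalg_library are functions in current PyTorch, so the attribute-assignment form in the diff is likely a silent no-op. A sketch of the call form, under that assumption:

import torch

# Keep float32 matmuls at full IEEE precision: disable the TF32 fast paths
# (slower on Ampere+ GPUs, but bit-stable results).
torch.backends.cuda.matmul.allow_tf32 = False
torch.backends.cudnn.allow_tf32 = False
torch.set_float32_matmul_precision("highest")

# Pin the backends via the function call, not attribute assignment.
torch.backends.cuda.preferred_blas_library("cublas")
torch.backends.cuda.preferred_linalg_library("cusolver")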
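The last three hunks make generate_30/generate_60/generate_90 seed a CPU generator explicitly. A CPU generator yields the same noise sequence regardless of which device the pipeline runs on, which is the usual reproducibility recipe for diffusion pipelines; a self-contained sketch (the seed value is illustrative):

import torch

seed = 12345

# Same seed, same CPU stream -> identical noise, independent of any GPU state.
generator = torch.Generator(device='cpu').manual_seed(seed)
noise_a = torch.randn(4, generator=generator)
noise_b = torch.randn(4, generator=torch.Generator(device='cpu').manual_seed(seed))
assert torch.equal(noise_a, noise_b)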