Hugging Face Space (current status: Runtime error)
Commit: "Update app.py" — diff of app.py (changed file)
@@ -127,20 +127,21 @@ def scheduler_swap_callback(pipeline, step_index, timestep, callback_kwargs):
|
|
127 |
return {"latents": callback_kwargs["latents"]}
|
128 |
|
129 |
def load_and_prepare_model():
|
|
|
130 |
vaeXL = AutoencoderKL.from_pretrained("stabilityai/sdxl-vae", safety_checker=None, use_safetensors=False).to(torch.bfloat16) #.to(device=device, dtype=torch.bfloat16)
|
131 |
#vaeRV = AutoencoderKL.from_pretrained("SG161222/RealVisXL_V5.0", subfolder='vae', safety_checker=None, use_safetensors=False).to(device).to(torch.bfloat16) #.to(device=device, dtype=torch.bfloat16)
|
132 |
#sched = EulerAncestralDiscreteScheduler.from_pretrained('ford442/RealVisXL_V5.0_BF16', subfolder='scheduler',beta_schedule="scaled_linear")
|
133 |
-
sched = EulerAncestralDiscreteScheduler.from_pretrained('ford442/RealVisXL_V5.0_BF16', subfolder='scheduler')
|
134 |
pipe = StableDiffusionXLPipeline.from_pretrained(
|
135 |
'ford442/RealVisXL_V5.0_BF16',
|
136 |
#torch_dtype=torch.bfloat16,
|
137 |
add_watermarker=False,
|
138 |
# low_cpu_mem_usage = False,
|
139 |
token = HF_TOKEN,
|
140 |
-
|
141 |
)
|
142 |
pipe.vae = vaeXL #.to(torch.bfloat16)
|
143 |
-
|
144 |
#pipe.vae.do_resize=False
|
145 |
#pipe.vae.vae_scale_factor=8
|
146 |
# pipe.to(device=device, dtype=torch.bfloat16)
|
@@ -161,20 +162,20 @@ def load_and_prepare_model():
|
|
161 |
'''
|
162 |
|
163 |
pipe.unet = pipe.unet.to(memory_format=torch.contiguous_format)
|
|
|
164 |
|
165 |
return pipe
|
166 |
|
167 |
hidet.option.parallel_build(True)
|
168 |
-
hidet.option.parallel_tune(-1,
|
169 |
|
170 |
torch._dynamo.config.suppress_errors = True
|
171 |
torch._dynamo.disallow_in_graph(diffusers.models.attention.BasicTransformerBlock)
|
172 |
# Preload and compile both models
|
173 |
-
pipe = load_and_prepare_model()
|
174 |
|
175 |
|
176 |
# more search
|
177 |
-
hidet.torch.dynamo_config.search_space(
|
178 |
#hidet.torch.dynamo_config.dump_graph_ir("./local_graph")
|
179 |
hidet.option.cache_dir("local_cache")
|
180 |
# automatically transform the model to use float16 data type
|
@@ -183,8 +184,8 @@ hidet.option.cache_dir("local_cache")
|
|
183 |
#hidet.torch.dynamo_config.use_fp16_reduction(True)
|
184 |
# use tensorcore
|
185 |
hidet.torch.dynamo_config.use_tensor_core()
|
186 |
-
pipe.unet = torch.compile(pipe.unet, backend="hidet")
|
187 |
|
|
|
188 |
|
189 |
MAX_SEED = np.iinfo(np.int32).max
|
190 |
|
|
|
127 |
return {"latents": callback_kwargs["latents"]}
|
128 |
|
129 |
def load_and_prepare_model():
|
130 |
+
sched = EulerAncestralDiscreteScheduler.from_pretrained('ford442/RealVisXL_V5.0_BF16', subfolder='scheduler',beta_schedule="scaled_linear", beta_start=0.00085, beta_end=0.012, steps_offset=1) #,use_karras_sigmas=True)
|
131 |
vaeXL = AutoencoderKL.from_pretrained("stabilityai/sdxl-vae", safety_checker=None, use_safetensors=False).to(torch.bfloat16) #.to(device=device, dtype=torch.bfloat16)
|
132 |
#vaeRV = AutoencoderKL.from_pretrained("SG161222/RealVisXL_V5.0", subfolder='vae', safety_checker=None, use_safetensors=False).to(device).to(torch.bfloat16) #.to(device=device, dtype=torch.bfloat16)
|
133 |
#sched = EulerAncestralDiscreteScheduler.from_pretrained('ford442/RealVisXL_V5.0_BF16', subfolder='scheduler',beta_schedule="scaled_linear")
|
134 |
+
#sched = EulerAncestralDiscreteScheduler.from_pretrained('ford442/RealVisXL_V5.0_BF16', subfolder='scheduler')
|
135 |
pipe = StableDiffusionXLPipeline.from_pretrained(
|
136 |
'ford442/RealVisXL_V5.0_BF16',
|
137 |
#torch_dtype=torch.bfloat16,
|
138 |
add_watermarker=False,
|
139 |
# low_cpu_mem_usage = False,
|
140 |
token = HF_TOKEN,
|
141 |
+
# scheduler = sched,
|
142 |
)
|
143 |
pipe.vae = vaeXL  # swap in the standalone SDXL VAE loaded above (bf16)
# FIX: the committed line was `pipe.scheduler = sched,` — the trailing comma
# assigns the 1-tuple (sched,) instead of the scheduler object, so every later
# pipeline call that invokes scheduler methods crashes (matches the Space's
# "Runtime error" status). Assign the scheduler itself.
pipe.scheduler = sched
|
145 |
#pipe.vae.do_resize=False
|
146 |
#pipe.vae.vae_scale_factor=8
|
147 |
# pipe.to(device=device, dtype=torch.bfloat16)
|
|
|
162 |
'''
|
163 |
|
164 |
pipe.unet = pipe.unet.to(memory_format=torch.contiguous_format)
|
165 |
+
pipe.unet = torch.compile(pipe.unet, backend="hidet")
|
166 |
|
167 |
return pipe
|
168 |
|
169 |
hidet.option.parallel_build(True)
|
170 |
+
hidet.option.parallel_tune(-1,8.0)
|
171 |
|
172 |
torch._dynamo.config.suppress_errors = True
|
173 |
torch._dynamo.disallow_in_graph(diffusers.models.attention.BasicTransformerBlock)
|
174 |
# Preload and compile both models
|
|
|
175 |
|
176 |
|
177 |
# more search
|
178 |
+
hidet.torch.dynamo_config.search_space(1)
|
179 |
#hidet.torch.dynamo_config.dump_graph_ir("./local_graph")
|
180 |
hidet.option.cache_dir("local_cache")
|
181 |
# automatically transform the model to use float16 data type
|
|
|
184 |
#hidet.torch.dynamo_config.use_fp16_reduction(True)
|
185 |
# use tensorcore
|
186 |
hidet.torch.dynamo_config.use_tensor_core()
|
|
|
187 |
|
188 |
+
pipe = load_and_prepare_model()
|
189 |
|
190 |
MAX_SEED = np.iinfo(np.int32).max
|
191 |
|