fffiloni committed
Commit 0b317b0 · Parent: 0d644e2

Update app.py

Files changed (1): app.py (+6 -3)
app.py CHANGED

@@ -2,7 +2,10 @@ import gradio as gr
import os

title="Prompt Converter"
-description="Convert a v1.x stable diffusion prompt to a stable diffusion 2.x prompt"
+description="""
+Stable Diffusion 2 uses the OpenCLIP ViT-H model trained on the LAION dataset, so it knows different things than the OpenAI ViT-L we're all used to prompting.
+<br />This demo converts a v1.x Stable Diffusion prompt to a Stable Diffusion 2.x prompt by generating an image through RunwayML Stable Diffusion 1.5, then interrogating the resulting image through CLIP Interrogator 2 to give you a Stable Diffusion 2 equivalent prompt.
+"""
stable_diffusion = gr.Blocks.load(name="spaces/runwayml/stable-diffusion-v1-5")
clip_interrogator_2 = gr.Blocks.load(name="spaces/fffiloni/CLIP-Interrogator-2")

@@ -25,7 +28,7 @@ def infer(prompt):
    #print(result)
    return result[0]

-prompt_input = gr.Textbox(lines=4, label="Input Stable Diffusion 1 prompt")
-prompt_output = gr.Textbox(lines=4, label="Converted Stable Diffusion 2 prompt ")
+prompt_input = gr.Textbox(lines=4, label="Input v1.x Stable Diffusion prompt")
+prompt_output = gr.Textbox(lines=4, label="Converted v2.x Stable Diffusion prompt")

gr.Interface(fn=infer, inputs=[prompt_input], outputs=[prompt_output],title=title,description=description).queue(max_size=10).launch(enable_queue=True)
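The body of infer is not shown in this diff; only its last lines appear as hunk context. For orientation, here is a minimal sketch, assuming the Gradio 3.x pattern of calling a loaded Space like a function, of how the two Spaces could be chained. The fn_index values, the argument lists, and the indexing of intermediate outputs are assumptions for illustration, not taken from the repository.

```python
# Illustrative sketch only -- not the commit's actual code. Gradio 3.x lets a
# Space loaded with gr.Blocks.load() be called like a function; the fn_index
# values and the result handling below are assumptions.
import gradio as gr

stable_diffusion = gr.Blocks.load(name="spaces/runwayml/stable-diffusion-v1-5")
clip_interrogator_2 = gr.Blocks.load(name="spaces/fffiloni/CLIP-Interrogator-2")

def infer(prompt):
    # Step 1: render the v1.x prompt with the Stable Diffusion 1.5 Space
    # (hypothetical fn_index; it depends on the Space's exposed endpoints).
    images = stable_diffusion(prompt, fn_index=1)
    # Step 2: interrogate the generated image with CLIP Interrogator 2 so the
    # returned prompt is phrased in terms the OpenCLIP ViT-H encoder knows.
    result = clip_interrogator_2(images[0], fn_index=0)
    # The loaded Space returns its outputs as a list; the first entry is the
    # converted prompt, matching the return result[0] seen in the diff.
    return result[0]
```

Only the final return result[0] is confirmed by the diff context above; treat everything else as a guess at the chaining rather than the Spaces' actual endpoints.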