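# Prompt Converter: turn a Stable Diffusion 1.x prompt into a Stable Diffusion 2.x-friendly prompt
# by generating an image with SD 1.5 and interrogating it with CLIP Interrogator 2.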
import gradio as gr
import os
title="Prompt Converter" | |
description=""" | |
Stable Diffusion 2 uses OpenCLIP ViT-H model trained LAION dataset so it knows different things than the OpenAI ViT-L we're all used to prompting. | |
<br />This demo Convert a v1.x stable diffusion prompt to a stable diffusion 2.x prompt, by generating an image through RunwayML Stable Diffusion 1.5, then Interrogate the resulting image through CLIP Interrogator 2 to give you a Stable Diffusion 2 equivalent prompt. | |
""" | |
stable_diffusion = gr.Blocks.load(name="spaces/runwayml/stable-diffusion-v1-5")
clip_interrogator_2 = gr.Blocks.load(name="spaces/fffiloni/CLIP-Interrogator-2")
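# Generate images with Stable Diffusion 1.5 (fn_index=2 selects the upstream Space's
# generation endpoint, which returns a directory of gallery images) and return the first one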
def get_images(prompt):
    gallery_dir = stable_diffusion(prompt, fn_index=2)
    img_results = [os.path.join(gallery_dir, img) for img in os.listdir(gallery_dir)]
    return img_results[0]
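# Interrogate the image with CLIP Interrogator 2 in 'best' mode to produce a v2.x-style prompt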
def get_new_prompt(img):
    interrogate = clip_interrogator_2(img, 'best', 4, api_name="clipi2")
    return interrogate
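# Full pipeline: prompt -> SD 1.5 image -> CLIP Interrogator 2 output (first element is the prompt text)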
def infer(prompt):
    img = get_images(prompt)
    result = get_new_prompt(img)
    return result[0]
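# Simple Gradio UI: one textbox in, one textbox out, with a request queue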
prompt_input = gr.Textbox(lines=4, label="Input v1.x Stable Diffusion prompt")
prompt_output = gr.Textbox(lines=4, label="Converted v2.x Stable Diffusion prompt")
gr.Interface(fn=infer, inputs=[prompt_input], outputs=[prompt_output], title=title, description=description).queue(max_size=10).launch(enable_queue=True)