import numpy as np
import cv2
import onnxruntime
import gradio as gr
from PIL import Image


# === Upscaler Logic ===

def pre_process(img: np.ndarray) -> np.ndarray:
    # HWC -> NCHW float32 batch of one, dropping any alpha channel
    img = np.transpose(img[:, :, 0:3], (2, 0, 1))
    return np.expand_dims(img, axis=0).astype(np.float32)


def post_process(img: np.ndarray) -> np.ndarray:
    # NCHW -> HWC uint8, flipping BGR back to RGB for display
    img = np.squeeze(img)
    return np.transpose(img, (1, 2, 0))[:, :, ::-1].astype(np.uint8)


# ONNX inference with session cache
_session_cache = {}


def inference(model_path: str, img_array: np.ndarray) -> np.ndarray:
    if model_path not in _session_cache:
        opts = onnxruntime.SessionOptions()
        opts.intra_op_num_threads = 1
        opts.inter_op_num_threads = 1
        _session_cache[model_path] = onnxruntime.InferenceSession(model_path, opts)
    session = _session_cache[model_path]
    inputs = {session.get_inputs()[0].name: img_array}
    return session.run(None, inputs)[0]


# PIL to BGR conversion
def convert_pil_to_cv2(image: Image.Image) -> np.ndarray:
    arr = np.array(image)
    if arr.ndim == 2:
        return cv2.cvtColor(arr, cv2.COLOR_GRAY2BGR)
    if arr.shape[2] == 4:
        # keep alpha in place; only swap RGB -> BGR
        return cv2.cvtColor(arr, cv2.COLOR_RGBA2BGRA)
    return arr[:, :, ::-1].copy()


# Upscale handler
def upscale(image, model_choice):
    model_path = f"models/{model_choice}.ort"
    img = convert_pil_to_cv2(image)

    # ensure 3 channels
    if img.ndim == 2:
        img = cv2.cvtColor(img, cv2.COLOR_GRAY2BGR)

    # 4-channel alpha handling: upscale color and alpha separately, then merge
    if img.shape[2] == 4:
        alpha = cv2.cvtColor(img[:, :, 3], cv2.COLOR_GRAY2BGR)
        out_a = post_process(inference(model_path, pre_process(alpha)))
        out_a = cv2.cvtColor(out_a, cv2.COLOR_BGR2GRAY)

        img_rgb = img[:, :, :3]
        out_rgb = post_process(inference(model_path, pre_process(img_rgb)))

        # post_process already returned RGB; this just appends an alpha channel
        out_rgba = cv2.cvtColor(out_rgb, cv2.COLOR_BGR2BGRA)
        out_rgba[:, :, 3] = out_a
        return out_rgba

    return post_process(inference(model_path, pre_process(img)))


# === Custom CSS for styling & animations ===
custom_css = """
body .gradio-container {
    background: linear-gradient(-45deg, #ff9a9e, #fad0c4, #fad0c4, #ffdde1);
    background-size: 400% 400%;
    animation: gradientBG 15s ease infinite;
}
@keyframes gradientBG {
    0% { background-position: 0% 50%; }
    50% { background-position: 100% 50%; }
    100% { background-position: 0% 50%; }
}
.fancy-title {
    font-family: 'Poppins', sans-serif;
    font-size: 3rem;
    background: linear-gradient(90deg, #7F7FD5, #86A8E7, #91EAE4);
    -webkit-background-clip: text;
    -webkit-text-fill-color: transparent;
    animation: fadeInText 2s ease-in-out;
    text-align: center;
}
@keyframes fadeInText {
    0% { opacity: 0; transform: translateY(-20px); }
    100% { opacity: 1; transform: translateY(0); }
}
.gradio-image {
    animation: fadeIn 1s ease-in;
    border-radius: 12px;
    box-shadow: 0 8px 16px rgba(0,0,0,0.2);
}
@keyframes fadeIn {
    from { opacity: 0; }
    to { opacity: 1; }
}
.gradio-radio input[type="radio"] + label:hover {
    transform: scale(1.1);
    transition: transform 0.2s;
}
.gradio-button {
    background: linear-gradient(90deg, #FF8A00, #E52E71);
    border: none;
    border-radius: 8px;
    color: white;
    font-weight: bold;
    padding: 12px 24px;
    cursor: pointer;
    transition: background 0.3s, transform 0.2s;
}
.gradio-button:hover {
    background: linear-gradient(90deg, #E52E71, #FF8A00);
    transform: scale(1.05);
}
#upscale_btn {
    margin-top: 10px;
}
"""

# === Gradio Blocks App (no theme string) ===
with gr.Blocks(css=custom_css) as demo:
    gr.HTML("""
        <div>
            <h1 class="fancy-title">✨ Ultra AI Image Upscaler ✨</h1>
        </div>
    """)

    with gr.Row():
        inp = gr.Image(type="pil", label="Drop Your Image Here")
        model = gr.Radio(
            ["modelx2", "modelx2_25JXL", "modelx4", "minecraft_modelx4"],
            label="Upscaler Model",
            value="modelx2",
        )

    btn = gr.Button("Upscale Image", elem_id="upscale_btn")
    out = gr.Image(label="Upscaled Output", elem_classes="gradio-image")

    btn.click(fn=upscale, inputs=[inp, model], outputs=out)

    gr.HTML("""
        <div>
            Powered by ONNX Runtime & Gradio Blocks
        </div>
    """)

if __name__ == "__main__":
    demo.launch()