Update app.py
app.py CHANGED
@@ -2,17 +2,20 @@ import numpy as np
 import cv2
 import onnxruntime
 import gradio as gr
+from PIL import Image

-# ===
+# === Upscaler Logic ===
 def pre_process(img: np.array) -> np.array:
     img = np.transpose(img[:, :, 0:3], (2, 0, 1))
     return np.expand_dims(img, axis=0).astype(np.float32)

+
 def post_process(img: np.array) -> np.array:
     img = np.squeeze(img)
     return np.transpose(img, (1, 2, 0))[:, :, ::-1].astype(np.uint8)

-
+
+# ONNX inference with session cache
 _session_cache = {}
 def inference(model_path: str, img_array: np.array) -> np.array:
     if model_path not in _session_cache:
@@ -24,48 +27,49 @@ def inference(model_path: str, img_array: np.array) -> np.array:
     inputs = {session.get_inputs()[0].name: img_array}
     return session.run(None, inputs)[0]

-
-
+
+# PIL to BGR conversion
 def convert_pil_to_cv2(image: Image.Image) -> np.array:
     arr = np.array(image)
-
+    if arr.ndim == 2:
+        return cv2.cvtColor(arr, cv2.COLOR_GRAY2BGR)
+    return arr[:, :, ::-1].copy()

-# Upscale

+# Upscale handler
 def upscale(image, model_choice):
     model_path = f"models/{model_choice}.ort"
     img = convert_pil_to_cv2(image)
+    # ensure 3 channels
     if img.ndim == 2:
         img = cv2.cvtColor(img, cv2.COLOR_GRAY2BGR)

-    #
+    # 4-channel alpha handling
     if img.shape[2] == 4:
         alpha = cv2.cvtColor(img[:, :, 3], cv2.COLOR_GRAY2BGR)
-
-
-
-
-
-
-        return
-
-    # normal RGB
+        out_a = post_process(inference(model_path, pre_process(alpha)))
+        out_a = cv2.cvtColor(out_a, cv2.COLOR_BGR2GRAY)
+        img_rgb = img[:, :, :3]
+        out_rgb = post_process(inference(model_path, pre_process(img_rgb)))
+        out_rgba = cv2.cvtColor(out_rgb, cv2.COLOR_BGR2BGRA)
+        out_rgba[:, :, 3] = out_a
+        return out_rgba
+
     return post_process(inference(model_path, pre_process(img)))

-
+
+# === Custom CSS for styling & animations ===
 custom_css = """
 body .gradio-container {
     background: linear-gradient(-45deg, #ff9a9e, #fad0c4, #fad0c4, #ffdde1);
     background-size: 400% 400%;
     animation: gradientBG 15s ease infinite;
 }
-
 @keyframes gradientBG {
     0% { background-position: 0% 50%; }
     50% { background-position: 100% 50%; }
     100% { background-position: 0% 50%; }
 }
-
 .fancy-title {
     font-family: 'Poppins', sans-serif;
     font-size: 3rem;
@@ -75,28 +79,23 @@ body .gradio-container {
     animation: fadeInText 2s ease-in-out;
     text-align: center;
 }
-
 @keyframes fadeInText {
     0% { opacity: 0; transform: translateY(-20px); }
     100% { opacity: 1; transform: translateY(0); }
 }
-
 .gradio-image {
     animation: fadeIn 1s ease-in;
     border-radius: 12px;
     box-shadow: 0 8px 16px rgba(0,0,0,0.2);
 }
-
 @keyframes fadeIn {
     from { opacity: 0; }
     to { opacity: 1; }
 }
-
 .gradio-radio input[type="radio"] + label:hover {
     transform: scale(1.1);
     transition: transform 0.2s;
 }
-
 .gradio-button {
     background: linear-gradient(90deg, #FF8A00, #E52E71);
     border: none;
@@ -107,24 +106,30 @@ body .gradio-container {
     cursor: pointer;
     transition: background 0.3s, transform 0.2s;
 }
-
 .gradio-button:hover {
     background: linear-gradient(90deg, #E52E71, #FF8A00);
     transform: scale(1.05);
 }
+#upscale_btn {
+    margin-top: 10px;
+}
 """

-
-
+
+# === Gradio Blocks App (no theme string) ===
+with gr.Blocks(css=custom_css) as demo:
     gr.HTML("<h1 class='fancy-title'>✨ Ultra AI Image Upscaler ✨</h1>")
     with gr.Row():
         inp = gr.Image(type="pil", label="Drop Your Image Here")
-        model = gr.Radio([
-
+        model = gr.Radio([
+            "modelx2", "modelx2_25JXL",
+            "modelx4", "minecraft_modelx4"
+        ], label="Upscaler Model", value="modelx2")
+        btn = gr.Button("Upscale Image", elem_id="upscale_btn")
         out = gr.Image(label="Upscaled Output", elem_classes="gradio-image")

     btn.click(fn=upscale, inputs=[inp, model], outputs=out)
     gr.HTML("<p style='text-align:center; color:#555;'>Powered by ONNX Runtime & Gradio Blocks</p>")

 if __name__ == "__main__":
-    demo.launch()
+    demo.launch()
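
The diff shows only the first line of the cached-session branch in inference(); the unchanged middle of the function is elided. As a point of reference, a minimal sketch of the session-cache pattern those surrounding lines imply (the InferenceSession construction is an assumption, not part of the shown diff):

# Hypothetical completion of the elided middle of inference():
# build the ONNX Runtime session once per model path, then reuse it on later calls.
if model_path not in _session_cache:
    _session_cache[model_path] = onnxruntime.InferenceSession(model_path)
session = _session_cache[model_path]

A quick way to exercise the new upscale() handler outside the Gradio UI, assuming a bundled model file such as models/modelx2.ort and a placeholder input image:

# Hypothetical local smoke test; file names are placeholders.
from PIL import Image

result = upscale(Image.open("photo.png"), "modelx2")  # upscaled numpy uint8 array
Image.fromarray(result).save("photo_upscaled.png")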