File size: 4,546 Bytes
fe2a94a
 
 
 
ac9616e
fe2a94a
ac9616e
fe2a94a
ac9616e
 
fe2a94a
673f548
fe2a94a
 
ac9616e
 
 
 
 
 
 
 
 
 
 
 
db25782
 
ac9616e
 
 
 
 
 
 
 
 
 
 
 
 
 
fe2a94a
 
 
 
 
ac9616e
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
import numpy as np
import cv2
import onnxruntime
import gradio as gr
from PIL import Image

# === Upscaler Logic ===
def pre_process(img: np.ndarray) -> np.ndarray:
    """Convert an HxWxC uint8 image into a 1xCxHxW float32 batch.

    Only the first three channels are kept (any alpha plane is dropped);
    callers that need alpha upscale it separately.
    """
    chw = img[:, :, :3].transpose(2, 0, 1)
    return chw[np.newaxis, ...].astype(np.float32)


def post_process(img: np.ndarray) -> np.ndarray:
    """Convert a (1x)CxHxW float model output to an HxWx3 uint8 image.

    The channel axis is moved last and reversed (BGR <-> RGB swap to match
    the convention of the rest of the pipeline). Values are clipped to
    [0, 255] before the uint8 cast: without the clip, out-of-range model
    outputs would wrap around (e.g. 256.4 -> 0, -1.0 -> 255), producing
    speckle artifacts in the upscaled image.
    """
    img = np.squeeze(img)
    img = np.transpose(img, (1, 2, 0))[:, :, ::-1]
    return np.clip(img, 0, 255).astype(np.uint8)

# ONNX inference with session cache and GPU if available
def get_session(model_path: str):
    """Return a cached ONNX Runtime session for *model_path*.

    Sessions are created once per model path and reused across calls.
    CUDA is preferred when available, falling back to CPU.
    """
    cache = get_session.cache
    session = cache.get(model_path)
    if session is None:
        options = onnxruntime.SessionOptions()
        options.intra_op_num_threads = 1
        options.inter_op_num_threads = 1
        session = onnxruntime.InferenceSession(
            model_path,
            options,
            providers=['CUDAExecutionProvider', 'CPUExecutionProvider'],
        )
        cache[model_path] = session
    return session
get_session.cache = {}

def inference(model_path: str, img_array: np.ndarray) -> np.ndarray:
    """Run the model at *model_path* on a preprocessed batch.

    Feeds *img_array* to the session's first input and returns the
    first output tensor.
    """
    session = get_session(model_path)
    input_name = session.get_inputs()[0].name
    outputs = session.run(None, {input_name: img_array})
    return outputs[0]

# PIL to BGR conversion
def convert_pil_to_cv2(image: Image.Image) -> np.ndarray:
    """Convert a PIL image to an OpenCV-style BGR (or BGRA) numpy array.

    Grayscale input is promoted to 3-channel BGR. For RGBA input only the
    color channels are swapped so the result is proper BGRA: the previous
    blanket ``arr[:, :, ::-1]`` reversed all four channels, yielding ABGR
    and placing the red plane where downstream code expects alpha.
    """
    arr = np.array(image)
    if arr.ndim == 2:
        return cv2.cvtColor(arr, cv2.COLOR_GRAY2BGR)
    if arr.shape[2] == 4:
        return cv2.cvtColor(arr, cv2.COLOR_RGBA2BGRA)
    return arr[:, :, ::-1].copy()

# Upscale handler
def upscale(image, model_choice):
    """Upscale a PIL image with the selected ONNX model.

    *model_choice* selects the ``.ort`` file under ``models/``. Images with
    an alpha channel are handled by upscaling the color and alpha planes
    separately — the single-channel alpha is replicated to 3 channels so
    the model accepts it, then collapsed back — and recombining them.
    Returns an HxWxC uint8 array.
    """
    model_path = f"models/{model_choice}.ort"
    img = convert_pil_to_cv2(image)
    # convert_pil_to_cv2 already promotes grayscale to 3-channel BGR, so
    # img is always HxWx3 or HxWx4 here (the old ndim == 2 re-check was
    # unreachable and has been removed).

    if img.shape[2] == 4:
        # Upscale the alpha plane on its own.
        alpha = cv2.cvtColor(img[:, :, 3], cv2.COLOR_GRAY2BGR)
        out_a = post_process(inference(model_path, pre_process(alpha)))
        out_a = cv2.cvtColor(out_a, cv2.COLOR_BGR2GRAY)
        # Upscale the color planes, then reattach the upscaled alpha.
        rgb = img[:, :, :3]
        out_rgb = post_process(inference(model_path, pre_process(rgb)))
        rgba = cv2.cvtColor(out_rgb, cv2.COLOR_BGR2BGRA)
        rgba[:, :, 3] = out_a
        return rgba
    return post_process(inference(model_path, pre_process(img)))

# === Dark Blue-Grey Theme CSS & Animations ===
# Injected into the app via gr.Blocks(css=custom_css) below. Defines:
# an animated dark gradient page background, a gradient-text title with a
# fade-in, fade-in bordered image panels, radio-label hover scaling, and
# gradient buttons with hover transitions, plus small layout tweaks.
custom_css = """
/* Dark Gradient Background */
body .gradio-container {
    background: linear-gradient(135deg, #0d1b2a, #1b263b, #415a77, #1b263b);
    background-size: 400% 400%;
    animation: bgFade 25s ease infinite;
}
@keyframes bgFade {
    0% { background-position: 0% 0%; }
    50% { background-position: 100% 100%; }
    100% { background-position: 0% 0%; }
}

/* Title Styling */
.fancy-title {
    font-family: 'Poppins', sans-serif;
    font-size: 2.8rem;
    background: linear-gradient(90deg, #778da9, #415a77);
    -webkit-background-clip: text;
    -webkit-text-fill-color: transparent;
    animation: fadeInText 2s ease-out;
    text-align: center;
    margin-bottom: 1rem;
}
@keyframes fadeInText {
    0% { opacity: 0; transform: translateY(-10px); }
    100% { opacity: 1; transform: translateY(0); }
}

/* Inputs & Outputs */
.gradio-image, .gradio-gallery {
    animation: fadeIn 1.2s ease-in;
    border-radius: 10px;
    box-shadow: 0 4px 12px rgba(0,0,0,0.5);
    border: 2px solid #415a77;
}
@keyframes fadeIn {
    from { opacity: 0; }
    to { opacity: 1; }
}

/* Radio Hover */
.gradio-radio input[type="radio"] + label:hover {
    transform: scale(1.1);
    color: #e0e1dd;
    transition: transform 0.2s, color 0.2s;
}

/* Button Styling */
.gradio-button {
    background: linear-gradient(90deg, #1b263b, #415a77);
    border: 1px solid #778da9;
    border-radius: 6px;
    color: #e0e1dd;
    font-weight: 600;
    padding: 10px 22px;
    cursor: pointer;
    box-shadow: 0 2px 6px rgba(0,0,0,0.7);
    transition: background 0.3s, transform 0.2s;
}
.gradio-button:hover {
    background: linear-gradient(90deg, #415a77, #1b263b);
    transform: scale(1.03);
}

/* Layout tweaks */
#upscale_btn { margin-top: 1rem; }
.gradio-row { gap: 1rem; }
"""

# === Gradio Blocks App ===
# UI layout: title banner, a row with the image input and model picker,
# an upscale button wired to upscale(), and the output image.
with gr.Blocks(css=custom_css) as demo:
    gr.HTML("<h1 class='fancy-title'>✨ Ultra AI Image Upscaler ✨</h1>")
    with gr.Row():
        image_input = gr.Image(type="pil", label="Drop Your Image Here")
        model_selector = gr.Radio(
            ["modelx2", "modelx2_25JXL", "modelx4", "minecraft_modelx4"],
            label="Upscaler Model",
            value="modelx2",
        )
    upscale_button = gr.Button("Upscale Image", elem_id="upscale_btn")
    result_image = gr.Image(label="Upscaled Output")
    upscale_button.click(fn=upscale, inputs=[image_input, model_selector], outputs=result_image)
    gr.HTML("<p style='text-align:center; color:#e0e1dd;'>Powered by ONNX Runtime & Gradio Blocks</p>")

if __name__ == "__main__":
    demo.launch()