Agrannya committed
Commit ec62215 · verified · 1 Parent(s): cc511b9

Upload chestXRay_deploy.ipynb

Files changed (1)
  1. chestXRay_deploy.ipynb +930 -0
chestXRay_deploy.ipynb ADDED
@@ -0,0 +1,930 @@
1
+ {
2
+ "nbformat": 4,
3
+ "nbformat_minor": 0,
4
+ "metadata": {
5
+ "colab": {
6
+ "provenance": []
7
+ },
8
+ "kernelspec": {
9
+ "name": "python3",
10
+ "display_name": "Python 3"
11
+ },
12
+ "language_info": {
13
+ "name": "python"
14
+ }
15
+ },
16
+ "cells": [
17
+ {
18
+ "cell_type": "code",
19
+ "execution_count": null,
20
+ "metadata": {
21
+ "colab": {
22
+ "base_uri": "https://localhost:8080/"
23
+ },
24
+ "id": "H7ENjELwQyjR",
25
+ "outputId": "95d2f8a3-9d3f-442f-8106-bd4c89521476"
26
+ },
27
+ "outputs": [
28
+ {
29
+ "output_type": "stream",
30
+ "name": "stdout",
31
+ "text": [
32
+ "Overwriting requirements.txt\n"
33
+ ]
34
+ }
35
+ ],
36
+ "source": [
37
+ "%%writefile requirements.txt\n",
38
+ "gradio\n",
39
+ "tensorflow\n",
40
+ "numpy\n",
41
+ "pillow\n",
42
+ "opencv-python-headless\n"
43
+ ]
44
+ },
45
+ {
46
+ "cell_type": "code",
47
+ "source": [
48
+ "!pip install gradio"
49
+ ],
50
+ "metadata": {
51
+ "id": "fbc_INKXRIg-"
52
+ },
53
+ "execution_count": null,
54
+ "outputs": []
55
+ },
56
+ {
57
+ "cell_type": "code",
58
+ "source": [
59
+ "from google.colab import files\n",
60
+ "\n",
61
+ "uploaded = files.upload()\n",
62
+ "\n"
63
+ ],
64
+ "metadata": {
65
+ "colab": {
66
+ "base_uri": "https://localhost:8080/",
67
+ "height": 73
68
+ },
69
+ "id": "EUidXLRmSQs9",
70
+ "outputId": "bb6a5fac-a734-4aa8-8fac-75ebbe6dbbff"
71
+ },
72
+ "execution_count": null,
73
+ "outputs": [
74
+ {
75
+ "output_type": "display_data",
76
+ "data": {
77
+ "text/plain": [
78
+ "<IPython.core.display.HTML object>"
79
+ ],
80
+ "text/html": [
81
+ "\n",
82
+ " <input type=\"file\" id=\"files-db89ec8f-2417-43fa-a522-3ace03ec68d3\" name=\"files[]\" multiple disabled\n",
83
+ " style=\"border:none\" />\n",
84
+ " <output id=\"result-db89ec8f-2417-43fa-a522-3ace03ec68d3\">\n",
85
+ " Upload widget is only available when the cell has been executed in the\n",
86
+ " current browser session. Please rerun this cell to enable.\n",
87
+ " </output>\n",
88
+ " <script>// Copyright 2017 Google LLC\n",
89
+ "//\n",
90
+ "// Licensed under the Apache License, Version 2.0 (the \"License\");\n",
91
+ "// you may not use this file except in compliance with the License.\n",
92
+ "// You may obtain a copy of the License at\n",
93
+ "//\n",
94
+ "// http://www.apache.org/licenses/LICENSE-2.0\n",
95
+ "//\n",
96
+ "// Unless required by applicable law or agreed to in writing, software\n",
97
+ "// distributed under the License is distributed on an \"AS IS\" BASIS,\n",
98
+ "// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n",
99
+ "// See the License for the specific language governing permissions and\n",
100
+ "// limitations under the License.\n",
101
+ "\n",
102
+ "/**\n",
103
+ " * @fileoverview Helpers for google.colab Python module.\n",
104
+ " */\n",
105
+ "(function(scope) {\n",
106
+ "function span(text, styleAttributes = {}) {\n",
107
+ " const element = document.createElement('span');\n",
108
+ " element.textContent = text;\n",
109
+ " for (const key of Object.keys(styleAttributes)) {\n",
110
+ " element.style[key] = styleAttributes[key];\n",
111
+ " }\n",
112
+ " return element;\n",
113
+ "}\n",
114
+ "\n",
115
+ "// Max number of bytes which will be uploaded at a time.\n",
116
+ "const MAX_PAYLOAD_SIZE = 100 * 1024;\n",
117
+ "\n",
118
+ "function _uploadFiles(inputId, outputId) {\n",
119
+ " const steps = uploadFilesStep(inputId, outputId);\n",
120
+ " const outputElement = document.getElementById(outputId);\n",
121
+ " // Cache steps on the outputElement to make it available for the next call\n",
122
+ " // to uploadFilesContinue from Python.\n",
123
+ " outputElement.steps = steps;\n",
124
+ "\n",
125
+ " return _uploadFilesContinue(outputId);\n",
126
+ "}\n",
127
+ "\n",
128
+ "// This is roughly an async generator (not supported in the browser yet),\n",
129
+ "// where there are multiple asynchronous steps and the Python side is going\n",
130
+ "// to poll for completion of each step.\n",
131
+ "// This uses a Promise to block the python side on completion of each step,\n",
132
+ "// then passes the result of the previous step as the input to the next step.\n",
133
+ "function _uploadFilesContinue(outputId) {\n",
134
+ " const outputElement = document.getElementById(outputId);\n",
135
+ " const steps = outputElement.steps;\n",
136
+ "\n",
137
+ " const next = steps.next(outputElement.lastPromiseValue);\n",
138
+ " return Promise.resolve(next.value.promise).then((value) => {\n",
139
+ " // Cache the last promise value to make it available to the next\n",
140
+ " // step of the generator.\n",
141
+ " outputElement.lastPromiseValue = value;\n",
142
+ " return next.value.response;\n",
143
+ " });\n",
144
+ "}\n",
145
+ "\n",
146
+ "/**\n",
147
+ " * Generator function which is called between each async step of the upload\n",
148
+ " * process.\n",
149
+ " * @param {string} inputId Element ID of the input file picker element.\n",
150
+ " * @param {string} outputId Element ID of the output display.\n",
151
+ " * @return {!Iterable<!Object>} Iterable of next steps.\n",
152
+ " */\n",
153
+ "function* uploadFilesStep(inputId, outputId) {\n",
154
+ " const inputElement = document.getElementById(inputId);\n",
155
+ " inputElement.disabled = false;\n",
156
+ "\n",
157
+ " const outputElement = document.getElementById(outputId);\n",
158
+ " outputElement.innerHTML = '';\n",
159
+ "\n",
160
+ " const pickedPromise = new Promise((resolve) => {\n",
161
+ " inputElement.addEventListener('change', (e) => {\n",
162
+ " resolve(e.target.files);\n",
163
+ " });\n",
164
+ " });\n",
165
+ "\n",
166
+ " const cancel = document.createElement('button');\n",
167
+ " inputElement.parentElement.appendChild(cancel);\n",
168
+ " cancel.textContent = 'Cancel upload';\n",
169
+ " const cancelPromise = new Promise((resolve) => {\n",
170
+ " cancel.onclick = () => {\n",
171
+ " resolve(null);\n",
172
+ " };\n",
173
+ " });\n",
174
+ "\n",
175
+ " // Wait for the user to pick the files.\n",
176
+ " const files = yield {\n",
177
+ " promise: Promise.race([pickedPromise, cancelPromise]),\n",
178
+ " response: {\n",
179
+ " action: 'starting',\n",
180
+ " }\n",
181
+ " };\n",
182
+ "\n",
183
+ " cancel.remove();\n",
184
+ "\n",
185
+ " // Disable the input element since further picks are not allowed.\n",
186
+ " inputElement.disabled = true;\n",
187
+ "\n",
188
+ " if (!files) {\n",
189
+ " return {\n",
190
+ " response: {\n",
191
+ " action: 'complete',\n",
192
+ " }\n",
193
+ " };\n",
194
+ " }\n",
195
+ "\n",
196
+ " for (const file of files) {\n",
197
+ " const li = document.createElement('li');\n",
198
+ " li.append(span(file.name, {fontWeight: 'bold'}));\n",
199
+ " li.append(span(\n",
200
+ " `(${file.type || 'n/a'}) - ${file.size} bytes, ` +\n",
201
+ " `last modified: ${\n",
202
+ " file.lastModifiedDate ? file.lastModifiedDate.toLocaleDateString() :\n",
203
+ " 'n/a'} - `));\n",
204
+ " const percent = span('0% done');\n",
205
+ " li.appendChild(percent);\n",
206
+ "\n",
207
+ " outputElement.appendChild(li);\n",
208
+ "\n",
209
+ " const fileDataPromise = new Promise((resolve) => {\n",
210
+ " const reader = new FileReader();\n",
211
+ " reader.onload = (e) => {\n",
212
+ " resolve(e.target.result);\n",
213
+ " };\n",
214
+ " reader.readAsArrayBuffer(file);\n",
215
+ " });\n",
216
+ " // Wait for the data to be ready.\n",
217
+ " let fileData = yield {\n",
218
+ " promise: fileDataPromise,\n",
219
+ " response: {\n",
220
+ " action: 'continue',\n",
221
+ " }\n",
222
+ " };\n",
223
+ "\n",
224
+ " // Use a chunked sending to avoid message size limits. See b/62115660.\n",
225
+ " let position = 0;\n",
226
+ " do {\n",
227
+ " const length = Math.min(fileData.byteLength - position, MAX_PAYLOAD_SIZE);\n",
228
+ " const chunk = new Uint8Array(fileData, position, length);\n",
229
+ " position += length;\n",
230
+ "\n",
231
+ " const base64 = btoa(String.fromCharCode.apply(null, chunk));\n",
232
+ " yield {\n",
233
+ " response: {\n",
234
+ " action: 'append',\n",
235
+ " file: file.name,\n",
236
+ " data: base64,\n",
237
+ " },\n",
238
+ " };\n",
239
+ "\n",
240
+ " let percentDone = fileData.byteLength === 0 ?\n",
241
+ " 100 :\n",
242
+ " Math.round((position / fileData.byteLength) * 100);\n",
243
+ " percent.textContent = `${percentDone}% done`;\n",
244
+ "\n",
245
+ " } while (position < fileData.byteLength);\n",
246
+ " }\n",
247
+ "\n",
248
+ " // All done.\n",
249
+ " yield {\n",
250
+ " response: {\n",
251
+ " action: 'complete',\n",
252
+ " }\n",
253
+ " };\n",
254
+ "}\n",
255
+ "\n",
256
+ "scope.google = scope.google || {};\n",
257
+ "scope.google.colab = scope.google.colab || {};\n",
258
+ "scope.google.colab._files = {\n",
259
+ " _uploadFiles,\n",
260
+ " _uploadFilesContinue,\n",
261
+ "};\n",
262
+ "})(self);\n",
263
+ "</script> "
264
+ ]
265
+ },
266
+ "metadata": {}
267
+ },
268
+ {
269
+ "output_type": "stream",
270
+ "name": "stdout",
271
+ "text": [
272
+ "Saving chest_xray_weights.h5 to chest_xray_weights.h5\n"
273
+ ]
274
+ }
275
+ ]
276
+ },
277
+ {
278
+ "cell_type": "code",
279
+ "source": [
280
+ "!ls"
281
+ ],
282
+ "metadata": {
283
+ "colab": {
284
+ "base_uri": "https://localhost:8080/"
285
+ },
286
+ "id": "X7itk4Fdu1pA",
287
+ "outputId": "a8772aa3-e115-4dfe-c97f-241b2ec8d148"
288
+ },
289
+ "execution_count": null,
290
+ "outputs": [
291
+ {
292
+ "output_type": "stream",
293
+ "name": "stdout",
294
+ "text": [
295
+ "chest_xray_weights.h5 requirements.txt sample_data\n"
296
+ ]
297
+ }
298
+ ]
299
+ },
300
+ {
301
+ "cell_type": "code",
302
+ "source": [
303
+ "%%writefile app.py\n",
304
+ "import gradio as gr\n",
305
+ "import tensorflow as tf\n",
306
+ "from tensorflow.keras.models import load_model\n",
307
+ "import numpy as np\n",
308
+ "from PIL import Image\n",
309
+ "import cv2\n",
310
+ "from tensorflow.keras.initializers import GlorotUniform\n",
311
+ "from tensorflow.keras.utils import custom_object_scope\n",
312
+ "\n",
313
+ "# Load your trained model (upload your .h5 file to this directory or specify the path)\n",
314
+ "# Use a custom object scope to handle potential compatibility issues with GlorotUniform\n",
315
+ "with custom_object_scope({'GlorotUniform': GlorotUniform}):\n",
316
+ " model = load_model('chest_xray_weights.h5') # Replace with your actual filename\n",
317
+ "\n",
318
+ "\n",
319
+ "# Define your model's classes (update based on your training labels)\n",
320
+ "anatomy_classes = [\n",
321
+ " \"No Finding\",\n",
322
+ " \"Atelectasis\",\n",
323
+ " \"Cardiomegaly\",\n",
324
+ " \"Consolidation\",\n",
325
+ " \"Edema\",\n",
326
+ " \"Effusion\",\n",
327
+ " \"Emphysema\",\n",
328
+ " \"Fibrosis\",\n",
329
+ " \"Hernia\",\n",
330
+ " \"Infiltration\",\n",
331
+ " \"Mass\",\n",
332
+ " \"Nodule\",\n",
333
+ " \"Pneumonia\",\n",
334
+ " \"Pneumothorax\"\n",
335
+ "]\n",
336
+ "\n",
337
+ "def predict_abnormality(image):\n",
338
+ " try:\n",
339
+ " # Preprocess the image (match your training setup)\n",
340
+ " img = Image.fromarray(image).resize((224, 224)) # Adjust size to your model's input\n",
341
+ " img_array = np.array(img)\n",
342
+ "\n",
343
+ " # Convert to grayscale if your model expects it\n",
344
+ " if len(img_array.shape) == 3:\n",
345
+ " img_array = cv2.cvtColor(img_array, cv2.COLOR_RGB2GRAY)\n",
346
+ "\n",
347
+ " img_array = img_array / 255.0 # Normalize\n",
348
+ " img_array = np.expand_dims(img_array, axis=0) # Add batch dimension\n",
349
+ " img_array = np.expand_dims(img_array, axis=-1) # Add channel if needed\n",
350
+ "\n",
351
+ " # Run prediction\n",
352
+ " predictions = model.predict(img_array) # Removed [0] to inspect full output first\n",
353
+ " predicted_index = np.argmax(predictions[0]) # Access the first prediction output\n",
354
+ " confidence = predictions[0][predicted_index]\n",
355
+ "\n",
356
+ " # Format output\n",
357
+ " if predicted_index == 0: # Assuming index 0 is \"Normal\"\n",
358
+ " return f\"No abnormality detected. Confidence: {confidence:.2%}\"\n",
359
+ " else:\n",
360
+ " issue = anatomy_classes[predicted_index]\n",
361
+ " return f\"Detected issue: {issue}. Confidence: {confidence:.2%}. Please consult a doctor.\"\n",
362
+ "\n",
363
+ " except Exception as e:\n",
364
+ " print(f\"Error details: {e}\") # Print error details\n",
365
+ " # Added print statements to help diagnose the error\n",
366
+ " try:\n",
367
+ " print(f\"Shape of predictions: {predictions.shape}\")\n",
368
+ " print(f\"Predictions: {predictions}\")\n",
369
+ " print(f\"Predicted index: {predicted_index}\")\n",
370
+ " print(f\"Number of anatomy classes: {len(anatomy_classes)}\")\n",
371
+ "\n",
372
+ " except NameError:\n",
373
+ " print(\"Predictions or predicted_index not defined before error.\")\n",
374
+ "\n",
375
+ " return f\"Error: {str(e)}. Try another image.\"\n",
376
+ "\n",
377
+ "# Create the Gradio interface\n",
378
+ "demo = gr.Interface(\n",
379
+ " fn=predict_abnormality,\n",
380
+ " inputs=gr.Image(label=\"Upload Chest X-Ray Image\", type=\"numpy\"),\n",
381
+ " outputs=gr.Textbox(label=\"Analysis Result\"),\n",
382
+ " title=\"Chest X-Ray Abnormality Detector\",\n",
383
+ " description=\"Upload an X-ray image to detect potential issues. For educational use only—not a medical diagnosis.\",\n",
384
+ " examples=[[\"sample_xray.jpg\"]], # Add paths to example images if available\n",
385
+ " allow_flagging=\"never\"\n",
386
+ ")\n",
387
+ "\n",
388
+ "if __name__ == \"__main__\":\n",
389
+ " demo.launch()\n"
390
+ ],
391
+ "metadata": {
392
+ "id": "na8gwSs_5rPj"
393
+ },
394
+ "execution_count": null,
395
+ "outputs": []
396
+ },
397
+ {
398
+ "cell_type": "code",
399
+ "source": [
400
+ "import gradio as gr\n",
401
+ "import tensorflow as tf\n",
402
+ "from tensorflow.keras.models import load_model\n",
403
+ "import numpy as np\n",
404
+ "from PIL import Image\n",
405
+ "import cv2\n",
406
+ "from tensorflow.keras.initializers import GlorotUniform\n",
407
+ "from tensorflow.keras.utils import custom_object_scope\n",
408
+ "\n",
409
+ "# Load your trained model (upload your .h5 file to this directory or specify the path)\n",
410
+ "# Use a custom object scope to handle potential compatibility issues with GlorotUniform\n",
411
+ "with custom_object_scope({'GlorotUniform': GlorotUniform}):\n",
412
+ " model = load_model('chest_xray_weights.h5') # Replace with your actual filename\n",
413
+ "\n",
414
+ "\n",
415
+ "# Define your model's classes (update based on your training labels)\n",
416
+ "anatomy_classes = [\n",
417
+ " \"No Finding\",\n",
418
+ " \"Atelectasis\",\n",
419
+ " \"Cardiomegaly\",\n",
420
+ " \"Consolidation\",\n",
421
+ " \"Edema\",\n",
422
+ " \"Effusion\",\n",
423
+ " \"Emphysema\",\n",
424
+ " \"Fibrosis\",\n",
425
+ " \"Hernia\",\n",
426
+ " \"Infiltration\",\n",
427
+ " \"Mass\",\n",
428
+ " \"Nodule\",\n",
429
+ " \"Pneumonia\",\n",
430
+ " \"Pneumothorax\"\n",
431
+ "]\n",
432
+ "\n",
433
+ "def predict_abnormality(image):\n",
434
+ " try:\n",
435
+ " # Preprocess the image (match your training setup)\n",
436
+ " img = Image.fromarray(image).resize((224, 224)) # Adjust size to your model's input\n",
437
+ " img_array = np.array(img)\n",
438
+ "\n",
439
+ " # Convert to grayscale if your model expects it\n",
440
+ " if len(img_array.shape) == 3:\n",
441
+ " img_array = cv2.cvtColor(img_array, cv2.COLOR_RGB2GRAY)\n",
442
+ "\n",
443
+ " img_array = img_array / 255.0 # Normalize\n",
444
+ " img_array = np.expand_dims(img_array, axis=0) # Add batch dimension\n",
445
+ " img_array = np.expand_dims(img_array, axis=-1) # Add channel if needed\n",
446
+ "\n",
447
+ " # Run prediction\n",
448
+ " predictions = model.predict(img_array) # Removed [0] to inspect full output first\n",
449
+ " predicted_index = np.argmax(predictions[0]) # Access the first prediction output\n",
450
+ " confidence = predictions[0][predicted_index]\n",
451
+ "\n",
452
+ " # Format output\n",
453
+ " if predicted_index == 0: # Assuming index 0 is \"Normal\"\n",
454
+ " return f\"No abnormality detected. Confidence: {confidence:.2%}\"\n",
455
+ " else:\n",
456
+ " issue = anatomy_classes[predicted_index]\n",
457
+ " return f\"Detected issue: {issue}. Confidence: {confidence:.2%}. Please consult a doctor.\"\n",
458
+ "\n",
459
+ " except Exception as e:\n",
460
+ " print(f\"Error details: {e}\") # Print error details\n",
461
+ " # Added print statements to help diagnose the error\n",
462
+ " try:\n",
463
+ " print(f\"Shape of predictions: {predictions.shape}\")\n",
464
+ " print(f\"Predictions: {predictions}\")\n",
465
+ " print(f\"Predicted index: {predicted_index}\")\n",
466
+ " print(f\"Number of anatomy classes: {len(anatomy_classes)}\")\n",
467
+ "\n",
468
+ " except NameError:\n",
469
+ " print(\"Predictions or predicted_index not defined before error.\")\n",
470
+ "\n",
471
+ " return f\"Error: {str(e)}. Try another image.\"\n",
472
+ "\n",
473
+ "# Create the Gradio interface\n",
474
+ "demo = gr.Interface(\n",
475
+ " fn=predict_abnormality,\n",
476
+ " inputs=gr.Image(label=\"Upload Chest X-Ray Image\", type=\"numpy\"),\n",
477
+ " outputs=gr.Textbox(label=\"Analysis Result\"),\n",
478
+ " title=\"Chest X-Ray Abnormality Detector\",\n",
479
+ " description=\"Upload an X-ray image to detect potential issues. For educational use only—not a medical diagnosis.\",\n",
480
+ " examples=[[\"sample_xray.jpg\"]], # Add paths to example images if available\n",
481
+ " allow_flagging=\"never\"\n",
482
+ ")\n",
483
+ "\n",
484
+ "if __name__ == \"__main__\":\n",
485
+ " demo.launch()"
486
+ ],
487
+ "metadata": {
488
+ "colab": {
489
+ "base_uri": "https://localhost:8080/",
490
+ "height": 685
491
+ },
492
+ "id": "ABo9ZTOIROmK",
493
+ "outputId": "db3f3be7-9332-46b9-8de9-fa8d76d8c407"
494
+ },
495
+ "execution_count": null,
496
+ "outputs": [
497
+ {
498
+ "output_type": "stream",
499
+ "name": "stderr",
500
+ "text": [
501
+ "/usr/local/lib/python3.11/dist-packages/gradio/interface.py:416: UserWarning: The `allow_flagging` parameter in `Interface` is deprecated.Use `flagging_mode` instead.\n",
502
+ " warnings.warn(\n"
503
+ ]
504
+ },
505
+ {
506
+ "output_type": "stream",
507
+ "name": "stdout",
508
+ "text": [
509
+ "It looks like you are running Gradio on a hosted a Jupyter notebook. For the Gradio app to work, sharing must be enabled. Automatically setting `share=True` (you can turn this off by setting `share=False` in `launch()` explicitly).\n",
510
+ "\n",
511
+ "Colab notebook detected. To show errors in colab notebook, set debug=True in launch()\n",
512
+ "* Running on public URL: https://0b58124b323893b4d0.gradio.live\n",
513
+ "\n",
514
+ "This share link expires in 1 week. For free permanent hosting and GPU upgrades, run `gradio deploy` from the terminal in the working directory to deploy to Hugging Face Spaces (https://huggingface.co/spaces)\n"
515
+ ]
516
+ },
517
+ {
518
+ "output_type": "display_data",
519
+ "data": {
520
+ "text/plain": [
521
+ "<IPython.core.display.HTML object>"
522
+ ],
523
+ "text/html": [
524
+ "<div><iframe src=\"https://0b58124b323893b4d0.gradio.live\" width=\"100%\" height=\"500\" allow=\"autoplay; camera; microphone; clipboard-read; clipboard-write;\" frameborder=\"0\" allowfullscreen></iframe></div>"
525
+ ]
526
+ },
527
+ "metadata": {}
528
+ }
529
+ ]
530
+ },
531
+ {
532
+ "cell_type": "code",
533
+ "source": [
534
+ "from google.colab import drive\n",
535
+ "drive.mount('/content/drive')"
536
+ ],
537
+ "metadata": {
538
+ "id": "0mtpdQN3w9wr"
539
+ },
540
+ "execution_count": null,
541
+ "outputs": []
542
+ },
543
+ {
544
+ "cell_type": "code",
545
+ "source": [
546
+ "!gradio deploy"
547
+ ],
548
+ "metadata": {
549
+ "colab": {
550
+ "base_uri": "https://localhost:8080/"
551
+ },
552
+ "id": "Eo8HYkzt3vET",
553
+ "outputId": "428c29ab-d2dc-4ecb-b4a3-6b96d4da4532"
554
+ },
555
+ "execution_count": null,
556
+ "outputs": [
557
+ {
558
+ "metadata": {
559
+ "tags": null
560
+ },
561
+ "name": "stdout",
562
+ "output_type": "stream",
563
+ "text": [
564
+ "Creating new Spaces Repo in \u001b[32m'/content'\u001b[0m. Collecting metadata, press Enter to \n",
565
+ "accept default value.\n",
566
+ "Enter Spaces app title [content]: "
567
+ ]
568
+ }
569
+ ]
570
+ },
571
+ {
572
+ "cell_type": "code",
573
+ "source": [
574
+ "!pip install h5py tensorflow\n"
575
+ ],
576
+ "metadata": {
577
+ "id": "GmlxHeAXTP-a"
578
+ },
579
+ "execution_count": null,
580
+ "outputs": []
581
+ },
582
+ {
583
+ "cell_type": "code",
584
+ "source": [
585
+ "# Script: Upload and Handle .h5 File in Colab\n",
586
+ "\n",
587
+ "# Step 1: Import necessary libraries\n",
588
+ "from google.colab import files\n",
589
+ "from tensorflow.keras.models import load_model\n",
590
+ "import h5py\n",
591
+ "import os\n",
592
+ "\n",
593
+ "# Step 2: Upload the .h5 file\n",
594
+ "print(\"Please upload your .h5 file:\")\n",
595
+ "uploaded = files.upload()\n",
596
+ "\n",
597
+ "# Step 3: Get the uploaded filename (assumes single file upload)\n",
598
+ "filename = list(uploaded.keys())[0] # e.g., 'chest_xray_model.h5'\n",
599
+ "print(f\"Uploaded file: {filename}\")\n",
600
+ "\n",
601
+ "# Step 4: Verify file existence and size\n",
602
+ "if os.path.exists(filename):\n",
603
+ " file_size = os.path.getsize(filename) / (1024 * 1024) # Size in MB\n",
604
+ " print(f\"File size: {file_size:.2f} MB\")\n",
605
+ "else:\n",
606
+ " print(\"Upload failed. Please try again.\")\n",
607
+ " raise SystemExit\n",
608
+ "\n",
609
+ "# Step 5: Handle as HDF5 file (general inspection)\n",
610
+ "with h5py.File(filename, 'r') as f:\n",
611
+ " print(\"HDF5 keys:\", list(f.keys()))\n",
612
+ "\n",
613
+ "# Step 6: Load as Keras model (if it's a model file)\n",
614
+ "try:\n",
615
+ " model = load_model(filename)\n",
616
+ " print(\"Model loaded successfully!\")\n",
617
+ " model.summary() # Print model architecture\n",
618
+ "except Exception as e:\n",
619
+ " print(f\"Error loading as Keras model: {e}\")\n",
620
+ "\n",
621
+ "# Optional: Save or process further\n",
622
+ "# model.save('processed_model.h5') # Example: Resave if modified\n"
623
+ ],
624
+ "metadata": {
625
+ "colab": {
626
+ "base_uri": "https://localhost:8080/",
627
+ "height": 56
628
+ },
629
+ "id": "__-Yt8VdTT8b",
630
+ "outputId": "251ea0b2-627f-4e15-e4b7-9859ee027e25"
631
+ },
632
+ "execution_count": null,
633
+ "outputs": [
634
+ {
635
+ "metadata": {
636
+ "tags": null
637
+ },
638
+ "name": "stdout",
639
+ "output_type": "stream",
640
+ "text": [
641
+ "Please upload your .h5 file:\n"
642
+ ]
643
+ },
644
+ {
645
+ "data": {
646
+ "text/html": [
647
+ "\n",
648
+ " <input type=\"file\" id=\"files-58b75fb6-d2ef-4c0f-9173-8ba3c9d66e40\" name=\"files[]\" multiple disabled\n",
649
+ " style=\"border:none\" />\n",
650
+ " <output id=\"result-58b75fb6-d2ef-4c0f-9173-8ba3c9d66e40\">\n",
651
+ " Upload widget is only available when the cell has been executed in the\n",
652
+ " current browser session. Please rerun this cell to enable.\n",
653
+ " </output>\n",
654
+ " <script>// Copyright 2017 Google LLC\n",
655
+ "//\n",
656
+ "// Licensed under the Apache License, Version 2.0 (the \"License\");\n",
657
+ "// you may not use this file except in compliance with the License.\n",
658
+ "// You may obtain a copy of the License at\n",
659
+ "//\n",
660
+ "// http://www.apache.org/licenses/LICENSE-2.0\n",
661
+ "//\n",
662
+ "// Unless required by applicable law or agreed to in writing, software\n",
663
+ "// distributed under the License is distributed on an \"AS IS\" BASIS,\n",
664
+ "// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n",
665
+ "// See the License for the specific language governing permissions and\n",
666
+ "// limitations under the License.\n",
667
+ "\n",
668
+ "/**\n",
669
+ " * @fileoverview Helpers for google.colab Python module.\n",
670
+ " */\n",
671
+ "(function(scope) {\n",
672
+ "function span(text, styleAttributes = {}) {\n",
673
+ " const element = document.createElement('span');\n",
674
+ " element.textContent = text;\n",
675
+ " for (const key of Object.keys(styleAttributes)) {\n",
676
+ " element.style[key] = styleAttributes[key];\n",
677
+ " }\n",
678
+ " return element;\n",
679
+ "}\n",
680
+ "\n",
681
+ "// Max number of bytes which will be uploaded at a time.\n",
682
+ "const MAX_PAYLOAD_SIZE = 100 * 1024;\n",
683
+ "\n",
684
+ "function _uploadFiles(inputId, outputId) {\n",
685
+ " const steps = uploadFilesStep(inputId, outputId);\n",
686
+ " const outputElement = document.getElementById(outputId);\n",
687
+ " // Cache steps on the outputElement to make it available for the next call\n",
688
+ " // to uploadFilesContinue from Python.\n",
689
+ " outputElement.steps = steps;\n",
690
+ "\n",
691
+ " return _uploadFilesContinue(outputId);\n",
692
+ "}\n",
693
+ "\n",
694
+ "// This is roughly an async generator (not supported in the browser yet),\n",
695
+ "// where there are multiple asynchronous steps and the Python side is going\n",
696
+ "// to poll for completion of each step.\n",
697
+ "// This uses a Promise to block the python side on completion of each step,\n",
698
+ "// then passes the result of the previous step as the input to the next step.\n",
699
+ "function _uploadFilesContinue(outputId) {\n",
700
+ " const outputElement = document.getElementById(outputId);\n",
701
+ " const steps = outputElement.steps;\n",
702
+ "\n",
703
+ " const next = steps.next(outputElement.lastPromiseValue);\n",
704
+ " return Promise.resolve(next.value.promise).then((value) => {\n",
705
+ " // Cache the last promise value to make it available to the next\n",
706
+ " // step of the generator.\n",
707
+ " outputElement.lastPromiseValue = value;\n",
708
+ " return next.value.response;\n",
709
+ " });\n",
710
+ "}\n",
711
+ "\n",
712
+ "/**\n",
713
+ " * Generator function which is called between each async step of the upload\n",
714
+ " * process.\n",
715
+ " * @param {string} inputId Element ID of the input file picker element.\n",
716
+ " * @param {string} outputId Element ID of the output display.\n",
717
+ " * @return {!Iterable<!Object>} Iterable of next steps.\n",
718
+ " */\n",
719
+ "function* uploadFilesStep(inputId, outputId) {\n",
720
+ " const inputElement = document.getElementById(inputId);\n",
721
+ " inputElement.disabled = false;\n",
722
+ "\n",
723
+ " const outputElement = document.getElementById(outputId);\n",
724
+ " outputElement.innerHTML = '';\n",
725
+ "\n",
726
+ " const pickedPromise = new Promise((resolve) => {\n",
727
+ " inputElement.addEventListener('change', (e) => {\n",
728
+ " resolve(e.target.files);\n",
729
+ " });\n",
730
+ " });\n",
731
+ "\n",
732
+ " const cancel = document.createElement('button');\n",
733
+ " inputElement.parentElement.appendChild(cancel);\n",
734
+ " cancel.textContent = 'Cancel upload';\n",
735
+ " const cancelPromise = new Promise((resolve) => {\n",
736
+ " cancel.onclick = () => {\n",
737
+ " resolve(null);\n",
738
+ " };\n",
739
+ " });\n",
740
+ "\n",
741
+ " // Wait for the user to pick the files.\n",
742
+ " const files = yield {\n",
743
+ " promise: Promise.race([pickedPromise, cancelPromise]),\n",
744
+ " response: {\n",
745
+ " action: 'starting',\n",
746
+ " }\n",
747
+ " };\n",
748
+ "\n",
749
+ " cancel.remove();\n",
750
+ "\n",
751
+ " // Disable the input element since further picks are not allowed.\n",
752
+ " inputElement.disabled = true;\n",
753
+ "\n",
754
+ " if (!files) {\n",
755
+ " return {\n",
756
+ " response: {\n",
757
+ " action: 'complete',\n",
758
+ " }\n",
759
+ " };\n",
760
+ " }\n",
761
+ "\n",
762
+ " for (const file of files) {\n",
763
+ " const li = document.createElement('li');\n",
764
+ " li.append(span(file.name, {fontWeight: 'bold'}));\n",
765
+ " li.append(span(\n",
766
+ " `(${file.type || 'n/a'}) - ${file.size} bytes, ` +\n",
767
+ " `last modified: ${\n",
768
+ " file.lastModifiedDate ? file.lastModifiedDate.toLocaleDateString() :\n",
769
+ " 'n/a'} - `));\n",
770
+ " const percent = span('0% done');\n",
771
+ " li.appendChild(percent);\n",
772
+ "\n",
773
+ " outputElement.appendChild(li);\n",
774
+ "\n",
775
+ " const fileDataPromise = new Promise((resolve) => {\n",
776
+ " const reader = new FileReader();\n",
777
+ " reader.onload = (e) => {\n",
778
+ " resolve(e.target.result);\n",
779
+ " };\n",
780
+ " reader.readAsArrayBuffer(file);\n",
781
+ " });\n",
782
+ " // Wait for the data to be ready.\n",
783
+ " let fileData = yield {\n",
784
+ " promise: fileDataPromise,\n",
785
+ " response: {\n",
786
+ " action: 'continue',\n",
787
+ " }\n",
788
+ " };\n",
789
+ "\n",
790
+ " // Use a chunked sending to avoid message size limits. See b/62115660.\n",
791
+ " let position = 0;\n",
792
+ " do {\n",
793
+ " const length = Math.min(fileData.byteLength - position, MAX_PAYLOAD_SIZE);\n",
794
+ " const chunk = new Uint8Array(fileData, position, length);\n",
795
+ " position += length;\n",
796
+ "\n",
797
+ " const base64 = btoa(String.fromCharCode.apply(null, chunk));\n",
798
+ " yield {\n",
799
+ " response: {\n",
800
+ " action: 'append',\n",
801
+ " file: file.name,\n",
802
+ " data: base64,\n",
803
+ " },\n",
804
+ " };\n",
805
+ "\n",
806
+ " let percentDone = fileData.byteLength === 0 ?\n",
807
+ " 100 :\n",
808
+ " Math.round((position / fileData.byteLength) * 100);\n",
809
+ " percent.textContent = `${percentDone}% done`;\n",
810
+ "\n",
811
+ " } while (position < fileData.byteLength);\n",
812
+ " }\n",
813
+ "\n",
814
+ " // All done.\n",
815
+ " yield {\n",
816
+ " response: {\n",
817
+ " action: 'complete',\n",
818
+ " }\n",
819
+ " };\n",
820
+ "}\n",
821
+ "\n",
822
+ "scope.google = scope.google || {};\n",
823
+ "scope.google.colab = scope.google.colab || {};\n",
824
+ "scope.google.colab._files = {\n",
825
+ " _uploadFiles,\n",
826
+ " _uploadFilesContinue,\n",
827
+ "};\n",
828
+ "})(self);\n",
829
+ "</script> "
830
+ ],
831
+ "text/plain": [
832
+ "<IPython.core.display.HTML object>"
833
+ ]
834
+ },
835
+ "metadata": {},
836
+ "output_type": "display_data"
837
+ }
838
+ ]
839
+ },
840
+ {
841
+ "cell_type": "code",
842
+ "metadata": {
843
+ "colab": {
844
+ "base_uri": "https://localhost:8080/"
845
+ },
846
+ "id": "f6c9da65",
847
+ "outputId": "4ffa4d8e-5717-4864-8791-427eb6c0d2cd"
848
+ },
849
+ "source": [
850
+ "# Uninstall current TensorFlow version\n",
851
+ "!pip uninstall tensorflow -y\n",
852
+ "\n",
853
+ "# Install TensorFlow 2.8\n",
854
+ "!pip install tensorflow==2.8\n",
855
+ "\n",
856
+ "# After running this cell, restart the Colab runtime (Runtime -> Restart runtime)\n",
857
+ "# Then, re-run the cell containing your model loading and Gradio interface code (cell ID ABo9ZTOIROmK)"
858
+ ],
859
+ "execution_count": null,
860
+ "outputs": [
861
+ {
862
+ "output_type": "stream",
863
+ "name": "stdout",
864
+ "text": [
865
+ "Found existing installation: tensorflow 2.18.0\n",
866
+ "Uninstalling tensorflow-2.18.0:\n",
867
+ " Successfully uninstalled tensorflow-2.18.0\n",
868
+ "\u001b[31mERROR: Could not find a version that satisfies the requirement tensorflow==2.8 (from versions: 2.12.0rc0, 2.12.0rc1, 2.12.0, 2.12.1, 2.13.0rc0, 2.13.0rc1, 2.13.0rc2, 2.13.0, 2.13.1, 2.14.0rc0, 2.14.0rc1, 2.14.0, 2.14.1, 2.15.0rc0, 2.15.0rc1, 2.15.0, 2.15.0.post1, 2.15.1, 2.16.0rc0, 2.16.1, 2.16.2, 2.17.0rc0, 2.17.0rc1, 2.17.0, 2.17.1, 2.18.0rc0, 2.18.0rc1, 2.18.0rc2, 2.18.0, 2.18.1, 2.19.0rc0, 2.19.0)\u001b[0m\u001b[31m\n",
869
+ "\u001b[0m\u001b[31mERROR: No matching distribution found for tensorflow==2.8\u001b[0m\u001b[31m\n",
870
+ "\u001b[0m"
871
+ ]
872
+ }
873
+ ]
874
+ },
875
+ {
876
+ "cell_type": "code",
877
+ "source": [
878
+ "model.summary()"
879
+ ],
880
+ "metadata": {
881
+ "colab": {
882
+ "base_uri": "https://localhost:8080/"
883
+ },
884
+ "id": "De7ShZSR0RBq",
885
+ "outputId": "f9e2c149-3007-4851-b330-b886238fcc40"
886
+ },
887
+ "execution_count": null,
888
+ "outputs": [
889
+ {
890
+ "output_type": "stream",
891
+ "name": "stdout",
892
+ "text": [
893
+ "Model: \"sequential\"\n",
894
+ "_________________________________________________________________\n",
895
+ " Layer (type) Output Shape Param # \n",
896
+ "=================================================================\n",
897
+ " conv2d (Conv2D) (None, 222, 222, 32) 320 \n",
898
+ " \n",
899
+ " max_pooling2d (MaxPooling2D (None, 111, 111, 32) 0 \n",
900
+ " ) \n",
901
+ " \n",
902
+ " conv2d_1 (Conv2D) (None, 109, 109, 64) 18496 \n",
903
+ " \n",
904
+ " max_pooling2d_1 (MaxPooling (None, 54, 54, 64) 0 \n",
905
+ " 2D) \n",
906
+ " \n",
907
+ " conv2d_2 (Conv2D) (None, 52, 52, 128) 73856 \n",
908
+ " \n",
909
+ " max_pooling2d_2 (MaxPooling (None, 26, 26, 128) 0 \n",
910
+ " 2D) \n",
911
+ " \n",
912
+ " flatten (Flatten) (None, 86528) 0 \n",
913
+ " \n",
914
+ " dense (Dense) (None, 128) 11075712 \n",
915
+ " \n",
916
+ " dropout (Dropout) (None, 128) 0 \n",
917
+ " \n",
918
+ " dense_1 (Dense) (None, 14) 1806 \n",
919
+ " \n",
920
+ "=================================================================\n",
921
+ "Total params: 11,170,190\n",
922
+ "Trainable params: 11,170,190\n",
923
+ "Non-trainable params: 0\n",
924
+ "_________________________________________________________________\n"
925
+ ]
926
+ }
927
+ ]
928
+ }
929
+ ]
930
+ }
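
A quick way to check that the committed weights file and the 224×224 grayscale input convention in app.py agree is a one-batch prediction on synthetic data. This is a minimal sketch, not part of the notebook; it reuses the notebook's own load_model call and assumes chest_xray_weights.h5 is in the working directory:

import numpy as np
from tensorflow.keras.initializers import GlorotUniform
from tensorflow.keras.models import load_model
from tensorflow.keras.utils import custom_object_scope

# Load exactly as app.py does, wrapping GlorotUniform for legacy-H5 compatibility.
with custom_object_scope({'GlorotUniform': GlorotUniform}):
    model = load_model('chest_xray_weights.h5')

# Synthetic normalized grayscale batch standing in for a preprocessed X-ray.
dummy = np.random.rand(1, 224, 224, 1).astype('float32')
preds = model.predict(dummy)
print(preds.shape)                # expected (1, 14): one score per entry in anatomy_classes
print(int(np.argmax(preds[0])))   # index into anatomy_classes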
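
The later `!pip install tensorflow==2.8` cell fails because the Colab runtime's Python 3.11 only has TensorFlow wheels from 2.12 onward, as the pip error in its output shows. One alternative that may avoid downgrading is to load the legacy HDF5 file once with the custom object scope already used above and re-save it in the native Keras format, which current TensorFlow reads directly. A sketch, with the output filename chosen here for illustration:

from tensorflow.keras.initializers import GlorotUniform
from tensorflow.keras.models import load_model
from tensorflow.keras.utils import custom_object_scope

with custom_object_scope({'GlorotUniform': GlorotUniform}):
    legacy_model = load_model('chest_xray_weights.h5')

# Re-save in the native Keras format; load_model('chest_xray_model.keras') then works
# on the preinstalled TensorFlow without any version pinning.
legacy_model.save('chest_xray_model.keras')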