Commit · 9b44783
1 Parent(s): 42e8595
Uploading files7 ("Subindo arquivos7")

Files changed:
- app.py +19 -6
- requirements.txt +0 -1
app.py
CHANGED

@@ -4,17 +4,30 @@ from PIL import Image
 from gtts import gTTS
 import numpy as np
 import cv2
-from sklearn.feature_extraction.image import greycomatrix, greycoprops
 from transformers import BlipProcessor, BlipForConditionalGeneration, MarianMTModel, MarianTokenizer

 # Load the YOLOv5 model
 model = torch.hub.load('ultralytics/yolov5', 'yolov5s')

-# Function to
-def
+# Function to compute the GLCM and the contrast manually
+def calculate_glcm_contrast(image):
     gray_image = cv2.cvtColor(np.array(image), cv2.COLOR_BGR2GRAY)
-
-
+    max_value = gray_image.max() + 1
+    glcm = np.zeros((max_value, max_value), dtype=np.float64)
+
+    for i in range(gray_image.shape[0] - 1):
+        for j in range(gray_image.shape[1] - 1):
+            x = gray_image[i, j]
+            y = gray_image[i + 1, j + 1]
+            glcm[x, y] += 1
+
+    glcm = glcm / glcm.sum()
+
+    contrast = 0.0
+    for i in range(max_value):
+        for j in range(max_value):
+            contrast += (i - j) ** 2 * glcm[i, j]
+
     return contrast

 # Function to describe an image using BLIP

@@ -45,7 +58,7 @@ def process_image(image):
     mean_rgb = np.mean(np.array(image), axis=(0, 1))

     # Texture analysis
-    texture_contrast =
+    texture_contrast = calculate_glcm_contrast(image)

     # Image description
     description = describe_image(image)
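For reference, the function added in this commit builds a gray-level co-occurrence matrix (GLCM) for the (+1, +1) diagonal neighbor, normalizes it to probabilities, and returns the standard GLCM contrast, i.e. the sum over (i, j) of (i - j)^2 * P(i, j). Below is a minimal vectorized sketch of the same computation; the name glcm_contrast_vectorized and the vectorized form are illustrative only and are not part of the commit.

import numpy as np
import cv2

def glcm_contrast_vectorized(image):
    # Mirror the commit: convert to grayscale via OpenCV
    gray = cv2.cvtColor(np.array(image), cv2.COLOR_BGR2GRAY)
    levels = int(gray.max()) + 1

    # Pixel pairs offset by (+1, +1), as in the commit's nested loops
    x = gray[:-1, :-1].ravel()
    y = gray[1:, 1:].ravel()

    # Accumulate co-occurrence counts and normalize to probabilities
    glcm = np.zeros((levels, levels), dtype=np.float64)
    np.add.at(glcm, (x, y), 1)
    glcm /= glcm.sum()

    # Contrast: (i - j)^2 weighted by P(i, j)
    i, j = np.indices(glcm.shape)
    return float(np.sum((i - j) ** 2 * glcm))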
requirements.txt
CHANGED

@@ -3,6 +3,5 @@ torch
 Pillow
 numpy
 opencv-python
-scikit-learn
 transformers
 gtts
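Note that greycomatrix and greycoprops come from scikit-image (skimage.feature), not scikit-learn, which is consistent with this commit dropping both the sklearn import and the scikit-learn requirement. If a library-backed version were ever preferred over the manual loops, a sketch using scikit-image's newer spelling (graycomatrix / graycoprops) might look like the following. This is an assumption for illustration: scikit-image is not in requirements.txt, and skimage's offset convention may not match the commit's (+1, +1) pairing exactly, so the value can differ from calculate_glcm_contrast.

import numpy as np
import cv2
from skimage.feature import graycomatrix, graycoprops

def glcm_contrast_skimage(image):
    gray = cv2.cvtColor(np.array(image), cv2.COLOR_BGR2GRAY)
    # One diagonal offset at distance 1; 256 gray levels for uint8 input
    glcm = graycomatrix(gray, distances=[1], angles=[np.pi / 4],
                        levels=256, symmetric=False, normed=True)
    return float(graycoprops(glcm, 'contrast')[0, 0])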