import gradio as gr
import torch
from PIL import Image
from gtts import gTTS
import numpy as np
import cv2
from skimage.feature import graycomatrix, graycoprops  # GLCM utilities live in scikit-image, not scikit-learn
from transformers import BlipProcessor, BlipForConditionalGeneration, MarianMTModel, MarianTokenizer

# Load the YOLOv5 model (weights are fetched from the Torch hub on first run)
model = torch.hub.load('ultralytics/yolov5', 'yolov5s')
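# Quick standalone check of the detector (illustrative sketch; the sample URL
# is the one used in the ultralytics/yolov5 README):
#   results = model('https://ultralytics.com/images/zidane.jpg')
#   results.print()  # prints detected classes and confidences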

# Texture analysis using a grey-level co-occurrence matrix (GLCM)
def analyze_texture(image):
    # PIL images are RGB, so convert with COLOR_RGB2GRAY (not BGR2GRAY)
    gray_image = cv2.cvtColor(np.array(image), cv2.COLOR_RGB2GRAY)
    glcm = graycomatrix(gray_image, distances=[5], angles=[0], levels=256, symmetric=True, normed=True)
    contrast = graycoprops(glcm, 'contrast')[0, 0]
    return contrast
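
# Sanity check for analyze_texture (sketch only; the synthetic noise image is
# an assumption for demonstration, not part of the app):
#   noise = Image.fromarray((np.random.rand(64, 64, 3) * 255).astype(np.uint8))
#   print(analyze_texture(noise))  # larger values mean stronger local intensity variation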

# Describe the image in English using BLIP
# (the processor and model are reloaded on every call; see the caching note at
# the end of the file)
def describe_image(image):
    processor = BlipProcessor.from_pretrained("Salesforce/blip-image-captioning-base")
    model = BlipForConditionalGeneration.from_pretrained("Salesforce/blip-image-captioning-base")
    inputs = processor(image, return_tensors="pt")
    out = model.generate(**inputs)
    description = processor.decode(out[0], skip_special_tokens=True)
    return description

# Translate the English description into Portuguese with MarianMT
def translate_description(description):
    model_name = 'Helsinki-NLP/opus-mt-en-pt'
    tokenizer = MarianTokenizer.from_pretrained(model_name)
    model = MarianMTModel.from_pretrained(model_name)
    translated = model.generate(**tokenizer(description, return_tensors="pt", padding=True))
    translated_text = tokenizer.decode(translated[0], skip_special_tokens=True)
    return translated_text
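
# Example call (sketch; the exact wording of the model output is not guaranteed):
#   translate_description("a dog running on the beach")
#   # -> roughly "um cachorro correndo na praia"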

# Main pipeline: process the image and produce the spoken description
def process_image(image):
    # Object detection; render() draws the boxes onto a copy of the image
    results = model(image)
    detected_image = results.render()[0]

    # Color analysis (mean RGB); computed but not included in the returned outputs
    mean_rgb = np.mean(np.array(image), axis=(0, 1))

    # Texture analysis; likewise computed but not returned
    texture_contrast = analyze_texture(image)

    # Image description (English), then translation to Portuguese
    description = describe_image(image)
    translated_description = translate_description(description)

    # Text to speech
    tts = gTTS(text=translated_description, lang='pt')
    tts.save("output.mp3")

    # Return the annotated image, the translated description, and the audio file
    return Image.fromarray(detected_image), translated_description, "output.mp3"
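
# Note: "output.mp3" is written to the working directory, so concurrent requests
# overwrite each other's audio. A sketch of one fix, using a unique temp file
# (an assumption, not part of the original app):
#   import tempfile
#   audio_path = tempfile.NamedTemporaryFile(suffix=".mp3", delete=False).name
#   tts.save(audio_path)
#   return Image.fromarray(detected_image), translated_description, audio_path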

# Gradio interface; gr.inputs/gr.outputs were removed in recent Gradio releases,
# so the component classes are used directly, and example images are passed as
# file paths rather than PIL objects
iface = gr.Interface(
    fn=process_image,
    inputs=gr.Image(type="pil"),
    outputs=[gr.Image(type="pil"), gr.Textbox(), gr.Audio(type="filepath")],
    examples=["/mnt/data/example1.JPG"]
)

iface.launch()
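
# Performance note (sketch, not part of the original app): describe_image and
# translate_description re-instantiate their models on every request. Loading
# them once at module scope avoids the repeated construction cost, at the price
# of keeping both models in memory, e.g.:
#   blip_processor = BlipProcessor.from_pretrained("Salesforce/blip-image-captioning-base")
#   blip_model = BlipForConditionalGeneration.from_pretrained("Salesforce/blip-image-captioning-base")
#   mt_tokenizer = MarianTokenizer.from_pretrained('Helsinki-NLP/opus-mt-en-pt')
#   mt_model = MarianMTModel.from_pretrained('Helsinki-NLP/opus-mt-en-pt')
# The functions would then use these globals instead of loading per call.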