Update app.py

app.py CHANGED
@@ -11,9 +11,10 @@ import torch
 import whisper
 from moviepy.editor import VideoFileClip
 from pydub import AudioSegment
-import fitz
-import docx
+import fitz
+import docx
 import yt_dlp
+from functools import lru_cache
 
 # Configure logging
 logging.basicConfig(
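Note: the hunk above also adds `from functools import lru_cache`, but none of the hunks in this change show it being used yet. The snippet below is therefore only a hypothetical sketch of the memoization it enables; cached_transcription is an invented name, and transcribe_audio is the function defined later in app.py.

from functools import lru_cache

@lru_cache(maxsize=32)
def cached_transcription(url):
    # Hypothetical helper: results are keyed by the URL string, so repeated
    # requests for the same video reuse the earlier transcription instead of
    # re-running Whisper. Only worthwhile if the URL's content is stable.
    return transcribe_audio(url)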
@@ -22,87 +23,120 @@ logging.basicConfig(
 )
 logger = logging.getLogger(__name__)
 
-#
-if None in (tokenizer, model, news_generator, whisper_model):
-    raise RuntimeError("Models not properly initialized. Please ensure initialization was successful.")
-
-tokenizer = AutoTokenizer.from_pretrained(
-    model_name,
-    token=HUGGINGFACE_TOKEN
-)
-if tokenizer is None:
-    raise RuntimeError("Failed to initialize tokenizer")
-tokenizer.pad_token = tokenizer.eos_token
-
-# Load model
-logger.info("Loading model...")
-model = AutoModelForCausalLM.from_pretrained(
-    model_name,
-    token=HUGGINGFACE_TOKEN,
-    torch_dtype=torch.bfloat16,
-    device_map="auto",
-    low_cpu_mem_usage=True
-)
-if model is None:
-    raise RuntimeError("Failed to initialize model")
-
-# Create pipeline
-logger.info("Creating pipeline...")
-news_generator = pipeline(
-    "text-generation",
-    model=model,
-    tokenizer=tokenizer,
-    device_map="auto",
-    torch_dtype=torch.bfloat16,
-    max_length=2048,
-    do_sample=True,
-    temperature=0.7,
-    top_p=0.95,
-    repetition_penalty=1.2
-)
-if news_generator is None:
-    raise RuntimeError("Failed to initialize news generator pipeline")
-
-# Load Whisper model
-logger.info("Loading Whisper model...")
-whisper_model = whisper.load_model("base")
-if whisper_model is None:
-    raise RuntimeError("Failed to initialize Whisper model")
-
-model
+class ModelManager:
+    _instance = None
+
+    def __new__(cls):
+        if cls._instance is None:
+            cls._instance = super(ModelManager, cls).__new__(cls)
+            cls._instance._initialized = False
+        return cls._instance
+
+    def __init__(self):
+        if not self._initialized:
+            self.tokenizer = None
+            self.model = None
+            self.news_generator = None
+            self.whisper_model = None
+            self._initialized = True
+
+    @spaces.GPU(duration=60)
+    def initialize_models(self):
+        """Initialize models with Zero GPU optimizations"""
+        try:
+            # Get HuggingFace token
+            HUGGINGFACE_TOKEN = os.environ.get('HUGGINGFACE_TOKEN')
+            if not HUGGINGFACE_TOKEN:
+                raise ValueError("HUGGINGFACE_TOKEN environment variable not set")
+
+            logger.info("Starting model initialization...")
+            model_name = "meta-llama/Llama-2-7b-chat-hf"
+
+            # Load tokenizer
+            logger.info("Loading tokenizer...")
+            self.tokenizer = AutoTokenizer.from_pretrained(
+                model_name,
+                token=HUGGINGFACE_TOKEN,
+                use_fast=False
+            )
+            if self.tokenizer is None:
+                raise RuntimeError("Failed to initialize tokenizer")
+            self.tokenizer.pad_token = self.tokenizer.eos_token
+
+            # Load model with specific GPU memory settings
+            logger.info("Loading model...")
+            self.model = AutoModelForCausalLM.from_pretrained(
+                model_name,
+                token=HUGGINGFACE_TOKEN,
+                torch_dtype=torch.float16,
+                device_map="auto",
+                low_cpu_mem_usage=True,
+                max_memory={0: "8GiB"}
+            )
+            if self.model is None:
+                raise RuntimeError("Failed to initialize model")
+
+            # Create pipeline
+            logger.info("Creating pipeline...")
+            self.news_generator = pipeline(
+                "text-generation",
+                model=self.model,
+                tokenizer=self.tokenizer,
+                device_map="auto",
+                torch_dtype=torch.float16,
+                max_length=2048,
+                do_sample=True,
+                temperature=0.7,
+                top_p=0.95,
+                repetition_penalty=1.2
+            )
+            if self.news_generator is None:
+                raise RuntimeError("Failed to initialize news generator pipeline")
+
+            # Load Whisper model
+            logger.info("Loading Whisper model...")
+            self.whisper_model = whisper.load_model("base", device="cuda")
+            if self.whisper_model is None:
+                raise RuntimeError("Failed to initialize Whisper model")
+
+            logger.info("All models initialized successfully")
+            return True
+
+        except Exception as e:
+            logger.error(f"Error during model initialization: {str(e)}")
+            self.reset_models()
+            raise
+
+    def reset_models(self):
+        """Reset all models to None"""
+        self.tokenizer = None
+        self.model = None
+        self.news_generator = None
+        self.whisper_model = None
+
+        # Clear CUDA cache
+        if torch.cuda.is_available():
+            torch.cuda.empty_cache()
+
+    def check_models_initialized(self):
+        """Check if all models are properly initialized"""
+        if None in (self.tokenizer, self.model, self.news_generator, self.whisper_model):
+            logger.warning("Models not initialized, attempting to initialize...")
+            self.initialize_models()
+
+    def get_models(self):
+        """Get initialized models, initializing if necessary"""
+        self.check_models_initialized()
+        return self.tokenizer, self.model, self.news_generator, self.whisper_model
+
+# Create global model manager instance
+model_manager = ModelManager()
+
+# Initialize models at startup
+try:
+    model_manager.initialize_models()
+except Exception as e:
+    logger.error(f"Initial model initialization failed: {str(e)}")
 
 def download_social_media_video(url):
     """Download a video from social media."""
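For reference, a minimal sketch of how the ModelManager added above is meant to be consumed by the rest of app.py (all names come from this diff; the snippet assumes the class and the global model_manager instance are in scope and that initialization can succeed in the current environment):

# ModelManager is a singleton: calling it again returns the same object,
# so every part of the app shares one set of loaded models.
assert ModelManager() is model_manager

# Callers unpack only what they need; get_models() lazily re-runs
# initialize_models() if any of the four models is still None
# (for example after reset_models() has been called).
tokenizer, _, news_generator, _ = model_manager.get_models()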
@@ -154,8 +188,8 @@ def preprocess_audio(audio_file):
 def transcribe_audio(file):
     """Transcribe an audio or video file."""
     try:
-        #
-
+        # Get initialized models
+        _, _, _, whisper_model = model_manager.get_models()
 
         if isinstance(file, str) and file.startswith('http'):
             file_path = download_social_media_video(file)
@@ -231,8 +265,8 @@ def process_social_content(url):
 @spaces.GPU(duration=60)
 def generate_news(instructions, facts, size, tone, *args):
     try:
-        #
-
+        # Get initialized models
+        tokenizer, _, news_generator, _ = model_manager.get_models()
 
         # Initialize knowledge base
         knowledge_base = {
@@ -358,14 +392,14 @@ Follow these requirements:
 
     except Exception as e:
         logger.error(f"Error generating news: {str(e)}")
-        # Try to reinitialize models if they're not working
         try:
-
+            model_manager.reset_models()
+            model_manager.initialize_models()
             logger.info("Models reinitialized successfully")
         except Exception as reinit_error:
             logger.error(f"Failed to reinitialize models: {str(reinit_error)}")
         return f"Error generating the news article: {str(e)}", ""
-
+
 def create_demo():
     with gr.Blocks() as demo:
         gr.Markdown("## Generador de noticias todo en uno")