import gradio as gr
import random
import time
import json
import numpy as np
from datetime import datetime
import base64
import io
from PIL import Image
import torch
from transformers import AutoTokenizer, AutoModelForCausalLM, pipeline
import warnings

warnings.filterwarnings('ignore')

# Global model cache to avoid reloading
model_cache = {}

# Available models configuration - 2025 Advanced Free Open Source Models
AVAILABLE_MODELS = {
    "Qwen/Qwen2.5-0.5B-Instruct": {
        "name": "Qwen 2.5 0.5B (Fast & Efficient)",
        "description": "Alibaba's Qwen 2.5 - Fast, lightweight model for quick responses",
        "size": "0.5B",
        "speciality": "General Purpose, Fast"
    },
    "Qwen/Qwen2.5-1.5B-Instruct": {
        "name": "Qwen 2.5 1.5B (Balanced)",
        "description": "Alibaba's Qwen 2.5 - Balanced performance and speed",
        "size": "1.5B",
        "speciality": "General Purpose, Balanced"
    },
    "Qwen/Qwen2.5-3B-Instruct": {
        "name": "Qwen 2.5 3B (High Quality)",
        "description": "Alibaba's Qwen 2.5 - Higher quality responses",
        "size": "3B",
        "speciality": "General Purpose, Quality"
    },
    "microsoft/DialoGPT-medium": {
        "name": "DialoGPT Medium (Conversational)",
        "description": "Microsoft's conversational AI model",
        "size": "Medium",
        "speciality": "Conversation"
    },
    "microsoft/DialoGPT-large": {
        "name": "DialoGPT Large (Advanced Chat)",
        "description": "Microsoft's large conversational AI model",
        "size": "Large",
        "speciality": "Advanced Conversation"
    },
    "cardiffnlp/twitter-roberta-base-sentiment-latest": {
        "name": "RoBERTa Sentiment (Specialized)",
        "description": "Advanced sentiment analysis model",
        "size": "Base",
        "speciality": "Sentiment Analysis"
    },
    "facebook/blenderbot-400M-distill": {
        "name": "BlenderBot 400M (Conversational)",
        "description": "Facebook's conversational AI - distilled version",
        "size": "400M",
        "speciality": "Chat & Dialog"
    }
}


def load_model(model_name, task_type="text-generation"):
    """Load and cache models to avoid reloading"""
    cache_key = f"{model_name}_{task_type}"

    if cache_key in model_cache:
        return model_cache[cache_key]

    try:
        print(f"Loading model: {model_name}")

        if task_type == "sentiment":
            pipe = pipeline("sentiment-analysis", model=model_name, return_all_scores=True)
        elif task_type == "text-generation":
            # Load tokenizer and model separately for better control
            tokenizer = AutoTokenizer.from_pretrained(model_name, trust_remote_code=True)
            model = AutoModelForCausalLM.from_pretrained(
                model_name,
                torch_dtype=torch.float16 if torch.cuda.is_available() else torch.float32,
                device_map="auto" if torch.cuda.is_available() else None,
                trust_remote_code=True
            )
            pipe = pipeline("text-generation", model=model, tokenizer=tokenizer)
        else:
            pipe = pipeline(task_type, model=model_name)

        model_cache[cache_key] = pipe
        print(f"Model {model_name} loaded successfully!")
        return pipe
    except Exception as e:
        print(f"Error loading model {model_name}: {str(e)}")
        return None


def enhanced_text_analyzer(text, selected_model):
    """Enhanced text analysis using advanced Hugging Face models"""
    if not text:
        return "Please enter some text to analyze."
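    # The model-backed analysis below is wrapped in try/except so the basic
    # statistics are still returned even if a model fails to download or load.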
    # Basic stats
    word_count = len(text.split())
    char_count = len(text)
    sentence_count = text.count('.') + text.count('!') + text.count('?')

    analysis_results = f"""
📊 **Enhanced Text Analysis Results**

**Basic Statistics:**
- Words: {word_count}
- Characters: {char_count}
- Sentences: {sentence_count}
- Reading Time: ~{max(1, word_count // 200)} minute(s)

**Model Used:** {AVAILABLE_MODELS.get(selected_model, {}).get('name', selected_model)}
"""

    # Advanced sentiment analysis using selected model
    try:
        if "sentiment" in selected_model.lower() or "roberta" in selected_model.lower():
            sentiment_model = load_model(selected_model, "sentiment")
            if sentiment_model:
                sentiment_results = sentiment_model(text)
                if sentiment_results and len(sentiment_results) > 0:
                    sentiments = sentiment_results[0] if isinstance(sentiment_results[0], list) else sentiment_results
                    analysis_results += "\n **🎭 Advanced Sentiment Analysis:**\n"
                    for sentiment in sentiments:
                        label = sentiment['label']
                        score = sentiment['score']
                        emoji = "😊" if label == "POSITIVE" else "😔" if label == "NEGATIVE" else "😐"
                        analysis_results += f" - {label}: {score:.3f} {emoji}\n"
        else:
            # Use general model for analysis
            model = load_model(selected_model, "text-generation")
            if model:
                try:
                    prompt = f"Analyze the sentiment and key themes of this text: '{text[:200]}...'\nAnalysis:"
                    # max_new_tokens bounds only the generated continuation, so a
                    # long prompt cannot exceed the length budget on its own;
                    # do_sample=True is required for temperature to take effect.
                    result = model(prompt, max_new_tokens=100, num_return_sequences=1,
                                   temperature=0.7, do_sample=True)
                    if result and len(result) > 0:
                        generated_text = result[0]['generated_text']
                        analysis_text = generated_text.split("Analysis:")[-1].strip()
                        analysis_results += f"\n **🤖 AI Analysis:**\n {analysis_text}\n"
                except Exception as e:
                    analysis_results += f"\n **Note:** Advanced analysis unavailable ({str(e)})\n"
    except Exception as e:
        analysis_results += f"\n **Note:** Model analysis failed: {str(e)}\n"

    # Fallback basic sentiment
    positive_words = ['good', 'great', 'excellent', 'amazing', 'wonderful', 'fantastic',
                      'love', 'happy', 'awesome', 'brilliant']
    negative_words = ['bad', 'terrible', 'awful', 'hate', 'sad', 'angry',
                      'disappointed', 'horrible', 'disgusting', 'annoying']

    positive_score = sum(1 for word in positive_words if word in text.lower())
    negative_score = sum(1 for word in negative_words if word in text.lower())

    if positive_score > negative_score:
        sentiment = "Positive 😊"
    elif negative_score > positive_score:
        sentiment = "Negative 😔"
    else:
        sentiment = "Neutral 😐"

    analysis_results += f"""
**📈 Basic Sentiment Analysis:**
- Overall Sentiment: {sentiment}
- Positive indicators: {positive_score}
- Negative indicators: {negative_score}
"""
    return analysis_results


def advanced_chat_response(message, history, selected_model):
    """Advanced chatbot using selected Hugging Face models"""
    if not message:
        return history, ""

    try:
        model = load_model(selected_model, "text-generation")
        if model:
            # Format conversation history for context
            context = ""
            if history:
                for user_msg, bot_msg in history[-3:]:  # Last 3 exchanges for context
                    context += f"Human: {user_msg}\nAssistant: {bot_msg}\n"

            # Create prompt
            if "qwen" in selected_model.lower():
                prompt = f"<|im_start|>system\nYou are a helpful AI assistant.<|im_end|>\n<|im_start|>user\n{message}<|im_end|>\n<|im_start|>assistant\n"
            else:
                prompt = f"{context}Human: {message}\nAssistant:"

            # Generate response
            try:
                result = model(
                    prompt,
                    max_new_tokens=100,  # bound the reply length rather than prompt + reply
                    num_return_sequences=1,
                    temperature=0.7,
                    do_sample=True,
                    pad_token_id=model.tokenizer.eos_token_id if hasattr(model, 'tokenizer') else None
                )

                if result and len(result) > 0:
                    generated_text = result[0]['generated_text']
                    # Extract just the assistant's response
                    if "Assistant:" in generated_text:
                        response = generated_text.split("Assistant:")[-1].strip()
                    elif "<|im_start|>assistant" in generated_text:
                        response = generated_text.split("<|im_start|>assistant")[-1].replace("<|im_end|>", "").strip()
                    else:
                        response = generated_text[len(prompt):].strip()

                    # Clean up response
                    response = response.split("Human:")[0].strip()
                    response = response.split("<|im_start|>")[0].strip()

                    if response:
                        history.append([message, response])
                        return history, ""
            except Exception as e:
                print(f"Error generating response: {e}")

        # Fallback responses
        fallback_responses = {
            "hello": "Hello! I'm powered by advanced Hugging Face models. How can I help you today? 🤖",
            "hi": "Hi there! I'm using state-of-the-art language models. What would you like to know? 😊",
            "how are you": "I'm running great on advanced AI models! Thanks for asking. How are you?",
            "what can you do": "I can help with text analysis, coding, conversation, and more using cutting-edge Hugging Face models!",
            "time": f"The current time is {datetime.now().strftime('%H:%M:%S')}",
            "date": f"Today's date is {datetime.now().strftime('%Y-%m-%d')}",
            "models": f"I'm currently using {AVAILABLE_MODELS.get(selected_model, {}).get('name', selected_model)} for our conversation!",
        }

        message_lower = message.lower()
        response = None
        for key, value in fallback_responses.items():
            if key in message_lower:
                response = value
                break

        if not response:
            if "?" in message:
                response = f"That's an interesting question! I'm powered by {AVAILABLE_MODELS.get(selected_model, {}).get('name', 'advanced models')} but I might need more context. Can you tell me more? 🤔"
            else:
                response = f"Thanks for sharing! I'm using {AVAILABLE_MODELS.get(selected_model, {}).get('name', 'an advanced model')} to assist you. How can I help further? 💭"

        history.append([message, response])
        return history, ""

    except Exception as e:
        error_response = f"I encountered an issue with the {AVAILABLE_MODELS.get(selected_model, {}).get('name', 'selected model')}. Let me help you with a simpler response! 😅"
        history.append([message, error_response])
        return history, ""


def code_generator(description, language, selected_model):
    """Generate code using advanced language models"""
    if not description:
        return "Please provide a description of what code you want to generate."
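    # Prompt format differs by model family below: Qwen instruct models get
    # ChatML-style <|im_start|> tags, while other causal LMs get a plain
    # instruction that opens a fenced code block for the model to complete.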
    try:
        model = load_model(selected_model, "text-generation")
        if model:
            if "qwen" in selected_model.lower():
                prompt = f"<|im_start|>system\nYou are an expert programmer.<|im_end|>\n<|im_start|>user\nGenerate {language} code for: {description}<|im_end|>\n<|im_start|>assistant\n"
            else:
                prompt = f"Generate {language} code for the following requirement:\n\n{description}\n\nCode:\n```{language.lower()}\n"

            result = model(
                prompt,
                max_new_tokens=200,  # bound the generated code length, independent of prompt size
                num_return_sequences=1,
                temperature=0.3,
                do_sample=True,
                pad_token_id=model.tokenizer.eos_token_id if hasattr(model, 'tokenizer') else None
            )

            if result and len(result) > 0:
                generated_text = result[0]['generated_text']

                # Extract code
                if "```" in generated_text:
                    code_parts = generated_text.split("```")
                    if len(code_parts) > 1:
                        code = code_parts[1].strip()
                        if code.startswith(language.lower()):
                            code = code[len(language):].strip()
                        return code
                elif "<|im_start|>assistant" in generated_text:
                    code = generated_text.split("<|im_start|>assistant")[-1].replace("<|im_end|>", "").strip()
                    return code
                else:
                    code = generated_text[len(prompt):].strip()
                    return code
    except Exception as e:
        print(f"Error generating code: {e}")

    # Fallback code templates
    fallback_templates = {
        "Python": f"""# Generated code for: {description}

def main():
    # TODO: Implement {description}
    print("Hello from generated code!")
    return True

if __name__ == "__main__":
    main()""",
        "JavaScript": f"""// Generated code for: {description}

function main() {{
    // TODO: Implement {description}
    console.log("Hello from generated code!");
    return true;
}}

main();""",
        "Java": f"""// Generated code for: {description}

public class GeneratedCode {{
    public static void main(String[] args) {{
        // TODO: Implement {description}
        System.out.println("Hello from generated code!");
    }}
}}""",
    }

    return fallback_templates.get(language, f"// {description}\n// Code generation not available for {language}")


def enhanced_code_formatter(code, language):
    """Enhanced code analysis and formatting"""
    if not code:
        return "Please enter some code to analyze."
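    # All metrics below are heuristic, line-based checks (substring matches for
    # definitions, imports, and comments); no parsing or AST analysis is done.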
    lines = code.split('\n')
    line_count = len(lines)
    non_empty_lines = len([l for l in lines if l.strip()])

    analysis = f"""
💻 **Enhanced Code Analysis ({language})**

**📊 Statistics:**
- Total lines: {line_count}
- Non-empty lines: {non_empty_lines}
- Blank lines: {line_count - non_empty_lines}
"""

    if language.lower() == 'python':
        functions = [line for line in lines if 'def ' in line]
        classes = [line for line in lines if 'class ' in line]
        imports = [line for line in lines if line.strip().startswith('import') or line.strip().startswith('from')]
        comments = [line for line in lines if line.strip().startswith('#')]

        analysis += f"""
**🐍 Python Specifics:**
- Functions: {len(functions)}
- Classes: {len(classes)}
- Import statements: {len(imports)}
- Comments: {len(comments)}

**🔍 Functions found:**
{chr(10).join([' - ' + func.strip() for func in functions[:10]])}
"""
    elif language.lower() == 'javascript':
        functions = [line for line in lines if 'function ' in line or '=>' in line]
        classes = [line for line in lines if 'class ' in line]
        imports = [line for line in lines if 'import ' in line or 'require(' in line]
        comments = [line for line in lines if line.strip().startswith('//') or line.strip().startswith('/*')]

        analysis += f"""
**🟨 JavaScript Specifics:**
- Functions: {len(functions)}
- Classes: {len(classes)}
- Import/Require statements: {len(imports)}
- Comments: {len(comments)}

**🔍 Functions found:**
{chr(10).join([' - ' + func.strip() for func in functions[:10]])}
"""
    else:
        comments = [line for line in lines if line.strip().startswith('//') or line.strip().startswith('#') or line.strip().startswith('/*')]
        analysis += f"""
**📝 General Analysis:**
- Comments: {len(comments)}
- Average line length: {sum(len(line) for line in lines) / len(lines):.1f} characters
"""

    # Code complexity estimation
    complexity_indicators = ['if ', 'for ', 'while ', 'switch ', 'case ', 'try ', 'catch ', 'elif ']
    complexity_count = sum(1 for line in lines for indicator in complexity_indicators if indicator in line.lower())

    if complexity_count == 0:
        complexity = "Simple"
    elif complexity_count <= 5:
        complexity = "Moderate"
    elif complexity_count <= 15:
        complexity = "Complex"
    else:
        complexity = "Very Complex"

    analysis += f"""
**🎯 Complexity Analysis:**
- Control structures: {complexity_count}
- Estimated complexity: {complexity}
- Code density: {(non_empty_lines / line_count * 100):.1f}%
"""
    return analysis


# Create the enhanced Gradio interface with advanced models
with gr.Blocks(
    theme=gr.themes.Soft(
        primary_hue="blue",
        secondary_hue="gray",
        neutral_hue="slate"
    ),
    title="🤖 Advanced AI Hub with Hugging Face Models 2025",
    css="""
    .gradio-container {
        max-width: 1400px !important;
    }
    .tab-nav button {
        font-size: 16px !important;
        font-weight: 600 !important;
    }
    .model-info {
        background: linear-gradient(45deg, #667eea 0%, #764ba2 100%);
        color: white;
        padding: 15px;
        border-radius: 10px;
        margin: 10px 0;
    }
    """
) as demo:
    gr.Markdown("""
    # 🤖 Advanced AI Hub with 2025 Hugging Face Models
    ### Powered by the latest and most advanced free open-source language models! 🚀
    **Featured Models**: Qwen 2.5 Series, DialoGPT, RoBERTa, BlenderBot and more!
    """)

    # Global model selector
    with gr.Row():
        model_selector = gr.Dropdown(
            choices=list(AVAILABLE_MODELS.keys()),
            value="Qwen/Qwen2.5-1.5B-Instruct",
            label="🧠 Select AI Model",
            info="Choose the AI model for analysis and chat"
        )
        model_info = gr.HTML(
            value=f"""