from main import *
from tts_api import tts_api as tts_module_api
from stt_api import stt_api as stt_module_api
from sentiment_api import sentiment_api as sentiment_module_api
from imagegen_api import imagegen_api as imagegen_module_api
from musicgen_api import musicgen_api as musicgen_module_api
from translation_api import translation_api as translation_module_api
from codegen_api import codegen_api as codegen_module_api
from text_to_video_api import text_to_video_api as text_to_video_module_api
from summarization_api import summarization_api as summarization_module_api
from image_to_3d_api import image_to_3d_api as image_to_3d_module_api
from xtts_api import xtts_api as xtts_module_api
from flask import Flask, request, jsonify, Response, send_file, stream_with_context
from flask_cors import CORS
import io
import queue
import base64
import threading
import gradio as gr

app = Flask(__name__)
CORS(app)

# NOTE: the original inline HTML/JS page was lost in extraction; only its
# visible text ("AI Text Generation", "AI POWERED", "Response:") survives.
# This minimal placeholder keeps that recoverable content.
html_code = """<!DOCTYPE html>
<html>
<head><title>AI Text Generation</title></head>
<body>
  <h1>AI POWERED</h1>
  <p>Response:</p>
</body>
</html>"""

feedback_queue = queue.Queue()
# `reasoning_queue` is not defined in this file; it is assumed to be provided
# by the star import from `main`, where a worker thread consumes generation
# requests and replies on the per-request `response_queue`.


@app.route("/")
def index():
    return html_code


@app.route("/api/v1/generate_stream", methods=["GET"])
def generate_stream():
    text = request.args.get("text", "")
    temp = float(request.args.get("temp", 0.7))
    top_k = int(request.args.get("top_k", 40))
    top_p = float(request.args.get("top_p", 0.0))
    reppenalty = float(request.args.get("reppenalty", 1.2))
    response_queue = queue.Queue()
    reasoning_queue.put({
        'text_input': text,
        'temperature': temp,
        'top_k': top_k,
        'top_p': top_p,
        'repetition_penalty': reppenalty,
        'response_queue': response_queue
    })

    @stream_with_context
    def event_stream():
        while True:
            output = response_queue.get()
            if "error" in output:
                # The original error sentinel was stripped in extraction;
                # "[ERROR]" is a placeholder, not the original token.
                yield "data: [ERROR]\n\n"
                break
            text_chunk = output.get("text")
            if text_chunk:
                for word in text_chunk.split(' '):
                    clean_word = word.strip()
                    if clean_word:
                        yield "data: " + clean_word + "\n\n"
                # Likewise a placeholder for the lost end-of-stream marker.
                yield "data: [END_STREAM]\n\n"
                break

    return Response(event_stream(), mimetype="text/event-stream")


@app.route("/api/v1/generate", methods=["POST"])
def generate():
    data = request.get_json()
    text = data.get("text", "")
    temp = float(data.get("temp", 0.7))
    top_k = int(data.get("top_k", 40))
    top_p = float(data.get("top_p", 0.0))
    reppenalty = float(data.get("reppenalty", 1.2))
    response_queue = queue.Queue()
    reasoning_queue.put({
        'text_input': text,
        'temperature': temp,
        'top_k': top_k,
        'top_p': top_p,
        'repetition_penalty': reppenalty,
        'response_queue': response_queue
    })
    output = response_queue.get()
    if "error" in output:
        return jsonify({"error": output["error"]}), 500
    result_text = output.get("text", "").strip()
    return jsonify({"response": result_text})


@app.route("/api/v1/feedback", methods=["POST"])
def feedback():
    data = request.get_json()
    feedback_text = data.get("feedback_text")
    correct_category = data.get("correct_category")
    if feedback_text and correct_category:
        feedback_queue.put((feedback_text, correct_category))
        return jsonify({"status": "feedback received"})
    return jsonify({"status": "feedback failed"}), 400


@app.route("/api/v1/tts", methods=["POST"])
def tts_api():
    return tts_module_api()


@app.route("/api/v1/stt", methods=["POST"])
def stt_api():
    return stt_module_api()


@app.route("/api/v1/sentiment", methods=["POST"])
def sentiment_api():
    return sentiment_module_api()


@app.route("/api/v1/imagegen", methods=["POST"])
def imagegen_api():
    return imagegen_module_api()


@app.route("/api/v1/musicgen", methods=["POST"])
def musicgen_api():
    return musicgen_module_api()


@app.route("/api/v1/translation", methods=["POST"])
def translation_api():
    return translation_module_api()


@app.route("/api/v1/codegen", methods=["POST"])
def codegen_api():
    return codegen_module_api()


@app.route("/api/v1/text_to_video", methods=["POST"])
def text_to_video_api():
    return text_to_video_module_api()


@app.route("/api/v1/summarization", methods=["POST"])
def summarization_api():
    return summarization_module_api()


@app.route("/api/v1/image_to_3d", methods=["POST"])
def image_to_3d_api():
    return image_to_3d_module_api()


@app.route("/api/v1/xtts_clone", methods=["POST"])
def xtts_clone_api():
    return xtts_module_api()


@app.route("/api/v1/sadtalker", methods=["POST"])
def sadtalker():
    from sadtalker_api import router as sadtalker_router
    return sadtalker_router.create_video()
if __name__ == "__main__":
    with gr.Blocks() as demo:
        gr.Markdown("## AI Powerhouse")
        with gr.Tab("Text Generation"):
            text_input = gr.Textbox(lines=5, placeholder="Enter text")
            text_output = gr.Markdown()
            text_button = gr.Button("Generate Text")

            def gradio_generate(text):
                # Mirrors /api/v1/generate with its default sampling settings.
                # The Flask view `generate` cannot be used directly as a Gradio
                # callback because it reads from the request context, so this
                # wrapper enqueues the request and waits for the worker's reply.
                response_queue = queue.Queue()
                reasoning_queue.put({
                    'text_input': text,
                    'temperature': 0.7,
                    'top_k': 40,
                    'top_p': 0.0,
                    'repetition_penalty': 1.2,
                    'response_queue': response_queue
                })
                output = response_queue.get()
                if "error" in output:
                    return output["error"]
                return output.get("text", "").strip()

            text_button.click(gradio_generate, inputs=text_input, outputs=text_output)
        # The remaining tabs call the imported *_module_api functions directly
        # (as the XTTS tab already did); this assumes those functions accept
        # the component values as positional arguments outside a request
        # context. Wrap them like `gradio_generate` above if they do not.
        with gr.Tab("Image Generation"):
            image_text_input = gr.Textbox(lines=3, placeholder="Enter prompt for image")
            image_output = gr.Image()
            image_button = gr.Button("Generate Image")
            image_button.click(imagegen_module_api, inputs=image_text_input, outputs=image_output)
        with gr.Tab("Music Generation"):
            music_text_input = gr.Textbox(lines=3, placeholder="Enter prompt for music")
            music_output = gr.Audio()
            music_button = gr.Button("Generate Music")
            music_button.click(musicgen_module_api, inputs=music_text_input, outputs=music_output)
        with gr.Tab("Code Generation"):
            code_text_input = gr.Textbox(lines=3, placeholder="Enter prompt for code")
            code_output = gr.File()
            code_button = gr.Button("Generate Code")
            code_button.click(codegen_module_api, inputs=code_text_input, outputs=code_output)
        with gr.Tab("Text to Video"):
            video_text_input = gr.Textbox(lines=3, placeholder="Enter prompt for video")
            video_output = gr.Video()
            video_button = gr.Button("Generate Video")
            video_button.click(text_to_video_module_api, inputs=video_text_input, outputs=video_output)
        with gr.Tab("Summarization"):
            summary_text_input = gr.Textbox(lines=5, placeholder="Enter text to summarize")
            summary_output = gr.Textbox()
            summary_button = gr.Button("Summarize")
            summary_button.click(summarization_module_api, inputs=summary_text_input, outputs=summary_output)
        with gr.Tab("Translation"):
            translate_text_input = gr.Textbox(lines=3, placeholder="Enter text to translate")
            translate_lang_dropdown = gr.Dropdown(['es', 'en', 'fr', 'de'], value='es', label="Target Language")
            translation_output = gr.Textbox()
            translate_button = gr.Button("Translate")
            translate_button.click(translation_module_api, inputs=[translate_text_input, translate_lang_dropdown], outputs=translation_output)
        with gr.Tab("Sentiment Analysis"):
            sentiment_text_input = gr.Textbox(lines=3, placeholder="Enter text for sentiment analysis")
            sentiment_output = gr.Textbox()
            sentiment_button = gr.Button("Analyze Sentiment")
            sentiment_button.click(sentiment_module_api, inputs=sentiment_text_input, outputs=sentiment_output)
        with gr.Tab("Text to Speech"):
            tts_text_input = gr.Textbox(lines=3, placeholder="Enter text for speech")
            tts_output = gr.Audio()
            tts_button = gr.Button("Generate Speech")
            tts_button.click(tts_module_api, inputs=tts_text_input, outputs=tts_output)
        with gr.Tab("Voice Cloning (XTTS)"):
            xtts_text_input = gr.Textbox(lines=3, placeholder="Enter text for voice cloning")
            xtts_audio_input = gr.Audio(source="upload", type="filepath", label="Reference Audio for Voice Cloning")
            xtts_output = gr.Audio()
            xtts_button = gr.Button("Clone Voice")
            xtts_button.click(xtts_module_api, inputs=[xtts_text_input, xtts_audio_input], outputs=xtts_output)
        with gr.Tab("Speech to Text"):
            stt_audio_input = gr.Audio(source="microphone", type="filepath")
            stt_output = gr.Textbox()
            stt_button = gr.Button("Transcribe Speech")
            stt_button.click(stt_module_api, inputs=stt_audio_input, outputs=stt_output)
        with gr.Tab("Image to 3D"):
            image_3d_input = gr.Image(source="upload", type="filepath")
            model_3d_output = gr.File()
            image_3d_button = gr.Button("Generate 3D Model")
            image_3d_button.click(image_to_3d_module_api, inputs=image_3d_input, outputs=model_3d_output)

    # gr.routes.App(demo) is FastAPI-based and has no .run(), and rebinding the
    # name `app` would shadow the Flask app above. As a sketch, serve the REST
    # API and the Gradio UI on separate ports (port 5000 for Flask is an
    # arbitrary choice, not from the original).
    threading.Thread(
        target=lambda: app.run(host="0.0.0.0", port=5000),
        daemon=True,
    ).start()
    demo.launch(server_name="0.0.0.0", server_port=7860)
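
# Example calls against the REST endpoints above (illustrative sketch: the
# query parameters and JSON fields match what the handlers parse, but the
# localhost:5000 host/port reflects the launch code above, not the original
# deployment). Not executed as part of this module.
#
#   curl -N "http://localhost:5000/api/v1/generate_stream?text=hello&temp=0.7&top_k=40"
#
#   curl -X POST "http://localhost:5000/api/v1/generate" \
#        -H "Content-Type: application/json" \
#        -d '{"text": "hello", "temp": 0.7, "top_k": 40, "top_p": 0.0, "reppenalty": 1.2}'
#
# A minimal Python SSE consumer (assumes the `requests` package is installed):
#
#   import requests
#
#   with requests.get(
#       "http://localhost:5000/api/v1/generate_stream",
#       params={"text": "hello", "temp": 0.7},
#       stream=True,
#   ) as resp:
#       for line in resp.iter_lines(decode_unicode=True):
#           # Each SSE event carries one whitespace-delimited token.
#           if line and line.startswith("data: "):
#               print(line[len("data: "):], end=" ")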