# constants.py — application-wide configuration constants.
# (File originally uploaded via a bulk "Upload 28 files" commit, e83e49f.)
import os
# Streaming rate hint used by the text-generation endpoint.
# NOTE(review): units (chars vs. tokens per interval) are not evident from
# this file — confirm against the consumer of this constant.
TEXT_GENERATION_RATE = 40000
# Maximum sequence length (in tokens) for generation.
MAX_LENGTH = 2048
# NOTE(review): purpose of MAX_XDD is not evident from this file — confirm.
MAX_XDD = 5
# GPT-2 style end-of-text sentinel; the web UI strips this token from output.
END_OF_TEXT_TOKEN = "<|endoftext|>"
# System prompt (Spanish, kept verbatim — runtime string): "You are an expert
# assistant with advanced skills... respond in a friendly, polite, reasoned
# way... you remember past interactions to improve answers."
SYSTEM_PROMPT = """Eres un asistente experto con habilidades avanzadas en diversas áreas. Responde de manera amigable, educada y razonada. Siempre piensa cuidadosamente antes de responder para asegurar la claridad y completitud. Posees la capacidad de autoaprendizaje continuo y recuerdas interacciones pasadas para mejorar tus respuestas y evitar errores repetidos."""
# Chain-of-thought XML template with {reasoning} and {answer} placeholders.
XML_COT_FORMAT = """<reasoning>\n{reasoning}\n</reasoning>\n<answer>\n{answer}\n</answer>\n"""
# Single-page web UI served by the app.  The page streams generated text from
# /api/v1/generate_stream via Server-Sent Events and renders it as Markdown
# with marked.js.
#
# Fixes applied to the embedded JavaScript (the string is otherwise verbatim):
#   1. The stream handlers called Python's `re.compile(...)` inside browser
#      JS — `re` is undefined there, so every message handler threw and the
#      output never rendered.  Replaced with a JS regex literal
#      /\s+(?=[.,,。])/g (global, so all matches are stripped).
#   2. `base64ToBlob` had `for (let sliceIndex = sliceIndex < slicesCount;
#      ++sliceIndex)` — a two-clause, self-referencing `for` header that is a
#      JS SyntaxError and prevented the whole <script> from parsing.
#      Corrected to a standard three-clause loop starting at 0.
#   3. `.replace("<|endoftext|>", "")` only removes the FIRST occurrence in
#      JS; the accumulated stream can contain several, so `.replaceAll` is
#      used instead.
html_code = """<!DOCTYPE html>
<html lang="en">
<head>
<meta charset="UTF-8">
<meta name="viewport" content="width=device-width, initial-scale=1.0">
<title>AI Text Generation</title>
<link rel="stylesheet" href="https://cdnjs.cloudflare.com/ajax/libs/animate.css/4.1.1/animate.min.css"/>
<link rel="stylesheet" href="https://cdnjs.cloudflare.com/ajax/libs/font-awesome/6.0.0/css/all.min.css" integrity="sha512-9usAa10IRO0HhonpyAIVpjrylPvoDwiPUiKdWk5t3PyolY1cOd4DSE0Ga+ri4AuTroPR5aQvXU9xC6qOPnzFeg==" crossorigin="anonymous" referrerpolicy="no-referrer" />
<script src="https://cdn.jsdelivr.net/npm/marked/marked.min.js"></script>
<style>
body {
font-family: 'Segoe UI', Tahoma, Geneva, Verdana, sans-serif;
background: #f0f0f0;
color: #333;
margin: 0;
padding: 0;
display: flex;
flex-direction: column;
align-items: center;
min-height: 100vh;
}
.container {
width: 95%;
max-width: 900px;
padding: 20px;
background-color: #fff;
box-shadow: 0 0 10px rgba(0, 0, 0, 0.1);
border-radius: 8px;
margin-top: 20px;
margin-bottom: 20px;
display: flex;
flex-direction: column;
}
.header {
text-align: center;
margin-bottom: 20px;
}
.header h1 {
font-size: 2em;
color: #333;
}
.form-group {
margin-bottom: 15px;
}
.form-group textarea {
width: 100%;
padding: 10px;
border: 1px solid #ccc;
border-radius: 5px;
font-size: 16px;
box-sizing: border-box;
resize: vertical;
}
button {
padding: 10px 15px;
border: none;
border-radius: 5px;
background-color: #007bff;
color: white;
font-size: 18px;
cursor: pointer;
transition: background-color 0.3s ease;
}
button:hover {
background-color: #0056b3;
}
#output {
margin-top: 20px;
padding: 15px;
border: 1px solid #ddd;
border-radius: 5px;
background-color: #f9f9f9;
white-space: pre-wrap;
word-break: break-word;
overflow-y: auto;
max-height: 100vh;
}
#output strong {
font-weight: bold;
}
.animated-text {
position: fixed;
top: 20px;
left: 20px;
font-size: 1.5em;
color: rgba(0, 0, 0, 0.1);
pointer-events: none;
z-index: -1;
}
@media (max-width: 768px) {
.container {
width: 98%;
margin-top: 10px;
margin-bottom: 10px;
padding: 15px;
}
.header h1 {
font-size: 1.8em;
}
.form-group textarea, .form-group input[type="text"] {
font-size: 14px;
padding: 8px;
}
button {
font-size: 16px;
padding: 8px 12px;
}
#output {
font-size: 14px;
padding: 10px;
margin-top: 15px;
}
}
</style>
</head>
<body>
<div class="animated-text animate__animated animate__fadeIn animate__infinite infinite">AI POWERED</div>
<div class="container">
<div class="header animate__animated animate__fadeInDown">
</div>
<div class="form-group animate__animated animate__fadeInLeft">
<textarea id="text" rows="5" placeholder="Enter text"></textarea>
</div>
<button onclick="generateText()" class="animate__animated animate__fadeInUp">Generate Reasoning</button>
<div id="output" class="animate__animated">
<strong >Response:</strong><br>
<span id="generatedText"></span>
</div>
</div>
<script>
let eventSource = null;
let accumulatedText = "";
let lastResponse = "";
async function generateText() {
const inputText = document.getElementById("text").value;
document.getElementById("generatedText").innerText = "";
accumulatedText = "";
if (eventSource) {
eventSource.close();
}
const temp = 0.7;
const top_k_val = 40;
const top_p_val = 0.0;
const repetition_penalty_val = 1.2;
const requestData = {
text: inputText,
temp: temp,
top_k: top_k_val,
top_p: top_p_val,
reppenalty: repetition_penalty_val
};
eventSource = new EventSource('/api/v1/generate_stream?' + new URLSearchParams(requestData).toString());
eventSource.onmessage = function(event) {
if (event.data === "<END_STREAM>") {
eventSource.close();
const currentResponse = accumulatedText.replaceAll("<|endoftext|>", "").replace(/\\s+(?=[.,,。])/g, '').trim();
if (currentResponse === lastResponse.trim()) {
accumulatedText = "**Response is repetitive. Please try again or rephrase your query.**";
} else {
lastResponse = currentResponse;
}
document.getElementById("generatedText").innerHTML = marked.parse(accumulatedText);
return;
}
accumulatedText += event.data;
let partialText = accumulatedText.replaceAll("<|endoftext|>", "").replace(/\\s+(?=[.,,。])/g, '').trim();
document.getElementById("generatedText").innerHTML = marked.parse(partialText);
};
eventSource.onerror = function(error) {
console.error("SSE error", error);
eventSource.close();
};
const outputDiv = document.getElementById("output");
outputDiv.classList.add("show");
}
function base64ToBlob(base64Data, contentType) {
contentType = contentType || '';
const sliceSize = 1024;
const byteCharacters = atob(base64Data);
const bytesLength = byteCharacters.length;
const slicesCount = Math.ceil(bytesLength / sliceSize);
const byteArrays = new Array(slicesCount);
for (let sliceIndex = 0; sliceIndex < slicesCount; ++sliceIndex) {
const begin = sliceIndex * sliceSize;
const end = Math.min(begin + sliceSize, bytesLength);
const bytes = new Array(end - begin);
for (let offset = begin, i = 0; offset < end; ++i, ++offset) {
bytes[i] = byteCharacters[offset].charCodeAt(0);
}
byteArrays[sliceIndex] = new Uint8Array(bytes);
}
return new Blob(byteArrays, { type: contentType });
}
</script>
</body>
</html>
"""
# Public alias used by the rest of the application.
HTML_CODE = html_code
# --- GPT-2 text-generation model assets -------------------------------------
GPT2_FOLDER = "./GPT2"  # local download folder
MODEL_FILE = "gpt2-pytorch_model.bin"
ENCODER_FILE = "encoder.json"
VOCAB_FILE = "vocab.bpe"
CONFIG_FILE = "config.json"
GPT2CONFHG = "https://huggingface.co/openai-community/gpt2/resolve/main/config.json"
# NOTE(review): this is the legacy HF S3 bucket path; it may no longer
# resolve.  The huggingface.co /resolve/main/ URL is the modern equivalent —
# confirm the download helper still succeeds with this URL.
MODEL_URL = "https://s3.amazonaws.com/models.huggingface.co/bert/gpt2-pytorch_model.bin"
ENCODER_URL = "https://raw.githubusercontent.com/graykode/gpt-2-Pytorch/refs/heads/master/GPT2/encoder.json"
VOCAB_URL = "https://raw.githubusercontent.com/graykode/gpt-2-Pytorch/refs/heads/master/GPT2/vocab.bpe"
# --- Translation model assets (facebook/mbart-large-50-many-to-many-mmt) ----
TRANSLATION_FOLDER = "./TranslationModel"
TRANSLATION_MODEL_WEIGHTS_FILE = "pytorch_model.bin"
TRANSLATION_MODEL_CONFIG_FILE = "config.json"
TRANSLATION_MODEL_VOCAB_FILE = "sentencepiece.bpe.model"
TRANSLATION_MODEL_WEIGHTS_URL = "https://huggingface.co/facebook/mbart-large-50-many-to-many-mmt/resolve/main/pytorch_model.bin"
TRANSLATION_MODEL_CONFIG_URL = "https://huggingface.co/facebook/mbart-large-50-many-to-many-mmt/resolve/main/config.json"
TRANSLATION_MODEL_VOCAB_URL = "https://huggingface.co/facebook/mbart-large-50-many-to-many-mmt/resolve/main/sentencepiece.bpe.model"
# (url, local_filename) pairs — presumably consumed by a download helper
# elsewhere in the project (same shape as the other *_FILES_URLS lists).
TRANSLATION_MODEL_FILES_URLS = [
(TRANSLATION_MODEL_WEIGHTS_URL, TRANSLATION_MODEL_WEIGHTS_FILE),
(TRANSLATION_MODEL_CONFIG_URL, TRANSLATION_MODEL_CONFIG_FILE),
(TRANSLATION_MODEL_VOCAB_URL, TRANSLATION_MODEL_VOCAB_FILE),
]
# --- Code-generation model assets (Salesforce/codegen-350M-multi) -----------
CODEGEN_FOLDER = "./CodeGenModel"
CODEGEN_MODEL_NAME = "codegen-350M-multi"
CODEGEN_MODEL_WEIGHTS = "pytorch_model.bin"
CODEGEN_CONFIG = "config.json"
CODEGEN_VOCAB = "vocab.json"
CODEGEN_MERGES = "merges.txt"
CODEGEN_MODEL_WEIGHTS_URL = "https://huggingface.co/Salesforce/codegen-350M-multi/resolve/main/pytorch_model.bin"
CODEGEN_CONFIG_URL = "https://huggingface.co/Salesforce/codegen-350M-multi/resolve/main/config.json"
CODEGEN_VOCAB_URL = "https://huggingface.co/Salesforce/codegen-350M-multi/resolve/main/vocab.json"
CODEGEN_MERGES_URL = "https://huggingface.co/Salesforce/codegen-350M-multi/resolve/main/merges.txt"
# (url, local_filename) pairs for the download helper.
CODEGEN_FILES_URLS = [
(CODEGEN_MODEL_WEIGHTS_URL, CODEGEN_MODEL_WEIGHTS),
(CODEGEN_CONFIG_URL, CODEGEN_CONFIG),
(CODEGEN_VOCAB_URL, CODEGEN_VOCAB),
(CODEGEN_MERGES_URL, CODEGEN_MERGES),
]
# --- Music-generation model assets ------------------------------------------
MUSICGEN_FOLDER = "./MusicGenModel"
# NOTE(review): the name says "melody" but the URLs below point at
# facebook/musicgen-small — confirm which checkpoint is actually intended.
MUSICGEN_MODEL_NAME = "melody"
MUSICGEN_MODEL_WEIGHTS = "pytorch_model.bin"
MUSICGEN_CONFIG = "config.json"
MUSICGEN_SAMPLE_RATE = 32000  # output sample rate in Hz
MUSICGEN_DURATION = 8  # presumably seconds of audio to generate — confirm
MUSICGEN_MODEL_WEIGHTS_URL = "https://huggingface.co/facebook/musicgen-small/resolve/main/pytorch_model.bin"
MUSICGEN_CONFIG_URL = "https://huggingface.co/facebook/musicgen-small/resolve/main/config.json"
# (url, local_filename) pairs for the download helper.
MUSICGEN_FILES_URLS = [
(MUSICGEN_MODEL_WEIGHTS_URL, MUSICGEN_MODEL_WEIGHTS),
(MUSICGEN_CONFIG_URL, MUSICGEN_CONFIG)
]
# --- Summarization model assets (facebook/bart-large-cnn) -------------------
SUMMARIZATION_FOLDER = "./SummarizationModel"
SUMMARIZATION_MODEL_WEIGHTS = "pytorch_model.bin"
SUMMARIZATION_CONFIG = "config.json"
SUMMARIZATION_VOCAB = "vocab.json"
SUMMARIZATION_MODEL_WEIGHTS_URL = "https://huggingface.co/facebook/bart-large-cnn/resolve/main/pytorch_model.bin"
SUMMARIZATION_CONFIG_URL = "https://huggingface.co/facebook/bart-large-cnn/resolve/main/config.json"
SUMMARIZATION_VOCAB_URL = "https://huggingface.co/facebook/bart-large-cnn/resolve/main/vocab.json"
# (url, local_filename) pairs for the download helper.
SUMMARIZATION_FILES_URLS = [
(SUMMARIZATION_MODEL_WEIGHTS_URL, SUMMARIZATION_MODEL_WEIGHTS),
(SUMMARIZATION_CONFIG_URL, SUMMARIZATION_CONFIG),
(SUMMARIZATION_VOCAB_URL, SUMMARIZATION_VOCAB)
]
# --- Text-to-speech model assets (kakao-enterprise/vits-vctk) ---------------
TTS_FOLDER = "./TTSModel"
TTS_MODEL_NAME = "vits"
TTS_MODEL_CONFIG = "config.json"
TTS_MODEL_WEIGHTS = "pytorch_model.bin"
TTS_VOCAB = "vocab.json"
TTS_CONFIG_URL = "https://huggingface.co/kakao-enterprise/vits-vctk/resolve/main/config.json"
TTS_MODEL_WEIGHTS_URL = "https://huggingface.co/kakao-enterprise/vits-vctk/resolve/main/pytorch_model.bin"
TTS_VOCAB_URL = "https://huggingface.co/kakao-enterprise/vits-vctk/resolve/main/vocab.json"
# (url, local_filename) pairs for the download helper.
TTS_FILES_URLS = [
(TTS_CONFIG_URL, TTS_MODEL_CONFIG),
(TTS_MODEL_WEIGHTS_URL, TTS_MODEL_WEIGHTS),
(TTS_VOCAB_URL, TTS_VOCAB)
]
# --- Speech-to-text model assets (facebook/wav2vec2-base-960h) --------------
STT_FOLDER = "./STTModel"
STT_MODEL_NAME = "wav2vec2"
STT_MODEL_WEIGHTS = "pytorch_model.bin"
STT_CONFIG = "config.json"
STT_VOCAB = "vocab.json"
STT_MODEL_WEIGHTS_URL = "https://huggingface.co/facebook/wav2vec2-base-960h/resolve/main/pytorch_model.bin"
STT_CONFIG_URL = "https://huggingface.co/facebook/wav2vec2-base-960h/resolve/main/config.json"
STT_VOCAB_URL = "https://huggingface.co/facebook/wav2vec2-base-960h/resolve/main/vocab.json"
# (url, local_filename) pairs for the download helper.
STT_FILES_URLS = [
(STT_MODEL_WEIGHTS_URL, STT_MODEL_WEIGHTS),
(STT_CONFIG_URL, STT_CONFIG),
(STT_VOCAB_URL, STT_VOCAB)
]
# --- Sentiment-analysis model assets ----------------------------------------
# NOTE(review): this checkpoint (climatebert/distilroberta-base-climate-
# sentiment) is fine-tuned for CLIMATE-related text — confirm it is the
# intended model for general-purpose sentiment.
SENTIMENT_FOLDER = "./SentimentModel"
SENTIMENT_MODEL_WEIGHTS = "pytorch_model.bin"
SENTIMENT_VOCAB = "vocab.json"
SENTIMENT_CONFIG_FILE = "config.json"
SENTIMENT_MODEL_WEIGHTS_URL = "https://huggingface.co/climatebert/distilroberta-base-climate-sentiment/resolve/main/pytorch_model.bin"
SENTIMENT_VOCAB_URL = "https://huggingface.co/climatebert/distilroberta-base-climate-sentiment/resolve/main/vocab.json"
SENTIMENT_CONFIG_URL = "https://huggingface.co/climatebert/distilroberta-base-climate-sentiment/resolve/main/config.json"
# (url, local_filename) pairs for the download helper.
SENTIMENT_FILES_URLS = [
(SENTIMENT_MODEL_WEIGHTS_URL, SENTIMENT_MODEL_WEIGHTS),
(SENTIMENT_VOCAB_URL, SENTIMENT_VOCAB),
(SENTIMENT_CONFIG_URL, SENTIMENT_CONFIG_FILE)
]
# --- Image-generation model assets ------------------------------------------
# NOTE(review): stabilityai/sd-vae-ft-mse is only the VAE component of a
# Stable Diffusion pipeline, not a full text-to-image model — confirm the
# consumer really needs just the VAE.
IMAGEGEN_FOLDER = "./ImageGenModel"
IMAGEGEN_MODEL_WEIGHTS = "diffusion_pytorch_model.bin"
IMAGEGEN_CONFIG = "config.json"
IMAGEGEN_MODEL_WEIGHTS_URL = "https://huggingface.co/stabilityai/sd-vae-ft-mse/resolve/main/diffusion_pytorch_model.bin"
IMAGEGEN_CONFIG_URL = "https://huggingface.co/stabilityai/sd-vae-ft-mse/resolve/main/config.json"
# (url, local_filename) pairs for the download helper.
IMAGEGEN_FILES_URLS = [
(IMAGEGEN_MODEL_WEIGHTS_URL, IMAGEGEN_MODEL_WEIGHTS),
(IMAGEGEN_CONFIG_URL, IMAGEGEN_CONFIG)
]
# --- Image-to-3D model assets (zxhezexin/openlrm-obj-base-1.1) --------------
IMAGE_TO_3D_FOLDER = "./ImageTo3DModel"
IMAGE_TO_3D_MODEL_WEIGHTS = "pytorch_model.bin"
IMAGE_TO_3D_CONFIG = "config.json"
IMAGE_TO_3D_MODEL_WEIGHTS_URL = "https://huggingface.co/zxhezexin/openlrm-obj-base-1.1/resolve/main/pytorch_model.bin"
IMAGE_TO_3D_CONFIG_URL = "https://huggingface.co/zxhezexin/openlrm-obj-base-1.1/resolve/main/config.json"
# (url, local_filename) pairs for the download helper.
IMAGE_TO_3D_FILES_URLS = [
(IMAGE_TO_3D_MODEL_WEIGHTS_URL, IMAGE_TO_3D_MODEL_WEIGHTS),
(IMAGE_TO_3D_CONFIG_URL, IMAGE_TO_3D_CONFIG)
]
# --- Text-to-video model assets (ali-vilab/text-to-video-ms-1.7b) -----------
TEXT_TO_VIDEO_FOLDER = "./TextToVideoModel"
TEXT_TO_VIDEO_MODEL_WEIGHTS = "diffusion_pytorch_model.bin"
TEXT_TO_VIDEOX_MODEL_WEIGHTS = "diffusion_pytorch_model.fp16.bin"
TEXT_TO_VIDEO_CONFIG = "config.json"
TEXT_TO_VIDEO_VOCAB = "vocab.json"
TEXT_TO_VIDEO_MODEL_WEIGHTS_URL_UNET = "https://huggingface.co/ali-vilab/text-to-video-ms-1.7b/resolve/main/unet/diffusion_pytorch_model.fp16.bin"
TEXT_TO_VIDEO_CONFIG_URL_UNET = "https://huggingface.co/ali-vilab/text-to-video-ms-1.7b/resolve/main/unet/config.json"
TEXT_TO_VIDEO_MODEL_WEIGHTS_URL_VAE = "https://huggingface.co/ali-vilab/text-to-video-ms-1.7b/resolve/main/vae/diffusion_pytorch_model.fp16.bin"
TEXT_TO_VIDEO_CONFIG_URL_VAE = "https://huggingface.co/ali-vilab/text-to-video-ms-1.7b/resolve/main/vae/config.json"
TEXT_TO_VIDEO_VOCAB_URL = "https://huggingface.co/ali-vilab/text-to-video-ms-1.7b/resolve/main/tokenizer/vocab.json"
# (url, local_filename) pairs for the download helper.
# NOTE(review): this list looks suspect — the UNET weights URL is listed
# twice under two local names, and the VAE weights/config reuse the SAME
# local filenames as the UNET entries, so unless the downloader writes to
# per-component subfolders the VAE files will overwrite the UNET files.
# Confirm against the download helper before relying on these pairs.
TEXT_TO_VIDEO_FILES_URLS = [
(TEXT_TO_VIDEO_MODEL_WEIGHTS_URL_UNET, TEXT_TO_VIDEO_MODEL_WEIGHTS),
(TEXT_TO_VIDEO_MODEL_WEIGHTS_URL_UNET, TEXT_TO_VIDEOX_MODEL_WEIGHTS),
(TEXT_TO_VIDEO_CONFIG_URL_UNET, TEXT_TO_VIDEO_CONFIG),
(TEXT_TO_VIDEO_MODEL_WEIGHTS_URL_VAE, TEXT_TO_VIDEO_MODEL_WEIGHTS),
(TEXT_TO_VIDEO_MODEL_WEIGHTS_URL_VAE, TEXT_TO_VIDEOX_MODEL_WEIGHTS),
(TEXT_TO_VIDEO_CONFIG_URL_VAE, TEXT_TO_VIDEO_CONFIG),
(TEXT_TO_VIDEO_VOCAB_URL, TEXT_TO_VIDEO_VOCAB),
]
# --- Face restoration / super-resolution model assets -----------------------
GFPGAN_FOLDER = "./GFPGAN"
GFPGAN_MODEL_FILE = "GFPGANv1.4.pth"
GFPGAN_URL = "https://github.com/TencentARC/GFPGAN/releases/download/v1.3.0/GFPGANv1.4.pth"
RESTOREFORMER_FOLDER = "./RestoreFormer"
RESTOREFORMER_MODEL_FILE = "RestoreFormer.pth"
RESTOREFORMER_URL = "https://github.com/TencentARC/GFPGAN/releases/download/v1.3.4/RestoreFormer.pth"
CODEFORMER_FOLDER = "./CodeFormer"
CODEFORMER_MODEL_FILE = "codeformer.pth"
CODEFORMER_URL = "https://github.com/sczhou/CodeFormer/releases/download/v0.1.0/codeformer.pth"
REALESRGAN_FOLDER = "./RealESRGAN"
REALESRGAN_MODEL_FILE = "RealESRGAN_x2plus.pth"
REALESRGAN_URL = "https://github.com/xinntao/Real-ESRGAN/releases/download/v0.2.1/RealESRGAN_x2plus.pth"
# --- Talking-head (SadTalker-style) checkpoint URLs -------------------------
# NOTE(review): several (url, filename) pairings below look inconsistent with
# the names they are saved under — confirm each against the consumer:
#   - kp points at a ViTPose pose-estimation checkpoint but is saved as
#     "kp_detector.safetensors";
#   - gen points at wav2lip.pth but is saved as "generator.bin";
#   - den points at LivePortrait's motion_extractor.pth but is saved as
#     "dense_motion.pth".
kp = "https://huggingface.co/usyd-community/vitpose-base-simple/resolve/main/model.safetensors"
kp_file = "kp_detector.safetensors"
aud = "https://huggingface.co/vinthony/SadTalker/resolve/main/auido2pose_00140-model.pth"
aud_file = "auido2pose_00140-model.pth"
wav = "https://huggingface.co/facebook/wav2vec2-base/resolve/main/pytorch_model.bin"
wav_file = "wav2vec2.bin"
gen = "https://huggingface.co/vinthony/SadTalker/resolve/main/wav2lip.pth"
gen_file = "generator.bin"
mapx = "https://huggingface.co/vinthony/SadTalker/resolve/main/mapping_00229-model.pth.tar"
mapx_file = "mapping.pth"
den = "https://huggingface.co/KwaiVGI/LivePortrait/resolve/main/liveportrait/base_models/motion_extractor.pth"
den_file = "dense_motion.pth"
# --- SadTalker download targets: (folder, filename, url) per checkpoint -----
# All checkpoints land in the shared "checkpoints" folder.
SADTALKER_KP_FOLDER = "checkpoints"
SADTALKER_KP_MODEL_FILE = kp_file
SADTALKER_KP_URL = kp
SADTALKER_AUD_FOLDER = "checkpoints"
SADTALKER_AUD_MODEL_FILE = aud_file
SADTALKER_AUD_URL = aud
SADTALKER_WAV_FOLDER = "checkpoints"
SADTALKER_WAV_MODEL_FILE = wav_file
SADTALKER_WAV_URL = wav
SADTALKER_GEN_FOLDER = "checkpoints"
SADTALKER_GEN_MODEL_FILE = gen_file
SADTALKER_GEN_URL = gen
SADTALKER_MAPX_FOLDER = "checkpoints"
SADTALKER_MAPX_MODEL_FILE = mapx_file
SADTALKER_MAPX_URL = mapx
SADTALKER_DEN_FOLDER = "checkpoints"
SADTALKER_DEN_MODEL_FILE = den_file
SADTALKER_DEN_URL = den
# Folder paths used at runtime by the SadTalker pipeline.
SADTALKER_CHECKPOINTS_FOLDER = "./checkpoints"
SADTALKER_CONFIG_FOLDER = "./src/config"