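"""Gradio chatbot app for the SS-Engg. Dept.

One chat pane answers queries over the Model-TS vector store
(retrieval-augmented generation); the other answers questions on
Indian/International Standards directly from an LLM. Intended to run
as a Hugging Face Space."""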
# import initialize
# vectordb = initialize.initialize()
import embed
vectordb = embed.initialize()

import os
import sys

import gradio as gr
from langchain.chains import ConversationalRetrievalChain
from langchain_huggingface import HuggingFaceEndpoint
from langchain_openai import ChatOpenAI

# Back-ends used only by the commented-out alternatives further below:
from langchain_community.llms import Ollama
from langchain_community.llms.huggingface_pipeline import HuggingFacePipeline
from langchain_google_genai import ChatGoogleGenerativeAI, GoogleGenerativeAI
from langchain_ollama.llms import OllamaLLM

sys.path.append('../..')
# Credential loading for other environments, kept for reference:
'''
# For Google Colab
from google.colab import userdata
OPENAI_API_KEY = userdata.get('OPENAI_API_KEY')
HF_token = userdata.get('hf_token')
GEMINI_API_KEY = userdata.get('GEMINI_API_KEY')

# For Desktop
from dotenv import load_dotenv, find_dotenv
_ = load_dotenv(find_dotenv())  # Read the local .env file
OPENAI_API_KEY = os.environ['OPENAI_API_KEY']
HF_token = os.environ['hf_token']
GEMINI_API_KEY = os.environ['GEMINI_API_KEY']
'''
# For Hugging Face
OPENAI_API_KEY = os.environ.get('OPENAI_API_KEY')
HF_token = os.environ.get('HF_token')
GEMINI_API_KEY = os.environ.get('GEMINI_API_KEY')
fs_token = os.environ.get('fs_token')
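# On a Hugging Face Space these values come from the Space's "Secrets"
# settings; the names above ('HF_token', 'fs_token', ...) must match the
# configured secret names. fs_token is currently unused in this script.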
llm_name = "gpt-3.5-turbo-0125"

# Module-level chat histories, one per chat pane.
chat_history_doc = []
chat_history_IS = []
# For getting source documents
def get_file(source_documents):
    """Return the unique filenames of the retrieved source documents."""
    files = set()
    for doc in source_documents:
        files.add(os.path.basename(doc.metadata['source']))
    return list(files)
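# Example (hypothetical paths): sources 'docs/TS_GIS.pdf' and 'archive/TS_GIS.pdf'
# both reduce to 'TS_GIS.pdf', so a single entry is returned.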
def chat_query_doc(question, chat_history_doc):
    """Answer a query against the Model-TS vector store, citing source files."""
    query = f"""Please provide a precise, point-wise reply to the query: {question}. \
Highlight the important points using properly formatted text, such as bullet points, bold text, or italics where appropriate."""
    # Alternative back-ends, kept for reference:
    # llm = OllamaLLM(model="llama3")
    # llm = Ollama(model="llama3")
    # repo_id = "mistralai/Mistral-7B-Instruct-v0.2"
    # llm = ChatOpenAI(model=llm_name, temperature=0.1, api_key=OPENAI_API_KEY)
    # llm = GoogleGenerativeAI(model="gemini-pro", google_api_key=GEMINI_API_KEY)
    # llm = ChatGoogleGenerativeAI(model="gemini-1.0-pro", google_api_key=GEMINI_API_KEY, temperature=0)
    llm = HuggingFaceEndpoint(
        repo_id="HuggingFaceH4/zephyr-7b-beta",
        temperature=0.01,
        repetition_penalty=1.02,
        huggingfacehub_api_token=HF_token,
    )
    retriever = vectordb.as_retriever()
    # Note: no ConversationBufferMemory here. A memory object recreated on every
    # call starts empty and would override the chat_history passed in below.
    qa = ConversationalRetrievalChain.from_llm(llm, retriever=retriever, return_source_documents=True)
    # Gradio passes history as [user, bot] lists; the chain expects tuples.
    history_tuples = [tuple(turn) for turn in chat_history_doc]
    result = qa.invoke({"question": query, "chat_history": history_tuples})
    chat_history_doc.append((question, result["answer"]))
    file_names = get_file(result["source_documents"])
    return result["answer"] + "\n\nSources : " + ', '.join(file_names[:3])
def chat_query_IS(question, chat_history_IS):
    """Handle queries about Indian/International Standards using the OpenAI model."""
    llm = ChatOpenAI(model=llm_name, temperature=0.1, api_key=OPENAI_API_KEY)
    # Alternative back-ends, kept for reference:
    # llm = ChatGoogleGenerativeAI(model="gemini-pro", google_api_key=GEMINI_API_KEY)
    # llm = OllamaLLM(model="unsloth/Llama-3.2-3B")
    # llm = HuggingFacePipeline.from_model_id(
    #     model_id=llm_name,  # Replace with a valid Hugging Face model ID
    #     task="text-generation",  # Specify the appropriate task for the model
    #     device=0,  # Use -1 for CPU or 0 for GPU
    #     model_kwargs={"temperature": 0.1},
    # )
    system_prompt = f"""
    Provide an elaborate, detailed and point-wise reply about the topic as per relevant IS/IEEE/BIS standards:
    Topic: {question}
    At the end of your reply, quote the relevant standards referred to.
    """
    # Unused alternative prompt, kept for reference:
    # system = f"""Provide a reply poetically precise as William Shakespeare for the Topic: {question}"""
    result = llm.invoke(system_prompt)
    chat_history_IS.append((system_prompt, result.content))
    return result.content
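# gr.ChatInterface invokes its fn as fn(message, history), so both handlers
# above receive the per-session chat history from Gradio as their second argument.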
# Chat pane 1: retrieval-augmented answers from the Model-TS vector store.
iface_doc = gr.ChatInterface(
    fn=chat_query_doc,
    title="Standard TS of POWERGRID",
    concurrency_limit=None,
    examples=[
        "What should be the GIB height outside the GIS hall ?",
        "STATCOM Station Ratings",
        "Specifications of XLPE POWER Cables.",
        "Specification for Ethernet Switches in SAS.",
    ],
    theme=gr.themes.Base(),
    fill_height=True,
    delete_cache=(300, 360),  # Every 300 s, delete cached files older than 360 s.
    css="CSS/chat_style.css",
).queue()
# Chat pane 2: direct LLM answers on Indian/International Standards.
iface_IS = gr.ChatInterface(
    fn=chat_query_IS,
    title="Indian / International Standards",
    concurrency_limit=None,
    examples=[
        "Type Tests for HV Switchgears.",
        "Measurement of acoustic noise level of Transformers & Reactors",
        "Technical Requirement for 765kV class Transformer",
        "Specification of Distance Relays",
    ],
    theme=gr.themes.Base(),
    fill_height=True,
    delete_cache=(300, 360),  # Every 300 s, delete cached files older than 360 s.
    css="CSS/chat_style.css",
).queue()
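# .queue() enables Gradio's request queuing; with concurrency_limit=None the
# number of concurrent requests per event is not capped.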
Title = "# Conversational BOT for Model-TS & Indian / International Standards"

Description = """
### Welcome to the Language Model (SS-Engg-Dept.)!
This bot is grounded in the **Model Technical Specifications** of the SS-Engg. Dept. and leverages LLMs to answer your queries based on:
* Relevant TS, GTR & Specific Requirements
* International / Indian Standards

**Tips for Effective Use:**
* Use elaborate questions for more accurate responses.
* Clear the chat if you don't receive a reply.
* Include **Specific Keywords** in your query for precise results.
"""
with gr.Blocks(css="CSS/style.css", fill_height=True) as demo:
    # history = gr.State([])  # Unused state placeholder, kept for reference.
    with gr.Column():
        with gr.Row():
            with gr.Column(scale=1):
                gr.Image("Images/Chatbot.png", width=110, show_download_button=False, show_label=False, show_share_button=False, elem_id="Logo")
            with gr.Column(scale=3):
                gr.Markdown(Title)
            with gr.Column(scale=1):
                gr.Image("Images/PG Logo.png", width=200, show_download_button=False, show_label=False, show_share_button=False, elem_id="PG_Logo")
        with gr.Row():
            gr.Markdown(Description)
        with gr.Row(equal_height=True):
            with gr.Column(elem_classes=["chat_container"]):
                iface_doc.render()
            with gr.Column(elem_classes=["chat_container"]):
                iface_IS.render()
if __name__ == "__main__":
    demo.launch(debug=True)  # debug=True blocks the main thread and prints errors to the console.