import gradio as gr
from utils import submit_gradio_module, load_retrieval_results
from fuzzywuzzy import fuzz

HEADER = """

The Arabic RAG Leaderboard

The only leaderboard you will require for your RAG needs 🏆

"""

ABOUT_SECTION = """
## About

The Arabic RAG Leaderboard is designed to evaluate and compare the performance of Retrieval-Augmented Generation (RAG) models on a set of retrieval and generative tasks. By leveraging a comprehensive evaluation framework, the leaderboard provides a detailed assessment of a model's ability to retrieve relevant information and generate accurate, coherent, and contextually appropriate responses.

### Why Focus on RAG Models?

The Arabic RAG Leaderboard is specifically designed to assess **RAG models**, which combine retrieval mechanisms with generative capabilities to enhance the quality and relevance of generated content. These models are particularly useful in scenarios where access to up-to-date and contextually relevant information is crucial. While foundational models can be evaluated, the primary focus is on RAG models that excel in both retrieval and generation tasks.

### How to Submit Your Model?

Navigate to the submission section below to submit your RAG model from the HuggingFace Hub for evaluation. Ensure that your model is public and the submitted metadata (precision, revision, #params) is accurate.

### Contact

For any inquiries or assistance, feel free to reach out through the community tab at [Navid-AI Community](https://huggingface.co/spaces/Navid-AI/The-Arabic-Rag-Leaderboard/discussions) or via [email](mailto:support@navid-ai.com).
"""

CITATION_BUTTON_LABEL = """
Copy the following snippet to cite these results
"""

# NOTE: \\url renders as \url{...} at runtime; a bare "\u" would be an
# invalid (SyntaxError) escape sequence in a non-raw Python string.
CITATION_BUTTON_TEXT = """
@misc{AraGen,
    author = {Mohaned A. Rashad, Hamza Shahid},
    title = {The Arabic RAG Leaderboard},
    year = {2025},
    publisher = {Navid-AI},
    howpublished = "\\url{https://huggingface.co/spaces/Navid-AI/The-Arabic-Rag-Leaderboard}"
}
"""

# Populated once in main(); read by the search callbacks below.
retrieval_df = None
original_columns_order = None  # kept for interface compatibility; currently unused


def search_leaderboard(model_name, columns_to_show):
    """Fuzzy-filter the leaderboard by model name.

    Args:
        model_name: free-text query typed into the search box. Blank/whitespace
            queries return the full (unfiltered) leaderboard.
        columns_to_show: list of column names to project in the returned frame.

    Returns:
        A ``pandas.DataFrame`` restricted to ``columns_to_show``, containing
        only rows whose "Model" value fuzzily matches ``model_name`` (best
        matches first).
    """
    if not model_name.strip():
        return retrieval_df.loc[:, columns_to_show]

    threshold = 95  # minimum partial-ratio score; raise/lower to tighten/loosen matching
    filtered_df = retrieval_df.copy()

    def calculate_similarity(row):
        # Score 0 for anything below the threshold so it is dropped outright.
        similarity = fuzz.partial_ratio(model_name.lower(), row["Model"].lower())
        return similarity if similarity >= threshold else 0

    # Score every row, keep matches, best first, then drop the helper column.
    filtered_df["similarity"] = filtered_df.apply(calculate_similarity, axis=1)
    filtered_df = filtered_df[filtered_df["similarity"] > 0].sort_values("similarity", ascending=False)
    filtered_df = filtered_df.drop("similarity", axis=1)

    return filtered_df.loc[:, columns_to_show]


def main():
    """Load the leaderboard data, build the Gradio UI, and launch the app."""
    global retrieval_df, original_columns_order

    retrieval_df = load_retrieval_results()
    # Series.map (not DataFrame.map, which requires pandas >= 2.1) keeps this
    # compatible with older pandas; f"{x}" coerces every entry to str.
    retrieval_df["Model"] = retrieval_df["Model"].map(lambda x: f"{x}")
    retrieval_df.drop(columns=["Revision", "Precision", "Task"], inplace=True)
    retrieval_df.sort_values("Web Search Dataset (Overall Score)", ascending=False, inplace=True)

    # Default projection shown before the user customizes the column picker.
    columns_to_show = [
        "Model",
        "Web Search Dataset (Overall Score)",
        "Model Size (in Millions)",
        "Embedding Dimension",
        "Max Tokens",
        "Num Likes",
    ]

    with gr.Blocks() as demo:
        gr.HTML(HEADER)

        with gr.Tabs():
            with gr.Tab("đŸ•ĩī¸â€â™‚ī¸ Retrieval"):
                with gr.Tabs():
                    with gr.Tab("👑 Leaderboard"):
                        with gr.Row():
                            search_box_retrieval = gr.Textbox(
                                placeholder="Search for models...",
                                label="Search",
                                scale=5,
                            )
                            columns_to_show_input = gr.CheckboxGroup(
                                label="Columns to Show",
                                choices=retrieval_df.columns.tolist(),
                                value=columns_to_show,
                                scale=4,
                            )

                        retrieval_leaderboard = gr.Dataframe(
                            value=retrieval_df[columns_to_show],
                            datatype="markdown",
                            wrap=True,
                            show_fullscreen_button=True,
                            interactive=False,
                        )

                        # Live search re-filters on every keystroke.
                        search_box_retrieval.input(
                            search_leaderboard,
                            inputs=[search_box_retrieval, columns_to_show_input],
                            outputs=retrieval_leaderboard,
                        )
                        # Column picker re-projects the full frame.
                        columns_to_show_input.select(
                            lambda columns: retrieval_df.loc[:, columns],
                            inputs=columns_to_show_input,
                            outputs=retrieval_leaderboard,
                        )

                    with gr.Tab("đŸĩī¸ Submit Retriever"):
                        submit_gradio_module("Retriever")

                    with gr.Tab("â„šī¸ About"):
                        gr.Markdown(ABOUT_SECTION)

            with gr.Tab("📊 Reranking"):
                with gr.Tabs():
                    with gr.Tab("👑 Leaderboard"):
                        search_box_reranker = gr.Textbox(
                            placeholder="Search for models...",
                            label="Search",
                            interactive=True,
                        )
                        reranker_leaderboard = gr.Dataframe(
                            value=retrieval_df[columns_to_show],
                            datatype="markdown",
                            wrap=True,
                            show_fullscreen_button=True,
                            interactive=False,
                        )
                        # BUGFIX: search_leaderboard takes (model_name, columns_to_show);
                        # wiring it directly with a single input raised a TypeError on
                        # every search. Close over the default column list instead.
                        search_box_reranker.submit(
                            lambda query: search_leaderboard(query, columns_to_show),
                            inputs=search_box_reranker,
                            outputs=reranker_leaderboard,
                        )

                    with gr.Tab("đŸĩī¸ Submit Reranker"):
                        submit_gradio_module("Reranker")

                    with gr.Tab("â„šī¸ About"):
                        gr.Markdown(ABOUT_SECTION)

            # with gr.Tab("🧠 LLM Context Answering"):
            #     with gr.Tabs():
            #         with gr.Tab("Leaderboard"):
            #             pass
            #         submit_gradio_module("LLM")

        with gr.Row():
            with gr.Accordion("📙 Citation", open=False):
                gr.Textbox(
                    value=CITATION_BUTTON_TEXT,
                    label=CITATION_BUTTON_LABEL,
                    lines=20,
                    elem_id="citation-button",
                    show_copy_button=True,
                )

    demo.launch()


if __name__ == "__main__":
    main()