Commit · 7a8d600
Parent(s): dc5f58b

readme file loading

Files changed:
- evaluations/models.py +2 -2
- evaluations/repo_evaluations.py +3 -3
evaluations/models.py
CHANGED
@@ -3,7 +3,7 @@ from huggingface_hub import InferenceClient
 import os
 
 system_messages = { "STRICT": "You are a chatbot evaluating github repositories, their python codes and corresponding readme files. Strictly answer the questions with Yes or No.",
-                    "HELP": "You are a chatbot evaluating github repositories, their python codes and corresponding readme files. Please help me answer the following question." }
+                    "HELP": "You are a chatbot evaluating github repositories, their python codes and corresponding readme files. Please help me answer the following question. Keep your answers short, and informative." }
 
 class LocalLLM():
     def __init__(self, model_name):
@@ -33,4 +33,4 @@ class RemoteLLM():
             model=self.model_name, max_tokens=500, stream=False,
             messages=[{"role": "system", "content": system_messages[response_type]},
                       {"role": "user", "content": prompt}])
-        return message['choices'][0]
+        return message['choices'][0]['message']['content']
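The second hunk tightens what RemoteLLM's predict returns: instead of handing callers the whole first choice object, it now extracts only the assistant's reply text. Chat-completion responses generally follow the OpenAI-style choices / message / content shape, which the extra indexing walks down. A minimal sketch of that shape (the payload below is illustrative, not taken from the repo):

    # Illustrative OpenAI-style chat-completion payload (hypothetical values).
    response = {
        "choices": [
            {
                "index": 0,
                "message": {"role": "assistant", "content": "Yes."},
                "finish_reason": "stop",
            }
        ]
    }

    old_return = response['choices'][0]                        # whole choice dict
    new_return = response['choices'][0]['message']['content']  # just the reply text
    assert new_return == "Yes."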
evaluations/repo_evaluations.py
CHANGED
@@ -41,9 +41,9 @@ def evaluate(llm, verbose, repo_url, title=None, year=None):
 
     zip = zipfile.ZipFile(repository_zip_name)
     readme = fetch_readme(zip)
-
-    if (llm):
-        summary = llm.predict("HELP", "{readme}\nBased on the readme file above can you give a quick summary of this repository?")
+
+    if ((readme != "") & llm):
+        summary = llm.predict("HELP", f"{readme}\nBased on the readme file above can you give a quick summary of this repository?")
         log(verbose, "LOG", f"Summary: {summary}")
 
     results["pred_stars"] = fetch_repo_stars(verbose, repo_url, token)
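This hunk makes the readme actually reach the model: the old prompt string lacked the f prefix, so the literal text {readme} was sent instead of the file contents, and the new guard additionally skips repositories whose readme came back empty. One caveat in the new guard: `&` is Python's bitwise operator, not boolean `and`, so `(readme != "") & llm` computes `bool & object` without short-circuiting and raises a TypeError for operands that define no `__rand__`. A short sketch of the pitfall and the short-circuiting alternative (the LocalLLM stub below is a hypothetical stand-in, not the repo's class):

    # Sketch of the guard's pitfall; LocalLLM here is a hypothetical stand-in.
    class LocalLLM:
        pass

    llm = LocalLLM()
    readme = "# My repo"

    try:
        ok = (readme != "") & llm   # bitwise `&` does not short-circuit, and
    except TypeError as err:        # bool & LocalLLM has no meaning, so this raises.
        print(err)

    # Idiomatic, short-circuiting form of the same check:
    if readme != "" and llm is not None:
        print("would summarize the readme")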