Commit f86a683 (parent: 28b25b0)

new model
Files changed:
- app.py +1 -1
- evaluations/models.py +3 -1
- midl.py +1 -2
app.py
CHANGED
@@ -3,7 +3,7 @@ from evaluations.repo_evaluations import evaluate
 from evaluations.models import RemoteLLM
 import requests
 
-model = RemoteLLM("
+model = RemoteLLM("meta-llama/Llama-3.1-8B-Instruct")
 
 st.write("\n")
 st.write("Welcome to the online reproducibility evaluation tool!")
evaluations/models.py
CHANGED
@@ -25,10 +25,12 @@ class LocalLLM():
 class RemoteLLM():
     def __init__(self, model_name):
         token = os.getenv("hfToken")
-        self.
+        self.model_name = model_name
+        self.client = InferenceClient(api_key=token)
 
     def predict(self, response_type, prompt):
         for message in self.client.chat_completion(
+            model=self.model_name,
             messages=[{"role": "system", "content": system_messages[response_type]},
                       {"role": "user", "content": prompt}],
             max_tokens=500,
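For reference, a minimal self-contained sketch of how the updated RemoteLLM drives huggingface_hub's InferenceClient. The diff is truncated after max_tokens=500, so the stream=True flag and the chunk-printing loop below are assumptions, not the Space's actual code:

# Minimal sketch of the updated RemoteLLM call path (assumption: the
# truncated diff continues with stream=True; chunk handling is illustrative).
import os
from huggingface_hub import InferenceClient

client = InferenceClient(api_key=os.getenv("hfToken"))
for chunk in client.chat_completion(
    model="meta-llama/Llama-3.1-8B-Instruct",
    messages=[{"role": "system", "content": "You are a helpful assistant."},
              {"role": "user", "content": "Say hello."}],
    max_tokens=500,
    stream=True,  # yields ChatCompletionStreamOutput chunks incrementally
):
    print(chunk.choices[0].delta.content or "", end="")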
midl.py
CHANGED
@@ -5,9 +5,8 @@ from dotenv import load_dotenv
 load_dotenv()
 token = os.getenv("githubToken")
 
-
 # Load model directly
+model = LocalLLM("meta-llama/Llama-3.1-8B-Instruct")
 
-model = LocalLLM("codellama/CodeLlama-7b-Instruct-hf")
 res = midl_evaluations(model)
 res.to_csv("results_midl.csv", sep="\t", index=False)
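LocalLLM's implementation is not part of this commit (only its class line appears as hunk context). As a rough sketch only, loading the same checkpoint locally with transformers could look like the following; the pipeline call and chat-style output indexing are assumptions about a class this diff never shows:

# Hypothetical stand-in for LocalLLM; the real class lives elsewhere in
# evaluations/models.py and is not shown in this commit.
from transformers import pipeline

pipe = pipeline("text-generation", model="meta-llama/Llama-3.1-8B-Instruct")
out = pipe([{"role": "user", "content": "Say hello."}], max_new_tokens=500)
print(out[0]["generated_text"][-1]["content"])  # last message is the reply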