# reproduce/evaluations/validating.py
# Author: Attila Simkó
# Commit: 2db37b1 ("big upgrade")
import re
from core.conversion import noop_logger
def evaluate(llm, zip, readmes, log_fn=noop_logger):
    """Check whether a repository contains evidence of model evaluation.

    Scans every ``.py``/``.ipynb`` file in the archive for framework-specific
    model-loading / inference calls (TensorFlow and PyTorch regex patterns),
    and scans the README texts for the word "testing".

    Args:
        llm: Unused here; kept for signature consistency with sibling
            evaluators — TODO confirm against callers.
        zip: An open ``zipfile.ZipFile``-like archive of the repository
            (must provide ``namelist()`` and ``open()``).
        readmes: Iterable of README contents; entries may be ``None``/empty.
        log_fn: Callback ``log_fn(level, message)`` for progress logging.

    Returns:
        str: ``"Yes"`` if any evaluation evidence was found, else ``"No"``.
    """
    log_fn("TITLE", "\nLooking for examples for running the model...")
    overall = "No"
    patterns = {
        'tensorflow': [
            r'tf\.keras\.models\.load_model',  # TensorFlow model loading
            r'tf\.saved_model\.load',
            r'\.predict',  # Running inference
        ],
        'pytorch': [
            r'torch\.load',  # PyTorch model loading
            r'torch\.jit\.load',  # PyTorch JIT model loading
            r'\.eval',  # Running inference
        ],
    }
    # Tuple form of endswith replaces the original bitwise `|` on booleans.
    files = [f for f in zip.namelist() if f.endswith((".py", ".ipynb"))]
    for file_path in files:
        # Context manager closes the archive member (original leaked the
        # handle); errors="replace" keeps a non-UTF-8 file from aborting
        # the whole scan with UnicodeDecodeError.
        with zip.open(file_path) as handle:
            code = handle.read().decode("utf-8", errors="replace")
        for framework, regex_list in patterns.items():
            for pattern in regex_list:
                if re.search(pattern, code):
                    log_fn("LOG", f"Found code for evaluating a model in {framework} framework in file: {file_path}")
                    overall = "Yes"
    for readme in readmes:
        # Guard against None entries; re.search truthiness replaces
        # the original len(re.findall(...)) > 0.
        if readme and re.search("testing", readme):
            log_fn("LOG", "Found information about evaluations in readme")
            overall = "Yes"
    if overall == "No":
        log_fn("ERROR", "Found no code for evaluating the model.")
    return overall