Update evaluation code
evaluation/evaluate_mmvp_MetaCLIP_huge.py
CHANGED
@@ -1,6 +1,4 @@
 import os
-import clip
-from clip import load
 import csv
 from PIL import Image
 import torch
evaluation/evaluate_mmvp_MetaCLIP_large.py
CHANGED
@@ -1,14 +1,10 @@
 import os
-import clip
-from clip import load
 import csv
 from PIL import Image
 import torch
 from tqdm import tqdm
 import json
 from transformers import CLIPVisionModel, CLIPModel, CLIPImageProcessor, CLIPTokenizer
-import argparse
-
 
 
 def benchmark_model(processor, tokenizer, model, benchmark_dir, device="cpu"):
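For context, the transformers imports retained above are enough to load a CLIP-style checkpoint and score MMVP image/text pairs without the OpenAI clip package or argparse. Below is a minimal, self-contained sketch of that loading path; the checkpoint id, the helper name score, and the cosine-similarity scoring are illustrative assumptions, not the exact contents of the evaluation scripts, which would point at the corresponding MetaCLIP or OpenAI CLIP weights.

# Sketch only: assumed checkpoint id and scoring, not taken from the scripts.
import torch
from PIL import Image
from transformers import CLIPModel, CLIPImageProcessor, CLIPTokenizer

device = "cuda" if torch.cuda.is_available() else "cpu"

# Any CLIP-style Hub checkpoint works here; the MMVP scripts presumably use
# the matching MetaCLIP / OpenAI CLIP weights instead.
ckpt = "openai/clip-vit-large-patch14"
model = CLIPModel.from_pretrained(ckpt).to(device).eval()
processor = CLIPImageProcessor.from_pretrained(ckpt)
tokenizer = CLIPTokenizer.from_pretrained(ckpt)

@torch.no_grad()
def score(image: Image.Image, captions: list[str]) -> torch.Tensor:
    """Return cosine similarities between one image and a list of captions."""
    pixel_values = processor(images=image, return_tensors="pt")["pixel_values"].to(device)
    text_inputs = tokenizer(captions, padding=True, return_tensors="pt").to(device)
    image_feat = model.get_image_features(pixel_values=pixel_values)
    text_feat = model.get_text_features(**text_inputs)
    # Normalize so the dot product is a cosine similarity.
    image_feat = image_feat / image_feat.norm(dim=-1, keepdim=True)
    text_feat = text_feat / text_feat.norm(dim=-1, keepdim=True)
    return (image_feat @ text_feat.T).squeeze(0)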
evaluation/evaluate_mmvp_OpenAICLIP_224.py
CHANGED
@@ -1,13 +1,10 @@
 import os
-import clip
-from clip import load
 import csv
 from PIL import Image
 import torch
 from tqdm import tqdm
 import json
 from transformers import CLIPVisionModel, CLIPModel, CLIPImageProcessor, CLIPTokenizer
-import argparse
 
 
 
evaluation/evaluate_mmvp_OpenAICLIP_336.py
CHANGED
@@ -1,13 +1,10 @@
 import os
-import clip
-from clip import load
 import csv
 from PIL import Image
 import torch
 from tqdm import tqdm
 import json
 from transformers import CLIPVisionModel, CLIPModel, CLIPImageProcessor, CLIPTokenizer
-import argparse
 
 
 
evaluation/evaluate_mmvp_SigLIP_224.py
CHANGED
@@ -1,13 +1,10 @@
 import os
-import clip
-from clip import load
 import csv
 from PIL import Image
 import torch
 from tqdm import tqdm
 import json
 from transformers import SiglipProcessor, SiglipModel, SiglipImageProcessor, SiglipTokenizer
-import argparse
 
 
 
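Likewise, the retained Siglip* imports cover loading and scoring for the SigLIP scripts. The sketch below is a minimal illustration under assumptions: the checkpoint id, the helper name score, and the use of SiglipProcessor (rather than the separate image processor and tokenizer the script also imports) are not taken from the actual files. SigLIP scores each image/text pair with a sigmoid over pairwise logits rather than a softmax over candidates.

# Sketch only: assumed checkpoint id and inputs, not taken from the scripts.
import torch
from PIL import Image
from transformers import SiglipModel, SiglipProcessor

device = "cuda" if torch.cuda.is_available() else "cpu"

ckpt = "google/siglip-base-patch16-224"  # assumed 224px SigLIP checkpoint
model = SiglipModel.from_pretrained(ckpt).to(device).eval()
processor = SiglipProcessor.from_pretrained(ckpt)

@torch.no_grad()
def score(image: Image.Image, captions: list[str]) -> torch.Tensor:
    """Return the pairwise image-text logits for one image against several captions."""
    # SigLIP's text tower expects fixed-length ("max_length") padding.
    inputs = processor(text=captions, images=image, padding="max_length",
                       return_tensors="pt").to(device)
    outputs = model(**inputs)
    return outputs.logits_per_image.squeeze(0)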
evaluation/evaluate_mmvp_SigLIP_384.py
CHANGED
@@ -1,6 +1,4 @@
 import os
-import clip
-from clip import load
 import csv
 from PIL import Image
 import torch