---
base_model: lmms-lab/llava-onevision-qwen2-7b-ov
datasets:
- lmms-lab/EgoLife
license: apache-2.0
library_name: transformers
pipeline_tag: video-text-to-text
tags:
- multimodal
---
# EgoGPT-7b-Demo
## Model Summary
`EgoGPT-7b-Demo` is an omni-modal model trained on egocentric datasets, achieving state-of-the-art performance on egocentric video understanding. Built on `llava-onevision-qwen2-7b-ov`, it has been finetuned on the `EgoIT-EgoLife-138k` egocentric dataset, which combines [EgoIT-99k](https://huggingface.co/datasets/lmms-lab/EgoIT-99K) with a depersonalized version of [EgoLife-QA (39k)](https://huggingface.co/datasets/lmms-lab/EgoLife).
EgoGPT excels in two primary scenarios:
- **Advanced Model Integration:** EgoGPT couples the LLaVA-OneVision vision-language backbone with a Whisper-based audio encoder, allowing it to process visual and auditory information jointly.
- **Outstanding Benchmark Performance:** EgoGPT outperforms leading commercial and open-source models on egocentric benchmarks such as EgoSchema, EgoPlan, and EgoThink.
For further details, please refer to the following resources:
- 📰 Paper: [EgoLife: Towards Egocentric Life Assistant](https://arxiv.org/abs/2503.03803)
- 🪐 Project Page: [https://egolife-ai.github.io/](https://egolife-ai.github.io/)
- 📦 Datasets: https://huggingface.co/datasets/lmms-lab/EgoIT-99K & https://huggingface.co/datasets/lmms-lab/EgoLife
- 🤗 Model Collections: https://huggingface.co/collections/lmms-lab/egolife-67c04574c2a9b64ab312c342
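The model weights and datasets can be fetched ahead of time with the Hugging Face CLI (a standard `huggingface_hub` utility, not specific to this project); for example:

```shell
# Pre-download the model weights and the EgoIT-99K dataset into the local HF cache.
huggingface-cli download lmms-lab/EgoGPT-7b-Demo
huggingface-cli download lmms-lab/EgoIT-99K --repo-type dataset
```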
## Usage
### Installation
1. Clone this repository.
```shell
git clone https://github.com/egolife-ntu/EgoLife
cd EgoLife/EgoGPT
```
2. Install the dependencies.
```shell
conda create -n egogpt python=3.10
conda activate egogpt
pip install --upgrade pip
pip install -e .
```
3. Install the dependencies for training and inference.
```shell
pip install -e ".[train]"
pip install flash-attn --no-build-isolation
```
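If the installation succeeded, the `egogpt` package and its GPU dependencies should import cleanly. A minimal sanity check (assuming the package installed by `pip install -e .` is named `egogpt`, as in this repository) is:

```shell
# Verify that egogpt and flash-attn import, and that CUDA is visible to PyTorch.
python -c "import egogpt, torch; print('egogpt OK, CUDA available:', torch.cuda.is_available())"
python -c "import flash_attn; print('flash-attn OK')"
```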
### Quick Start
~~~python
import argparse
import copy
import os
import re
import sys
import warnings

import numpy as np
import requests
import soundfile as sf
import torch
import torch.distributed as dist
import whisper
from decord import VideoReader, cpu
from egogpt.constants import (
    DEFAULT_IMAGE_TOKEN,
    DEFAULT_SPEECH_TOKEN,
    IGNORE_INDEX,
    IMAGE_TOKEN_INDEX,
    SPEECH_TOKEN_INDEX,
)
from egogpt.conversation import SeparatorStyle, conv_templates
from egogpt.mm_utils import get_model_name_from_path, process_images
from egogpt.model.builder import load_pretrained_model
from PIL import Image
from scipy.signal import resample


def setup(rank, world_size):
    os.environ["MASTER_ADDR"] = "localhost"
    os.environ["MASTER_PORT"] = "12355"
    dist.init_process_group("gloo", rank=rank, world_size=world_size)


def load_video(video_path=None, audio_path=None, max_frames_num=16, fps=1):
    # Load the audio track and convert it into 128-bin log-mel features for the Whisper encoder.
    if audio_path is not None:
        speech, sample_rate = sf.read(audio_path)
        if sample_rate != 16000:
            target_length = int(len(speech) * 16000 / sample_rate)
            speech = resample(speech, target_length)
        if speech.ndim > 1:
            speech = np.mean(speech, axis=1)
        speech = whisper.pad_or_trim(speech.astype(np.float32))
        speech = whisper.log_mel_spectrogram(speech, n_mels=128).permute(1, 0)
        speech_lengths = torch.LongTensor([speech.shape[0]])
    else:
        # No audio provided: feed a silent (all-zero) mel spectrogram instead.
        speech = torch.zeros(3000, 128)
        speech_lengths = torch.LongTensor([3000])

    # Sample video frames at the requested fps, capped at max_frames_num.
    vr = VideoReader(video_path, ctx=cpu(0), num_threads=1)
    total_frame_num = len(vr)
    avg_fps = round(vr.get_avg_fps() / fps)
    frame_idx = [i for i in range(0, total_frame_num, avg_fps)]
    if max_frames_num > 0 and len(frame_idx) > max_frames_num:
        uniform_sampled_frames = np.linspace(
            0, total_frame_num - 1, max_frames_num, dtype=int
        )
        frame_idx = uniform_sampled_frames.tolist()
    video = vr.get_batch(frame_idx).asnumpy()
    return video, speech, speech_lengths


def split_text(text, keywords):
    # Split the prompt on the special <image> / <speech> placeholders, keeping the placeholders.
    pattern = "(" + "|".join(map(re.escape, keywords)) + ")"
    parts = re.split(pattern, text)
    parts = [part for part in parts if part]
    return parts


def main(
    pretrained_path="checkpoints/EgoGPT-7b-Demo",
    video_path=None,
    audio_path=None,
    query="Please describe the video in detail.",
):
    warnings.filterwarnings("ignore")
    setup(0, 1)
    device = "cuda"
    device_map = "cuda"
    tokenizer, model, max_length = load_pretrained_model(
        pretrained_path, device_map=device_map
    )
    model.eval()

    # Build the conversation prompt with image and speech placeholders.
    conv_template = "qwen_1_5"
    question = f"<image>\n<speech>\n{query}"
    conv = copy.deepcopy(conv_templates[conv_template])
    conv.append_message(conv.roles[0], question)
    conv.append_message(conv.roles[1], None)
    prompt_question = conv.get_prompt()

    video, speech, speech_lengths = load_video(
        video_path=video_path, audio_path=audio_path
    )
    speech = torch.stack([speech]).to(device).half()
    processor = model.get_vision_tower().image_processor
    processed_video = processor.preprocess(video, return_tensors="pt")["pixel_values"]
    image = [(processed_video, video[0].size, "video")]

    # Replace the placeholders with their dedicated token indices and tokenize the rest.
    parts = split_text(prompt_question, ["<image>", "<speech>"])
    input_ids = []
    for part in parts:
        if part == "<image>":
            input_ids.append(IMAGE_TOKEN_INDEX)
        elif part == "<speech>":
            input_ids.append(SPEECH_TOKEN_INDEX)
        else:
            input_ids.extend(tokenizer(part).input_ids)
    input_ids = torch.tensor(input_ids, dtype=torch.long).unsqueeze(0).to(device)
    image_tensor = [image[0][0].half()]
    image_sizes = [image[0][1]]
    generate_kwargs = {"eos_token_id": tokenizer.eos_token_id}

    cont = model.generate(
        input_ids,
        images=image_tensor,
        image_sizes=image_sizes,
        speech=speech,
        speech_lengths=speech_lengths,
        do_sample=False,
        temperature=0.5,
        max_new_tokens=4096,
        modalities=["video"],
        **generate_kwargs,
    )
    text_outputs = tokenizer.batch_decode(cont, skip_special_tokens=True)
    print(text_outputs)


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument(
        "--pretrained_path", type=str, default="lmms-lab/EgoGPT-7b-Demo"
    )
    parser.add_argument("--video_path", type=str, default=None)
    parser.add_argument("--audio_path", type=str, default=None)
    parser.add_argument(
        "--query", type=str, default="Please describe the video in detail."
    )
    args = parser.parse_args()
    main(args.pretrained_path, args.video_path, args.audio_path, args.query)
~~~
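Assuming the snippet above is saved as `inference.py` (the file name is illustrative, as are the media paths), it can be run from the command line like this:

```shell
# Illustrative invocation; video/audio paths are placeholders for your own egocentric clip.
python inference.py \
    --pretrained_path lmms-lab/EgoGPT-7b-Demo \
    --video_path /path/to/egocentric_clip.mp4 \
    --audio_path /path/to/egocentric_clip.wav \
    --query "Please describe the video in detail."
```

If `--audio_path` is omitted, `load_video` substitutes a silent (all-zero) mel spectrogram, so video-only inference still works.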
## Citation
```bibtex
@inproceedings{yang2025egolife,
title={EgoLife: Towards Egocentric Life Assistant},
author={Yang, Jingkang and Liu, Shuai and Guo, Hongming and Dong, Yuhao and Zhang, Xiamengwei and Zhang, Sicheng and Wang, Pengyun and Zhou, Zitang and Xie, Binzhu and Wang, Ziyue and Ouyang, Bei and Lin, Zhengyu and Cominelli, Marco and Cai, Zhongang and Zhang, Yuanhan and Zhang, Peiyuan and Hong, Fangzhou and Widmer, Joerg and Gringoli, Francesco and Yang, Lei and Li, Bo and Liu, Ziwei},
booktitle={The IEEE/CVF Conference on Computer Vision and Pattern Recognition},
year={2025},
}
```