Dataset: open-cn-llm-leaderboard/vlm_requests

Modalities: Tabular, Text
Formats: json
Size: < 1K rows
Libraries: Datasets, Dask
Columns:

| column          | dtype         | distinct values / range |
| --------------- | ------------- | ----------------------- |
| model           | stringclasses | 2 values                |
| model_api_url   | stringclasses | 2 values                |
| model_api_key   | stringclasses | 2 values                |
| model_api_name  | stringclasses | 2 values                |
| base_model      | stringclasses | 1 value                 |
| revision        | stringclasses | 1 value                 |
| precision       | stringclasses | 1 value                 |
| private         | bool          | 1 class                 |
| weight_type     | stringclasses | 1 value                 |
| status          | stringclasses | 1 value                 |
| submitted_time  | timestamp[s]  |                         |
| model_type      | stringclasses | 1 value                 |
| params          | float64       | 0 to 2.21               |
| runsh           | stringclasses | 2 values                |
| adapter         | stringclasses | 2 values                |
| eval_id         | int64         | 26k to 26.1k            |
| flageval_id     | int64         | 1.05k to 1.06k          |
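For orientation, a minimal sketch of loading and inspecting the data with the Datasets library; the "train" split name is an assumption based on the usual layout of leaderboard request repositories:

```python
# Minimal sketch, assuming the default "train" split; adjust if the repo
# stores requests under a different configuration.
from datasets import load_dataset

ds = load_dataset("open-cn-llm-leaderboard/vlm_requests", split="train")
print(ds.features)                      # column names and dtypes, as in the table above
print(ds[0]["model"], ds[0]["status"])  # e.g. "Qwen/Qwen2-VL-2B-Instruct RUNNING"
```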
Example rows:

Row 1 is a self-hosted (open-weights) submission:
- model: Qwen/Qwen2-VL-2B-Instruct
- revision: main
- precision: float16
- private: false
- weight_type: Original
- status: RUNNING
- submitted_time: 2025-01-24T02:46:12
- model_type: 🟢 : pretrained
- params: 2.209

Its runsh field stores the launch script:
```bash
#!/bin/bash
current_file="$0"
current_dir="$(dirname "$current_file")"
SERVER_IP=$1
SERVER_PORT=$2
PYTHONPATH=$current_dir:$PYTHONPATH accelerate launch $current_dir/model_adapter.py \
    --server_ip $SERVER_IP --server_port $SERVER_PORT "${@:3}" --cfg $current_dir/meta.json
```
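The script takes the evaluation server's IP and port as its first two positional arguments and forwards any remaining arguments to model_adapter.py, so a typical invocation is `bash run.sh <server_ip> <server_port> [extra flags]`.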
Its adapter field stores the model adapter source:

```python
import time
from typing import Any, Dict

import torch
from transformers import AutoProcessor, AutoTokenizer, Qwen2VLForConditionalGeneration
from qwen_vl_utils import process_vision_info

from flagevalmm.models.base_model_adapter import BaseModelAdapter
from flagevalmm.server import ServerDataset
from flagevalmm.server.utils import parse_args, process_images_symbol


class CustomDataset(ServerDataset):
    def __getitem__(self, index):
        data = self.get_data(index)
        question_id = data["question_id"]
        img_path = data["img_path"]
        qs = data["question"]
        # Map image placeholders in the question text to their image paths
        qs, idx = process_images_symbol(qs)
        idx = set(idx)
        img_path_idx = []
        for i in idx:
            if i < len(img_path):
                img_path_idx.append(img_path[i])
            else:
                print("[warning] image index out of range")
        return question_id, img_path_idx, qs


class ModelAdapter(BaseModelAdapter):
    def model_init(self, task_info: Dict):
        ckpt_path = task_info["model_path"]
        torch.set_grad_enabled(False)
        with self.accelerator.main_process_first():
            tokenizer = AutoTokenizer.from_pretrained(ckpt_path, trust_remote_code=True)
            model = Qwen2VLForConditionalGeneration.from_pretrained(
                ckpt_path,
                device_map="auto",
                torch_dtype=torch.bfloat16,
                attn_implementation="flash_attention_2",
            )
        model = self.accelerator.prepare_model(model, evaluation_mode=True)
        self.tokenizer = tokenizer
        if hasattr(model, "module"):
            model = model.module
        self.model = model
        self.processor = AutoProcessor.from_pretrained(ckpt_path)

    def build_message(self, query: str, image_paths=[]) -> str:
        messages = [{"role": "user", "content": []}]
        for img_path in image_paths:
            messages[-1]["content"].append({"type": "image", "image": img_path})
        # add question
        messages[-1]["content"].append({"type": "text", "text": query})
        return messages

    def run_one_task(self, task_name: str, meta_info: Dict[str, Any]):
        results = []
        cnt = 0
        data_loader = self.create_data_loader(
            CustomDataset, task_name, batch_size=1, num_workers=0
        )
        for question_id, img_path, qs in data_loader:
            # Start timing from the second sample so model warm-up is excluded
            if cnt == 1:
                start_time = time.perf_counter()
            cnt += 1
            question_id = question_id[0]
            img_path_flaten = [p[0] for p in img_path]
            qs = qs[0]
            messages = self.build_message(qs, image_paths=img_path_flaten)
            text = self.processor.apply_chat_template(
                messages, tokenize=False, add_generation_prompt=True
            )
            image_inputs, video_inputs = process_vision_info(messages)
            inputs = self.processor(
                text=[text],
                images=image_inputs,
                videos=video_inputs,
                padding=True,
                return_tensors="pt",
            )
            inputs = inputs.to("cuda")
            # Inference
            generated_ids = self.model.generate(**inputs, max_new_tokens=1024)
            # Strip the prompt tokens so only the generated answer is decoded
            generated_ids_trimmed = [
                out_ids[len(in_ids) :]
                for in_ids, out_ids in zip(inputs.input_ids, generated_ids)
            ]
            response = self.processor.batch_decode(
                generated_ids_trimmed,
                skip_special_tokens=True,
                clean_up_tokenization_spaces=False,
            )[0]
            self.accelerator.print(f"{qs}\n{response}\n\n")
            results.append(
                {"question_id": question_id, "answer": response.strip(), "prompt": qs}
            )
        # Each rank saves its own shard; the main process merges them
        rank = self.accelerator.state.local_process_index
        self.save_result(results, meta_info, rank=rank)
        self.accelerator.wait_for_everyone()
        if self.accelerator.is_main_process:
            correct_num = self.collect_results_and_save(meta_info)
            total_time = time.perf_counter() - start_time
            print(
                f"Total time: {total_time}\n"
                f"Average time: {total_time / cnt}\n"
                f"Results_collect number: {correct_num}"
            )
        print("rank", rank, "finished")


if __name__ == "__main__":
    args = parse_args()
    model_adapter = ModelAdapter(
        server_ip=args.server_ip,
        server_port=args.server_port,
        timeout=args.timeout,
        extra_cfg=args.cfg,
    )
    model_adapter.run()
```
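Two details of the adapter are worth noting: the timer only starts at the second sample (`cnt == 1`), so the first-sample warm-up is excluded from the reported average (and `start_time` is only defined when the task has at least two samples); and the data loader uses `batch_size=1`, which is why each field is unwrapped with `[0]` before the chat message is built.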
The row's identifier fields:
- eval_id: 26,049
- flageval_id: 1,054
Row 2 is an API-based submission, identified by its populated model_api_* fields:
- model: yi.daiteng01
- model_api_url: https://api.lingyiwanwu.com/v1/chat/completions
- model_api_key: 876995f3b3ce41aca60b637fb51d752e
- model_api_name: yi-vision
- revision: main
- precision: float16
- private: false
- weight_type: Original
- status: RUNNING
- submitted_time: 2025-01-24T07:22:04
- model_type: 🟢 : pretrained
- params: 0
- eval_id: 26,055
- flageval_id: 1,055
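For rows like this one, evaluation traffic goes to the recorded endpoint rather than a local checkpoint. A hypothetical sketch of such a request; the payload shape follows the common OpenAI chat-completions convention and is an assumption here, not something this dataset confirms:

```python
# Hypothetical sketch: payload fields follow the OpenAI chat-completions
# convention; the exact schema accepted by this endpoint is an assumption.
import requests

resp = requests.post(
    "https://api.lingyiwanwu.com/v1/chat/completions",  # model_api_url from the row
    headers={"Authorization": "Bearer <model_api_key>"},  # key is stored in the row
    json={
        "model": "yi-vision",  # model_api_name from the row
        "messages": [{"role": "user", "content": "Describe the image."}],
    },
    timeout=60,
)
print(resp.json())
```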
Downloads last month: 731

Spaces using open-cn-llm-leaderboard/vlm_requests: 1