"""Configuration for a CLIP-vision + Phi-2 multimodal pipeline.

NOTE(review): this module had been collapsed onto a single physical line,
which is a SyntaxError in Python; restored to valid multi-line form with
all names and values unchanged.
"""
import torch
from transformers import AutoProcessor, AutoTokenizer


class Config:
    """Shared constants plus preloaded processor/tokenizer handles.

    All attributes are evaluated at class-definition time, so merely
    importing this module loads the pretrained processor and tokenizer
    (Hugging Face hub download or local cache hit).
    """

    # End-of-sequence token id (GPT-2-style vocab; presumably matches the
    # phi-2 tokenizer — TODO confirm against tokenizer.eos_token_id).
    EOS_TOKEN_ID = 50256
    # Special token ID for question-answer separation.
    QUESTION_ANSWER_SEPARATOR_ID = 50295
    # Token ids marking the image/text boundary in the input sequence
    # (presumably decode to a textual marker — verify with tokenizer.decode).
    IMAGE_SEPARATOR_TOKENS = [685, 36259, 14041, 60, 220]

    # Hub identifiers for the language model and the CLIP vision backbone.
    phi_model_name = "microsoft/phi-2"
    model_name = "openai/clip-vit-base-patch32"

    # Prefer GPU when available; fall back to CPU.
    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")

    # Eagerly loaded at import time.
    processor = AutoProcessor.from_pretrained(model_name)
    # trust_remote_code=True executes Python shipped inside the model repo —
    # security-sensitive; acceptable only because the repo is trusted.
    tokenizer = AutoTokenizer.from_pretrained(phi_model_name, trust_remote_code=True)