import os

import torch
from huggingface_hub import HfApi

# Replace this with your token
# TOKEN = os.environ.get("HF_TOKEN", None)
TOKEN = os.getenv("HF_TOKEN")

# OWNER = "vectara"
# REPO_ID = f"{OWNER}/Humanlike"
# QUEUE_REPO = f"{OWNER}/requests"
# RESULTS_REPO = f"{OWNER}/results"

OWNER = "tangtang1995"  # Change to your org - don't forget to create a results and a requests dataset, with the correct format!
# ----------------------------------
REPO_ID = f"{OWNER}/Humanlike"
QUEUE_REPO = f"{OWNER}/requests"
RESULTS_REPO = f"{OWNER}/results"

CACHE_PATH = os.getenv("HF_HOME", ".")

# Local caches
EVAL_REQUESTS_PATH = os.path.join(CACHE_PATH, "eval-queue")
EVAL_RESULTS_PATH = os.path.join(CACHE_PATH, "eval-results")
EVAL_REQUESTS_PATH_BACKEND = os.path.join(CACHE_PATH, "eval-queue-bk")
EVAL_RESULTS_PATH_BACKEND = os.path.join(CACHE_PATH, "eval-results-bk")

DEVICE = torch.device("cuda" if torch.cuda.is_available() else "cpu")

API = HfApi(token=TOKEN)

DATASET_PATH = "./src/datasets/Material_Llama2_0603.xlsx"  # experiment data
PROMPT_PATH = "./src/datasets/prompt.xlsx"  # prompt for each experiment
HEM_PATH = "vectara/hallucination_evaluation_model"
HUMAN_DATA = "./src/datasets/human_data.csv"  # human experiment data
ITEM_4_DATA = "./src/datasets/associataion_dataset.csv"  # association database
ITEM_5_DATA = "./src/datasets/Items_5.csv"  # verb words needed for experiment 5

# Prompts
# SYSTEM_PROMPT = "You are a chat bot answering questions using data. You must stick to the answers provided solely by the text in the passage provided."
SYSTEM_PROMPT = "You are a participant of a psycholinguistic experiment. You will do a task on English language use."

# USER_PROMPT = "You are asked the question 'Provide a concise summary of the following passage, covering the core pieces of information described': "
USER_PROMPT = ""
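
# Minimal usage sketch (not part of the original module): it shows how these
# constants are typically consumed, i.e. mirroring the requests/results dataset
# repos into the local cache paths before evaluation. `snapshot_download` is a
# standard huggingface_hub call; the guarded block below is illustrative only
# and assumes QUEUE_REPO and RESULTS_REPO exist as dataset repos on the Hub.
if __name__ == "__main__":
    from huggingface_hub import snapshot_download

    # Download the evaluation queue and results datasets into the local caches.
    snapshot_download(
        repo_id=QUEUE_REPO,
        repo_type="dataset",
        local_dir=EVAL_REQUESTS_PATH,
        token=TOKEN,
    )
    snapshot_download(
        repo_id=RESULTS_REPO,
        repo_type="dataset",
        local_dir=EVAL_RESULTS_PATH,
        token=TOKEN,
    )
    print(f"Requests cached at {EVAL_REQUESTS_PATH}; results cached at {EVAL_RESULTS_PATH}")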