File size: 6,213 Bytes
4ffd659
 
 
 
7e5cb25
 
97826e4
7e5cb25
 
19513c9
7e5cb25
4ffd659
 
7e5cb25
4ffd659
 
 
 
 
 
7e5cb25
4ffd659
7e5cb25
 
 
 
97826e4
19513c9
7e5cb25
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
97826e4
 
 
 
 
 
 
 
4ffd659
 
97826e4
 
 
 
 
 
 
 
 
 
4ffd659
 
97826e4
4ffd659
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
7e5cb25
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
4ffd659
7e5cb25
 
 
4ffd659
7e5cb25
4ffd659
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
import openai
import os
import json
import requests
from hugchat import hugchat
from hugchat.login import Login
import together
from anthropic import Anthropic, HUMAN_PROMPT, AI_PROMPT


from dotenv import load_dotenv
# Load API keys / credentials from a local .env file into the environment.
load_dotenv()


# Per-provider API keys, read from the environment (None if unset).
TOGETHER_API_KEY = os.getenv('TOGETHER_API_KEY')
COHERE_API_KEY = os.getenv('COHERE_API_KEY')
AI21_API_KEY = os.getenv('AI21_API_KEY')
ALEPH_API_KEY = os.getenv('ALEPH_API_KEY')
OPEN_ROUTER_API_KEY = os.getenv('OPEN_ROUTER_API_KEY')
OPENAI_API_KEY = os.getenv('OPENAI_API_KEY')
ANTHROPIC_API_KEY = os.getenv('ANTHROPIC_API_KEY')

# Huggingface login credentials
HUGGING_EMAIL = os.environ.get("HUGGING_EMAIL")
HUGGING_PASSWORD = os.environ.get("HUGGING_PASSWORD")

# Generation cap shared by every provider wrapper below.
MAX_TOKENS = 700


# Log in to huggingface and grant authorization to huggingchat
sign = Login(HUGGING_EMAIL, HUGGING_PASSWORD)
cookie_path_dir = "./cookies"

try:
  cookies = sign.loadCookiesFromDir(cookie_path_dir) # This will detect if the JSON file exists, return cookies if it does and raise an Exception if it's not.

except Exception as e:
  # No cached cookies (or unreadable) — fall back to a fresh login.
  print(e)
  
  # Save cookies to the local directory
  # NOTE(review): saveCookiesToDir is called before login(); presumably the
  # hugchat library persists cookies internally on login — confirm ordering.
  sign.saveCookiesToDir(cookie_path_dir)
  cookies = sign.login()

# Shared ChatBot session reused by hugchat_func for every request.
chatbot = hugchat.ChatBot(cookies=cookies.get_dict())  # or cookie_path="usercookies/<email>.json"

def hugchat_func(model, params):
    """Query a HuggingChat model and return the generated text.

    Args:
        model: dict with an 'api_id' key naming the hugchat LLM to use.
        params: dict with 'text' (the prompt) and optional 'stop' sequence.

    Returns:
        The generated response text (str).

    Raises:
        ValueError: if model['api_id'] is not among the available LLMs.
    """
    # Start each request in a fresh conversation so history doesn't leak.
    conversation_id = chatbot.new_conversation()
    chatbot.change_conversation(conversation_id)

    # Locate the requested model in the chatbot's LLM list; fail with a
    # clear message instead of an opaque IndexError when it's absent.
    index = next(
        (i for i, llm in enumerate(chatbot.llms) if llm == model['api_id']),
        None,
    )
    if index is None:
        raise ValueError(f"Model {model['api_id']!r} not found in chatbot.llms")

    print(f"Switching to {index}")

    # Point the shared chatbot session at the requested model.
    chatbot.switch_llm(index)

    query_result = chatbot.query(params['text'], temperature=0, max_new_tokens=MAX_TOKENS, stop=params['stop'] if params.get('stop') else None)

    return query_result['text']

def together_func(model, params):
    """Query a Together-hosted model and return the completion text.

    Args:
        model: dict with an 'api_id' key naming the Together model.
        params: dict with 'text' (the prompt) and optional 'stop' sequence.

    Returns:
        The completion text (str), with the configured stop sequence
        removed from the end if present.
    """
    together.api_key = TOGETHER_API_KEY

    # generate response (chat-style prompt framing expected by these models)
    response = together.Complete.create(
        model = model['api_id'],
        prompt=f"<human>: {params['text']}\n<bot>:",
        temperature=0,
        max_tokens=MAX_TOKENS,
        stop=["<human>", "<human>:","</s>", "<|end|>", "<|endoftext|>", "<bot>", "```\n```", "\nUser"]
    )

    text = response['output']['choices'][0]['text']

    # BUGFIX: the original used text.rstrip(params['stop']), but str.rstrip
    # treats its argument as a SET of characters, not a suffix — it could
    # strip arbitrary trailing characters. Also, params['stop'] raised
    # KeyError when absent; siblings use params.get('stop'). Remove the
    # stop sequence only when it actually terminates the output.
    stop = params.get('stop')
    if stop and text.endswith(stop):
        text = text[:-len(stop)]

    return text

def cohere(model, params):
    """Call Cohere's /v1/generate endpoint and return the first generation.

    Args:
        model: dict with an 'api_id' key naming the Cohere model.
        params: dict with 'text' (the prompt) and optional 'stop' sequence.

    Returns:
        The generated text (str) of the first generation.
    """
    headers = {
        "accept": "application/json",
        "content-type": "application/json",
        "authorization": f"Bearer {COHERE_API_KEY}",
    }

    payload = {
        "max_tokens": MAX_TOKENS,
        "truncate": "END",
        "return_likelihoods": "NONE",
        "prompt": params['text'],
        "stop_sequences": [params['stop']] if params.get('stop') else [],
        "model": model['api_id'],
        "temperature": 0,
    }

    response = requests.post(
        "https://api.cohere.ai/v1/generate",
        headers=headers,
        data=json.dumps(payload),
    )

    return response.json()['generations'][0]['text']

def openai_func(model, params):
    """Query an OpenAI chat model and return the assistant's reply text.

    Args:
        model: dict with an 'api_id' key naming the OpenAI model.
        params: dict with 'text' (the prompt) and optional 'stop' sequence.

    Returns:
        The assistant message content (str).
    """
    openai.api_key = OPENAI_API_KEY

    stop_sequences = [params['stop']] if params.get('stop') else []
    messages = [{"role": "user", "content": params['text']}]

    completion = openai.ChatCompletion.create(
        model=model['api_id'],
        messages=messages,
        temperature=0,
        max_tokens=MAX_TOKENS,
        stop=stop_sequences,
    )

    return completion.choices[0].message.content

def ai21(model, params):
    """Call AI21's completion endpoint and return the first completion text.

    Args:
        model: dict with an 'api_id' key naming the AI21 model (interpolated
            into the endpoint URL).
        params: dict with 'text' (the prompt) and optional 'stop' sequence.

    Returns:
        The completion text (str).
    """
    headers = {
        "accept": "application/json",
        "content-type": "application/json",
        "Authorization": f"Bearer {AI21_API_KEY}",
    }

    payload = {
        "prompt": params['text'],
        "maxTokens": MAX_TOKENS,
        "temperature": 0,
        "stopSequences": [params['stop']] if params.get('stop') else [],
    }

    url = f"https://api.ai21.com/studio/v1/{model['api_id']}/complete"
    response = requests.post(url, headers=headers, data=json.dumps(payload))

    return response.json()['completions'][0]['data']['text']

def openrouter(model, params):
    """Query a model via the OpenRouter chat-completions API.

    Args:
        model: dict with an 'api_id' key naming the routed model.
        params: dict with 'text' (the prompt) and optional 'stop' sequence.

    Returns:
        The assistant message content (str).
    """
    headers = {
        "HTTP-Referer": 'https://benchmarks.llmonitor.com', # To identify your app. Can be set to localhost for testing
        "Authorization": "Bearer " + OPEN_ROUTER_API_KEY,
    }

    payload = {
        "model": model['api_id'],
        "temperature": 0,
        "max_tokens": MAX_TOKENS,
        "stop": [params['stop']] if params.get('stop') else [],
        "messages": [
            {"role": "user", "content": params['text']}
        ],
    }

    response = requests.post(
        url="https://openrouter.ai/api/v1/chat/completions",
        headers=headers,
        data=json.dumps(payload),
    )

    completion = response.json()
    return completion["choices"][0]["message"]["content"]

def anthropic_func(model,params):
    """Query an Anthropic model via the completions API.

    Args:
        model: dict with an 'api_id' key naming the Anthropic model.
        params: dict with 'text' (the prompt); 'stop' is not used here.

    Returns:
        The completion text (str).
    """
    client = Anthropic(api_key=ANTHROPIC_API_KEY)

    # The legacy completions API requires the HUMAN/AI prompt framing.
    framed_prompt = f"{HUMAN_PROMPT} {params['text']}{AI_PROMPT}"

    result = client.completions.create(
        model=model['api_id'],
        temperature=0,
        max_tokens_to_sample=MAX_TOKENS,
        prompt=framed_prompt,
    )

    return result.completion

def alephalpha(model, params):
    """Call Aleph Alpha's /complete endpoint and return the first completion.

    Args:
        model: dict with an 'api_id' key naming the Aleph Alpha model.
        params: dict with 'text' (the prompt) and optional 'stop' sequence.

    Returns:
        The completion text (str).
    """
    headers = {
        "Content-Type": "application/json",
        "Accept": "application/json",
        "Authorization": f"Bearer {ALEPH_API_KEY}",
    }

    payload = {
        "model": model['api_id'],
        "prompt": params['text'],
        "maximum_tokens": MAX_TOKENS,
        "stop_sequences": [params['stop']] if params.get('stop') else [],
    }

    response = requests.post(
        "https://api.aleph-alpha.com/complete",
        headers=headers,
        data=json.dumps(payload),
    )

    return response.json()['completions'][0]['completion']